Subversion Repositories Kolibri OS

Compare Revisions

No changes between revisions

Regard whitespace Rev 1890 → Rev 1891

/programs/develop/libraries/pixman/Makefile
0,0 → 1,54
 
# Build of pixman as a static library (libpixman-1.a) for KolibriOS.
LIBRARY = pixman-1
 
CC = gcc
 
# -mmmx is required because pixman-mmx.c (MMX fast paths) is compiled in.
CFLAGS = -c -O2 -mmmx -Winline
 
# PIXMAN_NO_TLS: build without thread-local storage; USE_MMX enables the MMX implementation.
DEFINES = -DHAVE_CONFIG_H -DPACKAGE -DPIXMAN_NO_TLS -DUSE_MMX
 
INCLUDES = -I../pixman -I../newlib/include -I../newlib/include/sys
 
SOURCES = \
pixman-image.c \
pixman-access.c \
pixman-access-accessors.c \
pixman-region16.c \
pixman-region32.c \
pixman-combine32.c \
pixman-combine64.c \
pixman-utils.c \
pixman-edge.c \
pixman-edge-accessors.c \
pixman-trap.c \
pixman-timer.c \
pixman-matrix.c \
pixman-gradient-walker.c \
pixman-linear-gradient.c \
pixman-radial-gradient.c \
pixman-bits-image.c \
pixman.c \
pixman-cpu.c \
pixman-fast-path.c \
pixman-implementation.c \
pixman-solid-fill.c \
pixman-general.c \
pixman-mmx.c \
$(NULL)
OBJECTS = $(patsubst %.c, %.o, $(SOURCES))
 
# targets
 
all:$(LIBRARY).a
 
 
$(LIBRARY).a: $(OBJECTS) Makefile
ar cvrs $(LIBRARY).a $(OBJECTS)
 
# NOTE(review): every object depends on ALL sources, so touching any .c
# rebuilds everything.  This is coarse but not accidental: e.g.
# pixman-access-accessors.c #includes pixman-access.c, so a per-file
# dependency would leave stale objects behind.
%.o: %.c $(SOURCES) Makefile
$(CC) $(CFLAGS) $(DEFINES) $(INCLUDES) -o $@ $<
 
 
 
/programs/develop/libraries/pixman/pixman-access-accessors.c
0,0 → 1,3
/* Compile a second copy of pixman-access.c with PIXMAN_FB_ACCESSORS
 * defined, so that every fetch/store routine goes through the accessor
 * (READ/WRITE) indirection instead of touching the bits directly. */
#define PIXMAN_FB_ACCESSORS
 
#include "pixman-access.c"
/programs/develop/libraries/pixman/pixman-access.c
0,0 → 1,2989
/*
*
* Copyright © 2000 Keith Packard, member of The XFree86 Project, Inc.
* 2005 Lars Knoll & Zack Rusin, Trolltech
* 2008 Aaron Plattner, NVIDIA Corporation
*
* Permission to use, copy, modify, distribute, and sell this software and its
* documentation for any purpose is hereby granted without fee, provided that
* the above copyright notice appear in all copies and that both that
* copyright notice and this permission notice appear in supporting
* documentation, and that the name of Keith Packard not be used in
* advertising or publicity pertaining to distribution of the software without
* specific, written prior permission. Keith Packard makes no
* representations about the suitability of this software for any purpose. It
* is provided "as is" without express or implied warranty.
*
* THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS
* SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS, IN NO EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY
* SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN
* AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING
* OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS
* SOFTWARE.
*/
 
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif
 
#include <stdlib.h>
#include <string.h>
#include <assert.h>
 
#include "pixman-private.h"
#include "pixman-accessor.h"
 
/* Approximate 15-bit luma from an x8r8g8b8 pixel.  The weights
 * 153/301/58 are 512 * (0.299, 0.587, 0.114) (Rec.601 luma), so the
 * sum fits in 17 bits and the >> 2 leaves a 15-bit value. */
#define CONVERT_RGB24_TO_Y15(s) \
(((((s) >> 16) & 0xff) * 153 + \
(((s) >> 8) & 0xff) * 301 + \
(((s) ) & 0xff) * 58) >> 2)
 
/* Truncate an x8r8g8b8 pixel to x1r5g5b5 (keep the top 5 bits of each channel). */
#define CONVERT_RGB24_TO_RGB15(s) \
((((s) >> 3) & 0x001f) | \
(((s) >> 6) & 0x03e0) | \
(((s) >> 9) & 0x7c00))
 
/* Palette lookups: map a 15-bit key through the indexed image's ent[] table. */
#define RGB15_TO_ENTRY(mif,rgb15) \
((mif)->ent[rgb15])
 
#define RGB24_TO_ENTRY(mif,rgb24) \
RGB15_TO_ENTRY (mif,CONVERT_RGB24_TO_RGB15 (rgb24))
 
#define RGB24_TO_ENTRY_Y(mif,rgb24) \
((mif)->ent[CONVERT_RGB24_TO_Y15 (rgb24)])
 
/*
* YV12 setup and access macros
*/
 
/* Declares bits/stride and the byte offsets of the two chroma planes
 * that follow the luma plane (each chroma plane is quarter-size).
 * The stride < 0 arms handle bottom-up images. */
#define YV12_SETUP(image) \
bits_image_t *__bits_image = (bits_image_t *)image; \
uint32_t *bits = __bits_image->bits; \
int stride = __bits_image->rowstride; \
int offset0 = stride < 0 ? \
((-stride) >> 1) * ((__bits_image->height - 1) >> 1) - stride : \
stride * __bits_image->height; \
int offset1 = stride < 0 ? \
offset0 + ((-stride) >> 1) * ((__bits_image->height) >> 1) : \
offset0 + (offset0 >> 2)
 
/* Note no trailing semicolon on the above macro; if it's there, then
* the typical usage of YV12_SETUP(image); will have an extra trailing ;
* that some compilers will interpret as a statement -- and then any further
* variable declarations will cause an error.
*/
 
/* Start of the luma row for a given scanline. */
#define YV12_Y(line) \
((uint8_t *) ((bits) + (stride) * (line)))
 
/* U plane lies after the V plane (offset1); chroma is subsampled 2x2. */
#define YV12_U(line) \
((uint8_t *) ((bits) + offset1 + \
((stride) >> 1) * ((line) >> 1)))
 
/* V plane starts right after the luma plane (offset0). */
#define YV12_V(line) \
((uint8_t *) ((bits) + offset0 + \
((stride) >> 1) * ((line) >> 1)))
 
/********************************** Fetch ************************************/
 
/* All fetch_scanline_* routines read `width` pixels starting at (x, y)
 * of the given bits image and widen them into `buffer` as a8r8g8b8
 * (one uint32_t per pixel).  `mask` is unused here. */

/* a8r8g8b8 is already the canonical format: a straight (wrapped) memcpy. */
static void
fetch_scanline_a8r8g8b8 (pixman_image_t *image,
int x,
int y,
int width,
uint32_t * buffer,
const uint32_t *mask)
{
const uint32_t *bits = image->bits.bits + y * image->bits.rowstride;
MEMCPY_WRAPPED (image,
buffer, (const uint32_t *)bits + x,
width * sizeof(uint32_t));
}
 
/* x8r8g8b8: copy, forcing the unused top byte to full alpha. */
static void
fetch_scanline_x8r8g8b8 (pixman_image_t *image,
int x,
int y,
int width,
uint32_t * buffer,
const uint32_t *mask)
{
const uint32_t *bits = image->bits.bits + y * image->bits.rowstride;
const uint32_t *pixel = (const uint32_t *)bits + x;
const uint32_t *end = pixel + width;
while (pixel < end)
*buffer++ = READ (image, pixel++) | 0xff000000;
}
 
/* a8b8g8r8: swap the R and B channels into a8r8g8b8 order. */
static void
fetch_scanline_a8b8g8r8 (pixman_image_t *image,
int x,
int y,
int width,
uint32_t * buffer,
const uint32_t *mask)
{
const uint32_t *bits = image->bits.bits + y * image->bits.rowstride;
const uint32_t *pixel = (uint32_t *)bits + x;
const uint32_t *end = pixel + width;
while (pixel < end)
{
uint32_t p = READ (image, pixel++);
*buffer++ = (p & 0xff00ff00) |
((p >> 16) & 0xff) |
((p & 0xff) << 16);
}
}
 
/* x8b8g8r8: swap R/B and force alpha opaque. */
static void
fetch_scanline_x8b8g8r8 (pixman_image_t *image,
int x,
int y,
int width,
uint32_t * buffer,
const uint32_t *mask)
{
const uint32_t *bits = image->bits.bits + y * image->bits.rowstride;
const uint32_t *pixel = (uint32_t *)bits + x;
const uint32_t *end = pixel + width;
while (pixel < end)
{
uint32_t p = READ (image, pixel++);
*buffer++ = 0xff000000 |
(p & 0x0000ff00) |
((p >> 16) & 0xff) |
((p & 0xff) << 16);
}
}
 
/* b8g8r8a8: full byte reversal of each pixel. */
static void
fetch_scanline_b8g8r8a8 (pixman_image_t *image,
int x,
int y,
int width,
uint32_t * buffer,
const uint32_t *mask)
{
const uint32_t *bits = image->bits.bits + y * image->bits.rowstride;
const uint32_t *pixel = (uint32_t *)bits + x;
const uint32_t *end = pixel + width;
 
while (pixel < end)
{
uint32_t p = READ (image, pixel++);
 
*buffer++ = (((p & 0xff000000) >> 24) |
((p & 0x00ff0000) >> 8) |
((p & 0x0000ff00) << 8) |
((p & 0x000000ff) << 24));
}
}
 
/* b8g8r8x8: byte-reverse and force alpha opaque. */
static void
fetch_scanline_b8g8r8x8 (pixman_image_t *image,
int x,
int y,
int width,
uint32_t * buffer,
const uint32_t *mask)
{
const uint32_t *bits = image->bits.bits + y * image->bits.rowstride;
const uint32_t *pixel = (uint32_t *)bits + x;
const uint32_t *end = pixel + width;
while (pixel < end)
{
uint32_t p = READ (image, pixel++);
*buffer++ = (0xff000000 |
((p & 0xff000000) >> 24) |
((p & 0x00ff0000) >> 8) |
((p & 0x0000ff00) << 8));
}
}
 
/* x14r6g6b6: widen each 6-bit channel to 8 bits by replicating its
 * top 2 bits into the low bits. */
static void
fetch_scanline_x14r6g6b6 (pixman_image_t *image,
int x,
int y,
int width,
uint32_t * buffer,
const uint32_t *mask)
{
const uint32_t *bits = image->bits.bits + y * image->bits.rowstride;
const uint32_t *pixel = (const uint32_t *)bits + x;
const uint32_t *end = pixel + width;
 
while (pixel < end)
{
uint32_t p = READ (image, pixel++);
uint32_t r, g, b;
 
r = ((p & 0x3f000) << 6) | ((p & 0x30000));
g = ((p & 0x00fc0) << 4) | ((p & 0x00c00) >> 2);
b = ((p & 0x0003f) << 2) | ((p & 0x00030) >> 4);
 
*buffer++ = 0xff000000 | r | g | b;
}
}
 
/* Expects a uint64_t buffer */
/* The four 10-bit fetchers below produce a16r16g16b16 (one uint64_t per
 * pixel): each 10-bit channel is widened to 16 bits with c << 6 | c >> 4,
 * and the 2-bit alpha is replicated across all 16 bits.  The uint32_t *b
 * parameter is only a type-punned alias for the 64-bit buffer. */
static void
fetch_scanline_a2r10g10b10 (pixman_image_t *image,
int x,
int y,
int width,
uint32_t * b,
const uint32_t *mask)
{
const uint32_t *bits = image->bits.bits + y * image->bits.rowstride;
const uint32_t *pixel = bits + x;
const uint32_t *end = pixel + width;
uint64_t *buffer = (uint64_t *)b;
 
while (pixel < end)
{
uint32_t p = READ (image, pixel++);
uint64_t a = p >> 30;
uint64_t r = (p >> 20) & 0x3ff;
uint64_t g = (p >> 10) & 0x3ff;
uint64_t b = p & 0x3ff;  /* shadows the parameter; buffer was taken above */
 
r = r << 6 | r >> 4;
g = g << 6 | g >> 4;
b = b << 6 | b >> 4;
 
a <<= 14;
a |= a >> 2;
a |= a >> 4;
a |= a >> 8;
 
*buffer++ = a << 48 | r << 32 | g << 16 | b;
}
}
 
/* Expects a uint64_t buffer */
static void
fetch_scanline_x2r10g10b10 (pixman_image_t *image,
int x,
int y,
int width,
uint32_t * b,
const uint32_t *mask)
{
const uint32_t *bits = image->bits.bits + y * image->bits.rowstride;
const uint32_t *pixel = (uint32_t *)bits + x;
const uint32_t *end = pixel + width;
uint64_t *buffer = (uint64_t *)b;
while (pixel < end)
{
uint32_t p = READ (image, pixel++);
uint64_t r = (p >> 20) & 0x3ff;
uint64_t g = (p >> 10) & 0x3ff;
uint64_t b = p & 0x3ff;
r = r << 6 | r >> 4;
g = g << 6 | g >> 4;
b = b << 6 | b >> 4;
/* no alpha bits in the source: force fully opaque */
*buffer++ = 0xffffULL << 48 | r << 32 | g << 16 | b;
}
}
 
/* Expects a uint64_t buffer */
static void
fetch_scanline_a2b10g10r10 (pixman_image_t *image,
int x,
int y,
int width,
uint32_t * b,
const uint32_t *mask)
{
const uint32_t *bits = image->bits.bits + y * image->bits.rowstride;
const uint32_t *pixel = bits + x;
const uint32_t *end = pixel + width;
uint64_t *buffer = (uint64_t *)b;
while (pixel < end)
{
uint32_t p = READ (image, pixel++);
uint64_t a = p >> 30;
uint64_t b = (p >> 20) & 0x3ff;
uint64_t g = (p >> 10) & 0x3ff;
uint64_t r = p & 0x3ff;
r = r << 6 | r >> 4;
g = g << 6 | g >> 4;
b = b << 6 | b >> 4;
a <<= 14;
a |= a >> 2;
a |= a >> 4;
a |= a >> 8;
 
*buffer++ = a << 48 | r << 32 | g << 16 | b;
}
}
 
/* Expects a uint64_t buffer */
static void
fetch_scanline_x2b10g10r10 (pixman_image_t *image,
int x,
int y,
int width,
uint32_t * b,
const uint32_t *mask)
{
const uint32_t *bits = image->bits.bits + y * image->bits.rowstride;
const uint32_t *pixel = (uint32_t *)bits + x;
const uint32_t *end = pixel + width;
uint64_t *buffer = (uint64_t *)b;
while (pixel < end)
{
uint32_t p = READ (image, pixel++);
uint64_t b = (p >> 20) & 0x3ff;
uint64_t g = (p >> 10) & 0x3ff;
uint64_t r = p & 0x3ff;
r = r << 6 | r >> 4;
g = g << 6 | g >> 4;
b = b << 6 | b >> 4;
*buffer++ = 0xffffULL << 48 | r << 32 | g << 16 | b;
}
}
 
/* r8g8b8: packed 24bpp, three bytes per pixel; byte order in memory
 * depends on endianness. */
static void
fetch_scanline_r8g8b8 (pixman_image_t *image,
int x,
int y,
int width,
uint32_t * buffer,
const uint32_t *mask)
{
const uint32_t *bits = image->bits.bits + y * image->bits.rowstride;
const uint8_t *pixel = (const uint8_t *)bits + 3 * x;
const uint8_t *end = pixel + 3 * width;
while (pixel < end)
{
uint32_t b = 0xff000000;
#ifdef WORDS_BIGENDIAN
b |= (READ (image, pixel++) << 16);
b |= (READ (image, pixel++) << 8);
b |= (READ (image, pixel++));
#else
b |= (READ (image, pixel++));
b |= (READ (image, pixel++) << 8);
b |= (READ (image, pixel++) << 16);
#endif
*buffer++ = b;
}
}
 
/* b8g8r8: packed 24bpp with R and B swapped relative to r8g8b8. */
static void
fetch_scanline_b8g8r8 (pixman_image_t *image,
int x,
int y,
int width,
uint32_t * buffer,
const uint32_t *mask)
{
const uint32_t *bits = image->bits.bits + y * image->bits.rowstride;
const uint8_t *pixel = (const uint8_t *)bits + 3 * x;
const uint8_t *end = pixel + 3 * width;
while (pixel < end)
{
uint32_t b = 0xff000000;
#ifdef WORDS_BIGENDIAN
b |= (READ (image, pixel++));
b |= (READ (image, pixel++) << 8);
b |= (READ (image, pixel++) << 16);
#else
b |= (READ (image, pixel++) << 16);
b |= (READ (image, pixel++) << 8);
b |= (READ (image, pixel++));
#endif
*buffer++ = b;
}
}
 
/* r5g6b5: position the 5/6/5 fields at the top of each byte, then
 * replicate the high bits into the low bits to fill 8 bits per channel. */
static void
fetch_scanline_r5g6b5 (pixman_image_t *image,
int x,
int y,
int width,
uint32_t * buffer,
const uint32_t *mask)
{
const uint32_t *bits = image->bits.bits + y * image->bits.rowstride;
const uint16_t *pixel = (const uint16_t *)bits + x;
const uint16_t *end = pixel + width;
while (pixel < end)
{
uint32_t p = READ (image, pixel++);
uint32_t r = (((p) << 3) & 0xf8) |
(((p) << 5) & 0xfc00) |
(((p) << 8) & 0xf80000);
r |= (r >> 5) & 0x70007;
r |= (r >> 6) & 0x300;
*buffer++ = 0xff000000 | r;
}
}
 
/* b5g6r5: like r5g6b5 but with R and B swapped; each channel is widened
 * by OR-ing in its own top bits. */
static void
fetch_scanline_b5g6r5 (pixman_image_t *image,
int x,
int y,
int width,
uint32_t * buffer,
const uint32_t *mask)
{
const uint32_t *bits = image->bits.bits + y * image->bits.rowstride;
const uint16_t *pixel = (const uint16_t *)bits + x;
const uint16_t *end = pixel + width;
while (pixel < end)
{
uint32_t p = READ (image, pixel++);
uint32_t r, g, b;
b = ((p & 0xf800) | ((p & 0xe000) >> 5)) >> 8;
g = ((p & 0x07e0) | ((p & 0x0600) >> 6)) << 5;
r = ((p & 0x001c) | ((p & 0x001f) << 5)) << 14;
*buffer++ = 0xff000000 | r | g | b;
}
}
 
/* a1r5g5b5: `0 - alpha_bit` in uint8_t yields 0x00 or 0xff, giving the
 * widened 8-bit alpha; color channels are bit-replicated 5 -> 8 bits. */
static void
fetch_scanline_a1r5g5b5 (pixman_image_t *image,
int x,
int y,
int width,
uint32_t * buffer,
const uint32_t *mask)
{
const uint32_t *bits = image->bits.bits + y * image->bits.rowstride;
const uint16_t *pixel = (const uint16_t *)bits + x;
const uint16_t *end = pixel + width;
while (pixel < end)
{
uint32_t p = READ (image, pixel++);
uint32_t r, g, b, a;
a = (uint32_t) ((uint8_t) (0 - ((p & 0x8000) >> 15))) << 24;
r = ((p & 0x7c00) | ((p & 0x7000) >> 5)) << 9;
g = ((p & 0x03e0) | ((p & 0x0380) >> 5)) << 6;
b = ((p & 0x001c) | ((p & 0x001f) << 5)) >> 2;
*buffer++ = a | r | g | b;
}
}
 
/* x1r5g5b5: as a1r5g5b5 but the top bit is ignored and alpha forced opaque. */
static void
fetch_scanline_x1r5g5b5 (pixman_image_t *image,
int x,
int y,
int width,
uint32_t * buffer,
const uint32_t *mask)
{
const uint32_t *bits = image->bits.bits + y * image->bits.rowstride;
const uint16_t *pixel = (const uint16_t *)bits + x;
const uint16_t *end = pixel + width;
while (pixel < end)
{
uint32_t p = READ (image, pixel++);
uint32_t r, g, b;
r = ((p & 0x7c00) | ((p & 0x7000) >> 5)) << 9;
g = ((p & 0x03e0) | ((p & 0x0380) >> 5)) << 6;
b = ((p & 0x001c) | ((p & 0x001f) << 5)) >> 2;
*buffer++ = 0xff000000 | r | g | b;
}
}
 
/* a1b5g5r5: same scheme with R and B swapped. */
static void
fetch_scanline_a1b5g5r5 (pixman_image_t *image,
int x,
int y,
int width,
uint32_t * buffer,
const uint32_t *mask)
{
const uint32_t *bits = image->bits.bits + y * image->bits.rowstride;
const uint16_t *pixel = (const uint16_t *)bits + x;
const uint16_t *end = pixel + width;
uint32_t r, g, b, a;
while (pixel < end)
{
uint32_t p = READ (image, pixel++);
a = (uint32_t) ((uint8_t) (0 - ((p & 0x8000) >> 15))) << 24;
b = ((p & 0x7c00) | ((p & 0x7000) >> 5)) >> 7;
g = ((p & 0x03e0) | ((p & 0x0380) >> 5)) << 6;
r = ((p & 0x001c) | ((p & 0x001f) << 5)) << 14;
*buffer++ = a | r | g | b;
}
}
 
/* x1b5g5r5: R/B swapped, alpha forced opaque. */
static void
fetch_scanline_x1b5g5r5 (pixman_image_t *image,
int x,
int y,
int width,
uint32_t * buffer,
const uint32_t *mask)
{
const uint32_t *bits = image->bits.bits + y * image->bits.rowstride;
const uint16_t *pixel = (const uint16_t *)bits + x;
const uint16_t *end = pixel + width;
while (pixel < end)
{
uint32_t p = READ (image, pixel++);
uint32_t r, g, b;
b = ((p & 0x7c00) | ((p & 0x7000) >> 5)) >> 7;
g = ((p & 0x03e0) | ((p & 0x0380) >> 5)) << 6;
r = ((p & 0x001c) | ((p & 0x001f) << 5)) << 14;
*buffer++ = 0xff000000 | r | g | b;
}
}
 
/* a4r4g4b4: each 4-bit field is widened to 8 bits by duplicating the
 * nibble (x | x >> 4 pattern within the masked field). */
static void
fetch_scanline_a4r4g4b4 (pixman_image_t *image,
int x,
int y,
int width,
uint32_t * buffer,
const uint32_t *mask)
{
const uint32_t *bits = image->bits.bits + y * image->bits.rowstride;
const uint16_t *pixel = (const uint16_t *)bits + x;
const uint16_t *end = pixel + width;
while (pixel < end)
{
uint32_t p = READ (image, pixel++);
uint32_t r, g, b, a;
a = ((p & 0xf000) | ((p & 0xf000) >> 4)) << 16;
r = ((p & 0x0f00) | ((p & 0x0f00) >> 4)) << 12;
g = ((p & 0x00f0) | ((p & 0x00f0) >> 4)) << 8;
b = ((p & 0x000f) | ((p & 0x000f) << 4));
*buffer++ = a | r | g | b;
}
}
 
/* x4r4g4b4: alpha nibble ignored, forced opaque. */
static void
fetch_scanline_x4r4g4b4 (pixman_image_t *image,
int x,
int y,
int width,
uint32_t * buffer,
const uint32_t *mask)
{
const uint32_t *bits = image->bits.bits + y * image->bits.rowstride;
const uint16_t *pixel = (const uint16_t *)bits + x;
const uint16_t *end = pixel + width;
while (pixel < end)
{
uint32_t p = READ (image, pixel++);
uint32_t r, g, b;
r = ((p & 0x0f00) | ((p & 0x0f00) >> 4)) << 12;
g = ((p & 0x00f0) | ((p & 0x00f0) >> 4)) << 8;
b = ((p & 0x000f) | ((p & 0x000f) << 4));
*buffer++ = 0xff000000 | r | g | b;
}
}
 
/* a4b4g4r4: nibble widening with R and B swapped. */
static void
fetch_scanline_a4b4g4r4 (pixman_image_t *image,
int x,
int y,
int width,
uint32_t * buffer,
const uint32_t *mask)
{
const uint32_t *bits = image->bits.bits + y * image->bits.rowstride;
const uint16_t *pixel = (const uint16_t *)bits + x;
const uint16_t *end = pixel + width;
while (pixel < end)
{
uint32_t p = READ (image, pixel++);
uint32_t r, g, b, a;
a = ((p & 0xf000) | ((p & 0xf000) >> 4)) << 16;
b = ((p & 0x0f00) | ((p & 0x0f00) >> 4)) >> 4;
g = ((p & 0x00f0) | ((p & 0x00f0) >> 4)) << 8;
r = ((p & 0x000f) | ((p & 0x000f) << 4)) << 16;
*buffer++ = a | r | g | b;
}
}
 
/* x4b4g4r4: R/B swapped, alpha forced opaque. */
static void
fetch_scanline_x4b4g4r4 (pixman_image_t *image,
int x,
int y,
int width,
uint32_t * buffer,
const uint32_t *mask)
{
const uint32_t *bits = image->bits.bits + y * image->bits.rowstride;
const uint16_t *pixel = (const uint16_t *)bits + x;
const uint16_t *end = pixel + width;
while (pixel < end)
{
uint32_t p = READ (image, pixel++);
uint32_t r, g, b;
b = ((p & 0x0f00) | ((p & 0x0f00) >> 4)) >> 4;
g = ((p & 0x00f0) | ((p & 0x00f0) >> 4)) << 8;
r = ((p & 0x000f) | ((p & 0x000f) << 4)) << 16;
*buffer++ = 0xff000000 | r | g | b;
}
}
 
/* a8: alpha-only; the byte becomes the alpha channel, RGB are zero. */
static void
fetch_scanline_a8 (pixman_image_t *image,
int x,
int y,
int width,
uint32_t * buffer,
const uint32_t *mask)
{
const uint32_t *bits = image->bits.bits + y * image->bits.rowstride;
const uint8_t *pixel = (const uint8_t *)bits + x;
const uint8_t *end = pixel + width;
while (pixel < end)
*buffer++ = READ (image, pixel++) << 24;
}
 
/* r3g3b2: widen 3/3/2-bit channels to 8 bits by bit replication. */
static void
fetch_scanline_r3g3b2 (pixman_image_t *image,
int x,
int y,
int width,
uint32_t * buffer,
const uint32_t *mask)
{
const uint32_t *bits = image->bits.bits + y * image->bits.rowstride;
const uint8_t *pixel = (const uint8_t *)bits + x;
const uint8_t *end = pixel + width;
while (pixel < end)
{
uint32_t p = READ (image, pixel++);
uint32_t r, g, b;
r = ((p & 0xe0) | ((p & 0xe0) >> 3) | ((p & 0xc0) >> 6)) << 16;
g = ((p & 0x1c) | ((p & 0x18) >> 3) | ((p & 0x1c) << 3)) << 8;
b = (((p & 0x03) ) |
((p & 0x03) << 2) |
((p & 0x03) << 4) |
((p & 0x03) << 6));
*buffer++ = 0xff000000 | r | g | b;
}
}
 
/* b2g3r3: 2/3/3-bit channels with B in the top bits; each field is
 * shifted into place and then replicated down to fill its byte. */
static void
fetch_scanline_b2g3r3 (pixman_image_t *image,
int x,
int y,
int width,
uint32_t * buffer,
const uint32_t *mask)
{
const uint32_t *bits = image->bits.bits + y * image->bits.rowstride;
const uint8_t *pixel = (const uint8_t *)bits + x;
const uint8_t *end = pixel + width;
 
while (pixel < end)
{
uint32_t p = READ (image, pixel++);
uint32_t r, g, b;
 
b = p & 0xc0;
b |= b >> 2;
b |= b >> 4;
b &= 0xff;
 
g = (p & 0x38) << 10;
g |= g >> 3;
g |= g >> 6;
g &= 0xff00;
 
r = (p & 0x7) << 21;
r |= r >> 3;
r |= r >> 6;
r &= 0xff0000;
 
*buffer++ = 0xff000000 | r | g | b;
}
}
 
/* a2r2g2b2: multiplying a 2-bit field by 0x55 replicates it across
 * 8 bits (00 -> 0x00, 01 -> 0x55, 10 -> 0xaa, 11 -> 0xff). */
static void
fetch_scanline_a2r2g2b2 (pixman_image_t *image,
int x,
int y,
int width,
uint32_t * buffer,
const uint32_t *mask)
{
const uint32_t *bits = image->bits.bits + y * image->bits.rowstride;
const uint8_t *pixel = (const uint8_t *)bits + x;
const uint8_t *end = pixel + width;
while (pixel < end)
{
uint32_t p = READ (image, pixel++);
uint32_t a, r, g, b;
a = ((p & 0xc0) * 0x55) << 18;
r = ((p & 0x30) * 0x55) << 12;
g = ((p & 0x0c) * 0x55) << 6;
b = ((p & 0x03) * 0x55);
*buffer++ = a | r | g | b;
}
}
 
/* a2b2g2r2: same 0x55 replication with R and B swapped. */
static void
fetch_scanline_a2b2g2r2 (pixman_image_t *image,
int x,
int y,
int width,
uint32_t * buffer,
const uint32_t *mask)
{
const uint32_t *bits = image->bits.bits + y * image->bits.rowstride;
const uint8_t *pixel = (const uint8_t *)bits + x;
const uint8_t *end = pixel + width;
while (pixel < end)
{
uint32_t p = READ (image, pixel++);
uint32_t a, r, g, b;
a = ((p & 0xc0) * 0x55) << 18;
b = ((p & 0x30) * 0x55) >> 4;
g = ((p & 0x0c) * 0x55) << 6;
r = ((p & 0x03) * 0x55) << 16;
*buffer++ = a | r | g | b;
}
}
 
/* c8: 8-bit palette index, looked up in the image's indexed->rgba table. */
static void
fetch_scanline_c8 (pixman_image_t *image,
int x,
int y,
int width,
uint32_t * buffer,
const uint32_t *mask)
{
const uint32_t *bits = image->bits.bits + y * image->bits.rowstride;
const pixman_indexed_t * indexed = image->bits.indexed;
const uint8_t *pixel = (const uint8_t *)bits + x;
const uint8_t *end = pixel + width;
while (pixel < end)
{
uint32_t p = READ (image, pixel++);
*buffer++ = indexed->rgba[p];
}
}
 
/* x4a4: 4-bit alpha in the low nibble, replicated to 8 bits. */
static void
fetch_scanline_x4a4 (pixman_image_t *image,
int x,
int y,
int width,
uint32_t * buffer,
const uint32_t *mask)
{
const uint32_t *bits = image->bits.bits + y * image->bits.rowstride;
const uint8_t *pixel = (const uint8_t *)bits + x;
const uint8_t *end = pixel + width;
while (pixel < end)
{
uint8_t p = READ (image, pixel++) & 0xf;
 
*buffer++ = (p | (p << 4)) << 24;
}
}
 
/* Read the byte containing bit offset `o` (in bits) of line `l`. */
#define FETCH_8(img,l,o) (READ (img, (((uint8_t *)(l)) + ((o) >> 3))))
/* Read the `o`-th 4-bit pixel of line `l`; which nibble comes first
 * depends on endianness. */
#ifdef WORDS_BIGENDIAN
#define FETCH_4(img,l,o) \
(((4 * (o)) & 4) ? (FETCH_8 (img,l, 4 * (o)) & 0xf) : (FETCH_8 (img,l,(4 * (o))) >> 4))
#else
#define FETCH_4(img,l,o) \
(((4 * (o)) & 4) ? (FETCH_8 (img, l, 4 * (o)) >> 4) : (FETCH_8 (img, l, (4 * (o))) & 0xf))
#endif
 
/* a4: 4-bit alpha, widened by duplicating the nibble. */
static void
fetch_scanline_a4 (pixman_image_t *image,
int x,
int y,
int width,
uint32_t * buffer,
const uint32_t *mask)
{
const uint32_t *bits = image->bits.bits + y * image->bits.rowstride;
int i;
 
for (i = 0; i < width; ++i)
{
uint32_t p = FETCH_4 (image, bits, i + x);
 
p |= p << 4;
 
*buffer++ = p << 24;
}
}
 
/* r1g2b1: 1/2/1-bit channels; `* 0xff` widens a single bit to a full
 * byte, `* 0x55` replicates the 2-bit green field. */
static void
fetch_scanline_r1g2b1 (pixman_image_t *image,
int x,
int y,
int width,
uint32_t * buffer,
const uint32_t *mask)
{
const uint32_t *bits = image->bits.bits + y * image->bits.rowstride;
int i;
for (i = 0; i < width; ++i)
{
uint32_t p = FETCH_4 (image, bits, i + x);
uint32_t r, g, b;
r = ((p & 0x8) * 0xff) << 13;
g = ((p & 0x6) * 0x55) << 7;
b = ((p & 0x1) * 0xff);
*buffer++ = 0xff000000 | r | g | b;
}
}
 
/* b1g2r1: as r1g2b1 with R and B swapped. */
static void
fetch_scanline_b1g2r1 (pixman_image_t *image,
int x,
int y,
int width,
uint32_t * buffer,
const uint32_t *mask)
{
const uint32_t *bits = image->bits.bits + y * image->bits.rowstride;
int i;
for (i = 0; i < width; ++i)
{
uint32_t p = FETCH_4 (image, bits, i + x);
uint32_t r, g, b;
b = ((p & 0x8) * 0xff) >> 3;
g = ((p & 0x6) * 0x55) << 7;
r = ((p & 0x1) * 0xff) << 16;
 
*buffer++ = 0xff000000 | r | g | b;
}
}
 
/* a1r1g1b1: one bit per channel; `* 0xff` turns each bit into 0x00/0xff. */
static void
fetch_scanline_a1r1g1b1 (pixman_image_t *image,
int x,
int y,
int width,
uint32_t * buffer,
const uint32_t *mask)
{
uint32_t a, r, g, b;
const uint32_t *bits = image->bits.bits + y * image->bits.rowstride;
int i;
 
for (i = 0; i < width; ++i)
{
uint32_t p = FETCH_4 (image, bits, i + x);
 
a = ((p & 0x8) * 0xff) << 21;
r = ((p & 0x4) * 0xff) << 14;
g = ((p & 0x2) * 0xff) << 7;
b = ((p & 0x1) * 0xff);
 
*buffer++ = a | r | g | b;
}
}
 
/* a1b1g1r1: one bit per channel with R and B swapped. */
static void
fetch_scanline_a1b1g1r1 (pixman_image_t *image,
int x,
int y,
int width,
uint32_t * buffer,
const uint32_t *mask)
{
const uint32_t *bits = image->bits.bits + y * image->bits.rowstride;
int i;
 
for (i = 0; i < width; ++i)
{
uint32_t p = FETCH_4 (image, bits, i + x);
uint32_t a, r, g, b;
 
a = ((p & 0x8) * 0xff) << 21;
b = ((p & 0x4) * 0xff) >> 2;
g = ((p & 0x2) * 0xff) << 7;
r = ((p & 0x1) * 0xff) << 16;
 
*buffer++ = a | r | g | b;
}
}
 
/* c4: 4-bit palette index looked up in indexed->rgba. */
static void
fetch_scanline_c4 (pixman_image_t *image,
int x,
int y,
int width,
uint32_t * buffer,
const uint32_t *mask)
{
const uint32_t *bits = image->bits.bits + y * image->bits.rowstride;
const pixman_indexed_t * indexed = image->bits.indexed;
int i;
for (i = 0; i < width; ++i)
{
uint32_t p = FETCH_4 (image, bits, i + x);
*buffer++ = indexed->rgba[p];
}
}
 
/* a1: 1-bit alpha packed 32 pixels per word; the bit is replicated to
 * 8 bits via the a |= a << 1/2/4 cascade. */
static void
fetch_scanline_a1 (pixman_image_t *image,
int x,
int y,
int width,
uint32_t * buffer,
const uint32_t *mask)
{
const uint32_t *bits = image->bits.bits + y * image->bits.rowstride;
int i;
for (i = 0; i < width; ++i)
{
uint32_t p = READ (image, bits + ((i + x) >> 5));
uint32_t a;
#ifdef WORDS_BIGENDIAN
a = p >> (0x1f - ((i + x) & 0x1f));
#else
a = p >> ((i + x) & 0x1f);
#endif
a = a & 1;
a |= a << 1;
a |= a << 2;
a |= a << 4;
*buffer++ = a << 24;
}
}
 
/* g1: 1-bit palette index (two-entry palette) looked up in indexed->rgba. */
static void
fetch_scanline_g1 (pixman_image_t *image,
int x,
int y,
int width,
uint32_t * buffer,
const uint32_t *mask)
{
const uint32_t *bits = image->bits.bits + y * image->bits.rowstride;
const pixman_indexed_t * indexed = image->bits.indexed;
int i;
for (i = 0; i < width; ++i)
{
uint32_t p = READ (image, bits + ((i + x) >> 5));
uint32_t a;
#ifdef WORDS_BIGENDIAN
a = p >> (0x1f - ((i + x) & 0x1f));
#else
a = p >> ((i + x) & 0x1f);
#endif
a = a & 1;
*buffer++ = indexed->rgba[a];
}
}
 
/* yuy2: packed YUV 4:2:2 (Y0 U Y1 V); fixed-point YCbCr -> RGB using the
 * coefficients given in the inline comments, each channel clamped to
 * [0, 255] by the conditional expressions in the final composition. */
static void
fetch_scanline_yuy2 (pixman_image_t *image,
int x,
int line,
int width,
uint32_t * buffer,
const uint32_t *mask)
{
const uint32_t *bits = image->bits.bits + image->bits.rowstride * line;
int i;
for (i = 0; i < width; i++)
{
int16_t y, u, v;
int32_t r, g, b;
y = ((uint8_t *) bits)[(x + i) << 1] - 16;
u = ((uint8_t *) bits)[(((x + i) << 1) & - 4) + 1] - 128;
v = ((uint8_t *) bits)[(((x + i) << 1) & - 4) + 3] - 128;
/* R = 1.164(Y - 16) + 1.596(V - 128) */
r = 0x012b27 * y + 0x019a2e * v;
/* G = 1.164(Y - 16) - 0.813(V - 128) - 0.391(U - 128) */
g = 0x012b27 * y - 0x00d0f2 * v - 0x00647e * u;
/* B = 1.164(Y - 16) + 2.018(U - 128) */
b = 0x012b27 * y + 0x0206a2 * u;
*buffer++ = 0xff000000 |
(r >= 0 ? r < 0x1000000 ? r & 0xff0000 : 0xff0000 : 0) |
(g >= 0 ? g < 0x1000000 ? (g >> 8) & 0x00ff00 : 0x00ff00 : 0) |
(b >= 0 ? b < 0x1000000 ? (b >> 16) & 0x0000ff : 0x0000ff : 0);
}
}
 
/* yv12: planar YUV 4:2:0; same fixed-point conversion as yuy2, with the
 * chroma planes located via the YV12_* macros and subsampled 2x2. */
static void
fetch_scanline_yv12 (pixman_image_t *image,
int x,
int line,
int width,
uint32_t * buffer,
const uint32_t *mask)
{
YV12_SETUP (image);
uint8_t *y_line = YV12_Y (line);
uint8_t *u_line = YV12_U (line);
uint8_t *v_line = YV12_V (line);
int i;
for (i = 0; i < width; i++)
{
int16_t y, u, v;
int32_t r, g, b;
 
y = y_line[x + i] - 16;
u = u_line[(x + i) >> 1] - 128;
v = v_line[(x + i) >> 1] - 128;
 
/* R = 1.164(Y - 16) + 1.596(V - 128) */
r = 0x012b27 * y + 0x019a2e * v;
/* G = 1.164(Y - 16) - 0.813(V - 128) - 0.391(U - 128) */
g = 0x012b27 * y - 0x00d0f2 * v - 0x00647e * u;
/* B = 1.164(Y - 16) + 2.018(U - 128) */
b = 0x012b27 * y + 0x0206a2 * u;
 
*buffer++ = 0xff000000 |
(r >= 0 ? r < 0x1000000 ? r & 0xff0000 : 0xff0000 : 0) |
(g >= 0 ? g < 0x1000000 ? (g >> 8) & 0x00ff00 : 0x00ff00 : 0) |
(b >= 0 ? b < 0x1000000 ? (b >> 16) & 0x0000ff : 0x0000ff : 0);
}
}
 
/**************************** Pixel wise fetching *****************************/
 
/* Despite the type, expects a uint64_t buffer */
/* Single-pixel versions of the 10-bit scanline fetchers: widen each
 * 10-bit channel to 16 bits and return a16r16g16b16 as a uint64_t. */
static uint64_t
fetch_pixel_a2r10g10b10 (bits_image_t *image,
int offset,
int line)
{
uint32_t *bits = image->bits + line * image->rowstride;
uint32_t p = READ (image, bits + offset);
uint64_t a = p >> 30;
uint64_t r = (p >> 20) & 0x3ff;
uint64_t g = (p >> 10) & 0x3ff;
uint64_t b = p & 0x3ff;
 
r = r << 6 | r >> 4;
g = g << 6 | g >> 4;
b = b << 6 | b >> 4;
 
a <<= 14;
a |= a >> 2;
a |= a >> 4;
a |= a >> 8;
 
return a << 48 | r << 32 | g << 16 | b;
}
 
/* Despite the type, this function expects a uint64_t buffer */
static uint64_t
fetch_pixel_x2r10g10b10 (bits_image_t *image,
int offset,
int line)
{
uint32_t *bits = image->bits + line * image->rowstride;
uint32_t p = READ (image, bits + offset);
uint64_t r = (p >> 20) & 0x3ff;
uint64_t g = (p >> 10) & 0x3ff;
uint64_t b = p & 0x3ff;
r = r << 6 | r >> 4;
g = g << 6 | g >> 4;
b = b << 6 | b >> 4;
return 0xffffULL << 48 | r << 32 | g << 16 | b;
}
 
/* Despite the type, expects a uint64_t buffer */
static uint64_t
fetch_pixel_a2b10g10r10 (bits_image_t *image,
int offset,
int line)
{
uint32_t *bits = image->bits + line * image->rowstride;
uint32_t p = READ (image, bits + offset);
uint64_t a = p >> 30;
uint64_t b = (p >> 20) & 0x3ff;
uint64_t g = (p >> 10) & 0x3ff;
uint64_t r = p & 0x3ff;
r = r << 6 | r >> 4;
g = g << 6 | g >> 4;
b = b << 6 | b >> 4;
a <<= 14;
a |= a >> 2;
a |= a >> 4;
a |= a >> 8;
return a << 48 | r << 32 | g << 16 | b;
}
 
/* Despite the type, this function expects a uint64_t buffer */
static uint64_t
fetch_pixel_x2b10g10r10 (bits_image_t *image,
int offset,
int line)
{
uint32_t *bits = image->bits + line * image->rowstride;
uint32_t p = READ (image, bits + offset);
uint64_t b = (p >> 20) & 0x3ff;
uint64_t g = (p >> 10) & 0x3ff;
uint64_t r = p & 0x3ff;
r = r << 6 | r >> 4;
g = g << 6 | g >> 4;
b = b << 6 | b >> 4;
return 0xffffULL << 48 | r << 32 | g << 16 | b;
}
 
/* Single-pixel fetchers: return the pixel at (offset, line) widened to
 * a8r8g8b8, mirroring the corresponding fetch_scanline_* conversions. */
static uint32_t
fetch_pixel_a8r8g8b8 (bits_image_t *image,
int offset,
int line)
{
uint32_t *bits = image->bits + line * image->rowstride;
return READ (image, (uint32_t *)bits + offset);
}
 
static uint32_t
fetch_pixel_x8r8g8b8 (bits_image_t *image,
int offset,
int line)
{
uint32_t *bits = image->bits + line * image->rowstride;
 
/* force the unused top byte to full alpha */
return READ (image, (uint32_t *)bits + offset) | 0xff000000;
}
 
static uint32_t
fetch_pixel_a8b8g8r8 (bits_image_t *image,
int offset,
int line)
{
uint32_t *bits = image->bits + line * image->rowstride;
uint32_t pixel = READ (image, (uint32_t *)bits + offset);
/* swap R and B */
return ((pixel & 0xff000000) |
((pixel >> 16) & 0xff) |
(pixel & 0x0000ff00) |
((pixel & 0xff) << 16));
}
 
static uint32_t
fetch_pixel_x8b8g8r8 (bits_image_t *image,
int offset,
int line)
{
uint32_t *bits = image->bits + line * image->rowstride;
uint32_t pixel = READ (image, (uint32_t *)bits + offset);
/* swap R and B, force alpha opaque */
return ((0xff000000) |
((pixel >> 16) & 0xff) |
(pixel & 0x0000ff00) |
((pixel & 0xff) << 16));
}
 
static uint32_t
fetch_pixel_b8g8r8a8 (bits_image_t *image,
int offset,
int line)
{
uint32_t *bits = image->bits + line * image->rowstride;
uint32_t pixel = READ (image, (uint32_t *)bits + offset);
/* full byte reversal */
return ((pixel & 0xff000000) >> 24 |
(pixel & 0x00ff0000) >> 8 |
(pixel & 0x0000ff00) << 8 |
(pixel & 0x000000ff) << 24);
}
 
static uint32_t
fetch_pixel_b8g8r8x8 (bits_image_t *image,
int offset,
int line)
{
uint32_t *bits = image->bits + line * image->rowstride;
uint32_t pixel = READ (image, (uint32_t *)bits + offset);
/* byte reversal, alpha forced opaque */
return ((0xff000000) |
(pixel & 0xff000000) >> 24 |
(pixel & 0x00ff0000) >> 8 |
(pixel & 0x0000ff00) << 8);
}
 
/* x14r6g6b6: widen each 6-bit channel to 8 bits by bit replication. */
static uint32_t
fetch_pixel_x14r6g6b6 (bits_image_t *image,
int offset,
int line)
{
uint32_t *bits = image->bits + line * image->rowstride;
uint32_t pixel = READ (image, (uint32_t *) bits + offset);
uint32_t r, g, b;
 
r = ((pixel & 0x3f000) << 6) | ((pixel & 0x30000));
g = ((pixel & 0x00fc0) << 4) | ((pixel & 0x00c00) >> 2);
b = ((pixel & 0x0003f) << 2) | ((pixel & 0x00030) >> 4);
 
return 0xff000000 | r | g | b;
}
 
/* r8g8b8: packed 24bpp; byte order depends on endianness. */
static uint32_t
fetch_pixel_r8g8b8 (bits_image_t *image,
int offset,
int line)
{
uint32_t *bits = image->bits + line * image->rowstride;
uint8_t *pixel = ((uint8_t *) bits) + (offset * 3);
#ifdef WORDS_BIGENDIAN
return (0xff000000 |
(READ (image, pixel + 0) << 16) |
(READ (image, pixel + 1) << 8) |
(READ (image, pixel + 2)));
#else
return (0xff000000 |
(READ (image, pixel + 2) << 16) |
(READ (image, pixel + 1) << 8) |
(READ (image, pixel + 0)));
#endif
}
 
/* b8g8r8: packed 24bpp with R and B swapped relative to r8g8b8. */
static uint32_t
fetch_pixel_b8g8r8 (bits_image_t *image,
int offset,
int line)
{
uint32_t *bits = image->bits + line * image->rowstride;
uint8_t *pixel = ((uint8_t *) bits) + (offset * 3);
#ifdef WORDS_BIGENDIAN
return (0xff000000 |
(READ (image, pixel + 2) << 16) |
(READ (image, pixel + 1) << 8) |
(READ (image, pixel + 0)));
#else
return (0xff000000 |
(READ (image, pixel + 0) << 16) |
(READ (image, pixel + 1) << 8) |
(READ (image, pixel + 2)));
#endif
}
 
static uint32_t
fetch_pixel_r5g6b5 (bits_image_t *image,
int offset,
int line)
{
uint32_t *bits = image->bits + line * image->rowstride;
uint32_t pixel = READ (image, (uint16_t *) bits + offset);
uint32_t r, g, b;
r = ((pixel & 0xf800) | ((pixel & 0xe000) >> 5)) << 8;
g = ((pixel & 0x07e0) | ((pixel & 0x0600) >> 6)) << 5;
b = ((pixel & 0x001c) | ((pixel & 0x001f) << 5)) >> 2;
return (0xff000000 | r | g | b);
}
 
static uint32_t
fetch_pixel_b5g6r5 (bits_image_t *image,
int offset,
int line)
{
uint32_t r, g, b;
uint32_t *bits = image->bits + line * image->rowstride;
uint32_t pixel = READ (image, (uint16_t *) bits + offset);
b = ((pixel & 0xf800) | ((pixel & 0xe000) >> 5)) >> 8;
g = ((pixel & 0x07e0) | ((pixel & 0x0600) >> 6)) << 5;
r = ((pixel & 0x001c) | ((pixel & 0x001f) << 5)) << 14;
return (0xff000000 | r | g | b);
}
 
static uint32_t
fetch_pixel_a1r5g5b5 (bits_image_t *image,
int offset,
int line)
{
uint32_t *bits = image->bits + line * image->rowstride;
uint32_t pixel = READ (image, (uint16_t *) bits + offset);
uint32_t a, r, g, b;
a = (uint32_t) ((uint8_t) (0 - ((pixel & 0x8000) >> 15))) << 24;
r = ((pixel & 0x7c00) | ((pixel & 0x7000) >> 5)) << 9;
g = ((pixel & 0x03e0) | ((pixel & 0x0380) >> 5)) << 6;
b = ((pixel & 0x001c) | ((pixel & 0x001f) << 5)) >> 2;
return (a | r | g | b);
}
 
static uint32_t
fetch_pixel_x1r5g5b5 (bits_image_t *image,
int offset,
int line)
{
uint32_t *bits = image->bits + line * image->rowstride;
uint32_t pixel = READ (image, (uint16_t *) bits + offset);
uint32_t r, g, b;
r = ((pixel & 0x7c00) | ((pixel & 0x7000) >> 5)) << 9;
g = ((pixel & 0x03e0) | ((pixel & 0x0380) >> 5)) << 6;
b = ((pixel & 0x001c) | ((pixel & 0x001f) << 5)) >> 2;
return (0xff000000 | r | g | b);
}
 
/* Fetch one a1b5g5r5 pixel (BGR channel order) and widen it to a8r8g8b8.
 * The alpha bit expands to 0x00/0xff; 5-bit channels replicate high bits. */
static uint32_t
fetch_pixel_a1b5g5r5 (bits_image_t *image,
                      int offset,
                      int line)
{
    uint32_t *bits = image->bits + line * image->rowstride;
    uint32_t pixel = READ (image, (uint16_t *) bits + offset);
    uint32_t a, r, g, b;
    a = (uint32_t) ((uint8_t) (0 - ((pixel & 0x8000) >> 15))) << 24;
    b = ((pixel & 0x7c00) | ((pixel & 0x7000) >> 5)) >> 7;
    g = ((pixel & 0x03e0) | ((pixel & 0x0380) >> 5)) << 6;
    r = ((pixel & 0x001c) | ((pixel & 0x001f) << 5)) << 14;
    return (a | r | g | b);
}
 
/* Fetch one x1b5g5r5 pixel (BGR channel order, padding bit ignored) and
 * widen it to opaque a8r8g8b8 via high-bit replication. */
static uint32_t
fetch_pixel_x1b5g5r5 (bits_image_t *image,
                      int offset,
                      int line)
{
    uint32_t *bits = image->bits + line * image->rowstride;
    uint32_t pixel = READ (image, (uint16_t *) bits + offset);
    uint32_t r, g, b;
    b = ((pixel & 0x7c00) | ((pixel & 0x7000) >> 5)) >> 7;
    g = ((pixel & 0x03e0) | ((pixel & 0x0380) >> 5)) << 6;
    r = ((pixel & 0x001c) | ((pixel & 0x001f) << 5)) << 14;
    return (0xff000000 | r | g | b);
}
 
/* Fetch one a4r4g4b4 pixel and widen it to a8r8g8b8.
 * Each nibble is duplicated into both halves of its byte (0xf -> 0xff). */
static uint32_t
fetch_pixel_a4r4g4b4 (bits_image_t *image,
                      int offset,
                      int line)
{
    uint32_t *bits = image->bits + line * image->rowstride;
    uint32_t pixel = READ (image, (uint16_t *) bits + offset);
    uint32_t a, r, g, b;
    a = ((pixel & 0xf000) | ((pixel & 0xf000) >> 4)) << 16;
    r = ((pixel & 0x0f00) | ((pixel & 0x0f00) >> 4)) << 12;
    g = ((pixel & 0x00f0) | ((pixel & 0x00f0) >> 4)) << 8;
    b = ((pixel & 0x000f) | ((pixel & 0x000f) << 4));
    return (a | r | g | b);
}
 
/* Fetch one x4r4g4b4 pixel and widen it to opaque a8r8g8b8 by
 * duplicating each color nibble into a full byte. */
static uint32_t
fetch_pixel_x4r4g4b4 (bits_image_t *image,
                      int offset,
                      int line)
{
    uint32_t *bits = image->bits + line * image->rowstride;
    uint32_t pixel = READ (image, (uint16_t *) bits + offset);
    uint32_t r, g, b;
    r = ((pixel & 0x0f00) | ((pixel & 0x0f00) >> 4)) << 12;
    g = ((pixel & 0x00f0) | ((pixel & 0x00f0) >> 4)) << 8;
    b = ((pixel & 0x000f) | ((pixel & 0x000f) << 4));
    return (0xff000000 | r | g | b);
}
 
/* Fetch one a4b4g4r4 pixel (BGR nibble order) and widen it to a8r8g8b8
 * by duplicating each nibble into a full byte. */
static uint32_t
fetch_pixel_a4b4g4r4 (bits_image_t *image,
                      int offset,
                      int line)
{
    uint32_t *bits = image->bits + line * image->rowstride;
    uint32_t pixel = READ (image, (uint16_t *) bits + offset);
    uint32_t a, r, g, b;
    a = ((pixel & 0xf000) | ((pixel & 0xf000) >> 4)) << 16;
    b = ((pixel & 0x0f00) | ((pixel & 0x0f00) >> 4)) >> 4;
    g = ((pixel & 0x00f0) | ((pixel & 0x00f0) >> 4)) << 8;
    r = ((pixel & 0x000f) | ((pixel & 0x000f) << 4)) << 16;
    return (a | r | g | b);
}
 
/* Fetch one x4b4g4r4 pixel (BGR nibble order, padding ignored) and widen
 * it to opaque a8r8g8b8 by duplicating each nibble into a full byte. */
static uint32_t
fetch_pixel_x4b4g4r4 (bits_image_t *image,
                      int offset,
                      int line)
{
    uint32_t *bits = image->bits + line * image->rowstride;
    uint32_t pixel = READ (image, (uint16_t *) bits + offset);
    uint32_t r, g, b;
    b = ((pixel & 0x0f00) | ((pixel & 0x0f00) >> 4)) >> 4;
    g = ((pixel & 0x00f0) | ((pixel & 0x00f0) >> 4)) << 8;
    r = ((pixel & 0x000f) | ((pixel & 0x000f) << 4)) << 16;
    return (0xff000000 | r | g | b);
}
 
/* Fetch one a8 pixel: the byte becomes the alpha channel of an
 * a8r8g8b8 value, with all color channels left at zero. */
static uint32_t
fetch_pixel_a8 (bits_image_t *image,
                int offset,
                int line)
{
    uint32_t *row = image->bits + line * image->rowstride;

    return (uint32_t) READ (image, (uint8_t *) row + offset) << 24;
}
 
/* Fetch one r3g3b2 pixel and widen it to opaque a8r8g8b8.
 * Each tiny channel is expanded to 8 bits by repeatedly replicating
 * its bit pattern down the byte (e.g. 2-bit blue repeats four times). */
static uint32_t
fetch_pixel_r3g3b2 (bits_image_t *image,
                    int offset,
                    int line)
{
    uint32_t *bits = image->bits + line * image->rowstride;
    uint32_t pixel = READ (image, (uint8_t *) bits + offset);
    uint32_t r, g, b;
    r = ((pixel & 0xe0) |
         ((pixel & 0xe0) >> 3) |
         ((pixel & 0xc0) >> 6)) << 16;
    g = ((pixel & 0x1c) |
         ((pixel & 0x18) >> 3) |
         ((pixel & 0x1c) << 3)) << 8;
    b = (((pixel & 0x03)     ) |
         ((pixel & 0x03) << 2) |
         ((pixel & 0x03) << 4) |
         ((pixel & 0x03) << 6));
    return (0xff000000 | r | g | b);
}
 
/* Fetch one b2g3r3 pixel and widen it to opaque a8r8g8b8.
 * Each channel is first shifted into position within the 32-bit result,
 * then its bits are replicated downward and masked to a full byte. */
static uint32_t
fetch_pixel_b2g3r3 (bits_image_t *image,
                    int offset,
                    int line)
{
    uint32_t *bits = image->bits + line * image->rowstride;
    uint32_t p = READ (image, (uint8_t *) bits + offset);
    uint32_t r, g, b;

    /* 2-bit blue: bits 7:6 replicated four times into bits 7:0. */
    b  = p & 0xc0;
    b |= b >> 2;
    b |= b >> 4;
    b &= 0xff;

    /* 3-bit green: bits 5:3 shifted to bits 15:13, replicated, masked. */
    g  = (p & 0x38) << 10;
    g |= g >> 3;
    g |= g >> 6;
    g &= 0xff00;

    /* 3-bit red: bits 2:0 shifted to bits 23:21, replicated, masked. */
    r  = (p & 0x7) << 21;
    r |= r >> 3;
    r |= r >> 6;
    r &= 0xff0000;

    return 0xff000000 | r | g | b;
}
 
/* Fetch one a2r2g2b2 pixel and widen it to a8r8g8b8.
 * Multiplying a 2-bit value by 0x55 replicates it across a byte
 * (e.g. 0x3 * 0x55 = 0xff), then the result is shifted into place. */
static uint32_t
fetch_pixel_a2r2g2b2 (bits_image_t *image,
                      int offset,
                      int line)
{
    uint32_t *bits = image->bits + line * image->rowstride;
    uint32_t pixel = READ (image, (uint8_t *) bits + offset);
    uint32_t a, r, g, b;
    a = ((pixel & 0xc0) * 0x55) << 18;
    r = ((pixel & 0x30) * 0x55) << 12;
    g = ((pixel & 0x0c) * 0x55) << 6;
    b = ((pixel & 0x03) * 0x55);
    return a | r | g | b;
}
 
/* Fetch one a2b2g2r2 pixel (BGR order) and widen it to a8r8g8b8.
 * The * 0x55 trick replicates each 2-bit channel across its byte. */
static uint32_t
fetch_pixel_a2b2g2r2 (bits_image_t *image,
                      int offset,
                      int line)
{
    uint32_t *bits = image->bits + line * image->rowstride;
    uint32_t pixel = READ (image, (uint8_t *) bits + offset);
    uint32_t a, r, g, b;
    a = ((pixel & 0xc0) * 0x55) << 18;
    b = ((pixel & 0x30) * 0x55) >> 4;
    g = ((pixel & 0x0c) * 0x55) << 6;
    r = ((pixel & 0x03) * 0x55) << 16;
    return a | r | g | b;
}
 
/* Fetch one 8-bit palette pixel: the byte is an index into the image's
 * color table, which yields the final a8r8g8b8 value. */
static uint32_t
fetch_pixel_c8 (bits_image_t *image,
                int offset,
                int line)
{
    const pixman_indexed_t *palette = image->indexed;
    uint32_t *row = image->bits + line * image->rowstride;
    uint32_t index = READ (image, (uint8_t *) row + offset);

    return palette->rgba[index];
}
 
/* Fetch one x4a4 pixel: the low nibble is the alpha value, widened to
 * 8 bits by duplication (0xf -> 0xff); color channels stay zero. */
static uint32_t
fetch_pixel_x4a4 (bits_image_t *image,
                  int offset,
                  int line)
{
    uint32_t *row = image->bits + line * image->rowstride;
    uint32_t alpha = READ (image, (uint8_t *) row + offset) & 0xf;

    alpha |= alpha << 4;

    return alpha << 24;
}
 
/* Fetch one a4 pixel via the 4-bit accessor macro, widening the nibble
 * to 8-bit alpha by duplication; color channels stay zero. */
static uint32_t
fetch_pixel_a4 (bits_image_t *image,
                int offset,
                int line)
{
    uint32_t *bits = image->bits + line * image->rowstride;
    uint32_t pixel = FETCH_4 (image, bits, offset);
    pixel |= pixel << 4;
    return pixel << 24;
}
 
/* Fetch one r1g2b1 pixel (4 bpp) and widen it to opaque a8r8g8b8.
 * 1-bit channels use * 0xff to become 0x00/0xff; the 2-bit green uses
 * * 0x55 to replicate across its byte. */
static uint32_t
fetch_pixel_r1g2b1 (bits_image_t *image,
                    int offset,
                    int line)
{
    uint32_t *bits = image->bits + line * image->rowstride;
    uint32_t pixel = FETCH_4 (image, bits, offset);
    uint32_t r, g, b;
    r = ((pixel & 0x8) * 0xff) << 13;
    g = ((pixel & 0x6) * 0x55) << 7;
    b = ((pixel & 0x1) * 0xff);
    return 0xff000000 | r | g | b;
}
 
/* Fetch one b1g2r1 pixel (4 bpp, BGR order) and widen it to opaque
 * a8r8g8b8 using the same multiply-replication as r1g2b1. */
static uint32_t
fetch_pixel_b1g2r1 (bits_image_t *image,
                    int offset,
                    int line)
{
    uint32_t *bits = image->bits + line * image->rowstride;
    uint32_t pixel = FETCH_4 (image, bits, offset);
    uint32_t r, g, b;
    b = ((pixel & 0x8) * 0xff) >> 3;
    g = ((pixel & 0x6) * 0x55) << 7;
    r = ((pixel & 0x1) * 0xff) << 16;
    return 0xff000000 | r | g | b;
}
 
/* Fetch one a1r1g1b1 pixel (4 bpp) and widen it to a8r8g8b8.
 * Each single-bit channel is multiplied by 0xff to become 0x00 or 0xff,
 * then shifted into its byte position. */
static uint32_t
fetch_pixel_a1r1g1b1 (bits_image_t *image,
                      int offset,
                      int line)
{
    uint32_t *bits = image->bits + line * image->rowstride;
    uint32_t pixel = FETCH_4 (image, bits, offset);
    uint32_t a, r, g, b;

    a = ((pixel & 0x8) * 0xff) << 21;
    r = ((pixel & 0x4) * 0xff) << 14;
    g = ((pixel & 0x2) * 0xff) << 7;
    b = ((pixel & 0x1) * 0xff);

    return a | r | g | b;
}
 
/* Fetch one a1b1g1r1 pixel (4 bpp, BGR order) and widen it to a8r8g8b8;
 * single-bit channels expand to 0x00/0xff via * 0xff. */
static uint32_t
fetch_pixel_a1b1g1r1 (bits_image_t *image,
                      int offset,
                      int line)
{
    uint32_t *bits = image->bits + line * image->rowstride;
    uint32_t pixel = FETCH_4 (image, bits, offset);
    uint32_t a, r, g, b;

    a = ((pixel & 0x8) * 0xff) << 21;
    b = ((pixel & 0x4) * 0xff) >> 2;
    g = ((pixel & 0x2) * 0xff) << 7;
    r = ((pixel & 0x1) * 0xff) << 16;

    return a | r | g | b;
}
 
/* Fetch one 4-bit palette pixel: the nibble indexes the image's color
 * table, which already holds full a8r8g8b8 values. */
static uint32_t
fetch_pixel_c4 (bits_image_t *image,
                int offset,
                int line)
{
    uint32_t *bits = image->bits + line * image->rowstride;
    uint32_t pixel = FETCH_4 (image, bits, offset);
    const pixman_indexed_t * indexed = image->indexed;

    return indexed->rgba[pixel];
}
 
/* Fetch one a1 pixel: offset is a bit index within the scanline.
 * The word containing the bit is read, the bit extracted (bit order
 * depends on endianness), then replicated to a full 8-bit alpha. */
static uint32_t
fetch_pixel_a1 (bits_image_t *image,
                int offset,
                int line)
{
    uint32_t *bits = image->bits + line * image->rowstride;
    uint32_t pixel = READ (image, bits + (offset >> 5));
    uint32_t a;
#ifdef WORDS_BIGENDIAN
    a = pixel >> (0x1f - (offset & 0x1f));
#else
    a = pixel >> (offset & 0x1f);
#endif
    a = a & 1;
    /* Replicate the single bit so 1 becomes 0xff. */
    a |= a << 1;
    a |= a << 2;
    a |= a << 4;
    return a << 24;
}
 
/* Fetch one g1 pixel: extract the single bit (endian-dependent bit
 * order within the 32-bit word) and use it as a two-entry palette index. */
static uint32_t
fetch_pixel_g1 (bits_image_t *image,
                int offset,
                int line)
{
    uint32_t *bits = image->bits + line * image->rowstride;
    uint32_t pixel = READ (image, bits + (offset >> 5));
    const pixman_indexed_t * indexed = image->indexed;
    uint32_t a;
#ifdef WORDS_BIGENDIAN
    a = pixel >> (0x1f - (offset & 0x1f));
#else
    a = pixel >> (offset & 0x1f);
#endif
    a = a & 1;
    return indexed->rgba[a];
}
 
/* Fetch one pixel from packed YUY2 (Y0 U Y1 V byte pairs) and convert
 * it to opaque a8r8g8b8. U/V are shared between pixel pairs, hence the
 * "& -4" alignment of the chroma byte index. The conversion constants
 * are 16.16 fixed-point versions of the coefficients in the comments
 * (they match ITU-R BT.601 video-range YCbCr); each channel is clamped
 * to [0, 255] on the way out. */
static uint32_t
fetch_pixel_yuy2 (bits_image_t *image,
                  int offset,
                  int line)
{
    const uint32_t *bits = image->bits + image->rowstride * line;
    int16_t y, u, v;
    int32_t r, g, b;
    y = ((uint8_t *) bits)[offset << 1] - 16;
    u = ((uint8_t *) bits)[((offset << 1) & - 4) + 1] - 128;
    v = ((uint8_t *) bits)[((offset << 1) & - 4) + 3] - 128;
    /* R = 1.164(Y - 16) + 1.596(V - 128) */
    r = 0x012b27 * y + 0x019a2e * v;
    /* G = 1.164(Y - 16) - 0.813(V - 128) - 0.391(U - 128) */
    g = 0x012b27 * y - 0x00d0f2 * v - 0x00647e * u;
    /* B = 1.164(Y - 16) + 2.018(U - 128) */
    b = 0x012b27 * y + 0x0206a2 * u;
    return 0xff000000 |
        (r >= 0 ? r < 0x1000000 ? r         & 0xff0000 : 0xff0000 : 0) |
        (g >= 0 ? g < 0x1000000 ? (g >>  8) & 0x00ff00 : 0x00ff00 : 0) |
        (b >= 0 ? b < 0x1000000 ? (b >> 16) & 0x0000ff : 0x0000ff : 0);
}
 
/* Fetch one pixel from planar YV12 and convert it to opaque a8r8g8b8.
 * YV12_SETUP/Y/U/V locate the three planes; chroma is subsampled 2x2,
 * hence "offset >> 1". Same fixed-point BT.601-style conversion and
 * per-channel clamping as the YUY2 path. */
static uint32_t
fetch_pixel_yv12 (bits_image_t *image,
                  int offset,
                  int line)
{
    YV12_SETUP (image);
    int16_t y = YV12_Y (line)[offset] - 16;
    int16_t u = YV12_U (line)[offset >> 1] - 128;
    int16_t v = YV12_V (line)[offset >> 1] - 128;
    int32_t r, g, b;
    /* R = 1.164(Y - 16) + 1.596(V - 128) */
    r = 0x012b27 * y + 0x019a2e * v;
    /* G = 1.164(Y - 16) - 0.813(V - 128) - 0.391(U - 128) */
    g = 0x012b27 * y - 0x00d0f2 * v - 0x00647e * u;
    /* B = 1.164(Y - 16) + 2.018(U - 128) */
    b = 0x012b27 * y + 0x0206a2 * u;
    return 0xff000000 |
        (r >= 0 ? r < 0x1000000 ? r         & 0xff0000 : 0xff0000 : 0) |
        (g >= 0 ? g < 0x1000000 ? (g >>  8) & 0x00ff00 : 0x00ff00 : 0) |
        (b >= 0 ? b < 0x1000000 ? (b >> 16) & 0x0000ff : 0x0000ff : 0);
}
 
/*********************************** Store ************************************/
 
/* Declare locals a, r, g, b holding the four bytes of an a8r8g8b8 value.
 * Used at the top of the store functions below. */
#define SPLIT_A(v)              \
    uint32_t a = ((v) >> 24),       \
	r = ((v) >> 16) & 0xff,         \
	g = ((v) >> 8) & 0xff,          \
	b = (v) & 0xff

/* Same as SPLIT_A but without the alpha local, for x-channel formats. */
#define SPLIT(v)                \
    uint32_t r = ((v) >> 16) & 0xff, \
	g = ((v) >> 8) & 0xff,          \
	b = (v) & 0xff
 
/* Store a scanline as a2r10g10b10. Despite the parameter type, v points
 * at 64-bit a16r16g16b16 pixels (wide path); each 16-bit channel is
 * truncated to its top 2 or 10 bits and packed into a 32-bit word. */
static void
store_scanline_a2r10g10b10 (bits_image_t *  image,
                            int             x,
                            int             y,
                            int             width,
                            const uint32_t *v)
{
    uint32_t *bits = image->bits + image->rowstride * y;
    uint32_t *pixel = bits + x;
    uint64_t *values = (uint64_t *)v;
    int i;
    for (i = 0; i < width; ++i)
    {
        WRITE (image, pixel++,
               ((values[i] >> 32) & 0xc0000000) |
               ((values[i] >> 18) & 0x3ff00000) |
               ((values[i] >> 12) & 0xffc00) |
               ((values[i] >> 6) & 0x3ff));
    }
}
 
/* Store a scanline as x2r10g10b10 from 64-bit a16r16g16b16 input;
 * alpha is dropped and the top two bits are left zero. */
static void
store_scanline_x2r10g10b10 (bits_image_t *  image,
                            int             x,
                            int             y,
                            int             width,
                            const uint32_t *v)
{
    uint32_t *bits = image->bits + image->rowstride * y;
    uint64_t *values = (uint64_t *)v;
    uint32_t *pixel = bits + x;
    int i;
    for (i = 0; i < width; ++i)
    {
        WRITE (image, pixel++,
               ((values[i] >> 18) & 0x3ff00000) |
               ((values[i] >> 12) & 0xffc00) |
               ((values[i] >> 6) & 0x3ff));
    }
}
 
/* Store a scanline as a2b10g10r10 (red/blue swapped) from 64-bit
 * a16r16g16b16 input; each channel keeps only its top 2 or 10 bits. */
static void
store_scanline_a2b10g10r10 (bits_image_t *  image,
                            int             x,
                            int             y,
                            int             width,
                            const uint32_t *v)
{
    uint32_t *bits = image->bits + image->rowstride * y;
    uint32_t *pixel = bits + x;
    uint64_t *values = (uint64_t *)v;
    int i;
    for (i = 0; i < width; ++i)
    {
        WRITE (image, pixel++,
               ((values[i] >> 32) & 0xc0000000) |
               ((values[i] >> 38) & 0x3ff) |
               ((values[i] >> 12) & 0xffc00) |
               ((values[i] << 14) & 0x3ff00000));
    }
}
 
/* Store a scanline as x2b10g10r10 (red/blue swapped, alpha dropped)
 * from 64-bit a16r16g16b16 input. */
static void
store_scanline_x2b10g10r10 (bits_image_t *  image,
                            int             x,
                            int             y,
                            int             width,
                            const uint32_t *v)
{
    uint32_t *bits = image->bits + image->rowstride * y;
    uint64_t *values = (uint64_t *)v;
    uint32_t *pixel = bits + x;
    int i;
    for (i = 0; i < width; ++i)
    {
        WRITE (image, pixel++,
               ((values[i] >> 38) & 0x3ff) |
               ((values[i] >> 12) & 0xffc00) |
               ((values[i] << 14) & 0x3ff00000));
    }
}
 
/* Store a scanline as a8r8g8b8: the native intermediate format, so a
 * straight (possibly accessor-wrapped) memcpy suffices. */
static void
store_scanline_a8r8g8b8 (bits_image_t *  image,
                         int             x,
                         int             y,
                         int             width,
                         const uint32_t *values)
{
    uint32_t *bits = image->bits + image->rowstride * y;
    MEMCPY_WRAPPED (image, ((uint32_t *)bits) + x, values,
                    width * sizeof(uint32_t));
}
 
/* Store a scanline as x8r8g8b8: the alpha byte of each source pixel is
 * discarded and the padding byte written as zero. */
static void
store_scanline_x8r8g8b8 (bits_image_t *  image,
                         int             x,
                         int             y,
                         int             width,
                         const uint32_t *values)
{
    uint32_t *row = image->bits + image->rowstride * y;
    uint32_t *dst = row + x;
    int i;

    for (i = 0; i < width; ++i)
        WRITE (image, dst + i, values[i] & 0x00ffffff);
}
 
/* Store a scanline as a8b8g8r8: alpha and green bytes stay in place,
 * red and blue are swapped. */
static void
store_scanline_a8b8g8r8 (bits_image_t *  image,
                         int             x,
                         int             y,
                         int             width,
                         const uint32_t *values)
{
    uint32_t *bits = image->bits + image->rowstride * y;
    uint32_t *pixel = (uint32_t *)bits + x;
    int i;
    for (i = 0; i < width; ++i)
    {
        WRITE (image, pixel++,
               (values[i] & 0xff00ff00)       |
               ((values[i] >> 16) & 0xff)     |
               ((values[i] & 0xff) << 16));
    }
}
 
/* Store a scanline as x8b8g8r8: red and blue swapped, alpha dropped
 * (padding byte written as zero). */
static void
store_scanline_x8b8g8r8 (bits_image_t *  image,
                         int             x,
                         int             y,
                         int             width,
                         const uint32_t *values)
{
    uint32_t *bits = image->bits + image->rowstride * y;
    uint32_t *pixel = (uint32_t *)bits + x;
    int i;
    for (i = 0; i < width; ++i)
    {
        WRITE (image, pixel++,
               (values[i] & 0x0000ff00)       |
               ((values[i] >> 16) & 0xff)     |
               ((values[i] & 0xff) << 16));
    }
}
 
/* Store a scanline as b8g8r8a8: a full byte-reversal of the a8r8g8b8
 * source word. */
static void
store_scanline_b8g8r8a8 (bits_image_t *  image,
                         int             x,
                         int             y,
                         int             width,
                         const uint32_t *values)
{
    uint32_t *bits = image->bits + image->rowstride * y;
    uint32_t *pixel = (uint32_t *)bits + x;
    int i;
    for (i = 0; i < width; ++i)
    {
        WRITE (image, pixel++,
               ((values[i] >> 24) & 0x000000ff) |
               ((values[i] >>  8) & 0x0000ff00) |
               ((values[i] <<  8) & 0x00ff0000) |
               ((values[i] << 24) & 0xff000000));
    }
}
 
/* Store a scanline as b8g8r8x8: byte-reversed a8r8g8b8 with the alpha
 * byte dropped (the low padding byte is written as zero). */
static void
store_scanline_b8g8r8x8 (bits_image_t *  image,
                         int             x,
                         int             y,
                         int             width,
                         const uint32_t *values)
{
    uint32_t *bits = image->bits + image->rowstride * y;
    uint32_t *pixel = (uint32_t *)bits + x;
    int i;
    for (i = 0; i < width; ++i)
    {
        WRITE (image, pixel++,
               ((values[i] >>  8) & 0x0000ff00) |
               ((values[i] <<  8) & 0x00ff0000) |
               ((values[i] << 24) & 0xff000000));
    }
}
 
/* Store a scanline as x14r6g6b6: each 8-bit channel is truncated to its
 * top 6 bits and packed into the low 18 bits of the 32-bit word. */
static void
store_scanline_x14r6g6b6 (bits_image_t *  image,
                          int             x,
                          int             y,
                          int             width,
                          const uint32_t *values)
{
    uint32_t *bits = image->bits + image->rowstride * y;
    uint32_t *pixel = ((uint32_t *) bits) + x;
    int i;

    for (i = 0; i < width; ++i)
    {
        uint32_t s = values[i];
        uint32_t r, g, b;

        r = (s & 0xfc0000) >> 6;
        g = (s & 0x00fc00) >> 4;
        b = (s & 0x0000fc) >> 2;

        WRITE (image, pixel++, r | g | b);
    }
}
 
/* Store a scanline as 24-bit r8g8b8: the three color bytes are written
 * individually, in memory order chosen so the in-memory layout matches
 * the format name on either endianness. Alpha is discarded. */
static void
store_scanline_r8g8b8 (bits_image_t *  image,
                       int             x,
                       int             y,
                       int             width,
                       const uint32_t *values)
{
    uint32_t *bits = image->bits + image->rowstride * y;
    uint8_t *pixel = ((uint8_t *) bits) + 3 * x;
    int i;
    for (i = 0; i < width; ++i)
    {
        uint32_t val = values[i];
#ifdef WORDS_BIGENDIAN
        WRITE (image, pixel++, (val & 0x00ff0000) >> 16);
        WRITE (image, pixel++, (val & 0x0000ff00) >>  8);
        WRITE (image, pixel++, (val & 0x000000ff) >>  0);
#else
        WRITE (image, pixel++, (val & 0x000000ff) >>  0);
        WRITE (image, pixel++, (val & 0x0000ff00) >>  8);
        WRITE (image, pixel++, (val & 0x00ff0000) >> 16);
#endif
    }
}
 
/* Store a scanline as 24-bit b8g8r8: like r8g8b8 but with the byte
 * order reversed within each 3-byte pixel. Alpha is discarded. */
static void
store_scanline_b8g8r8 (bits_image_t *  image,
                       int             x,
                       int             y,
                       int             width,
                       const uint32_t *values)
{
    uint32_t *bits = image->bits + image->rowstride * y;
    uint8_t *pixel = ((uint8_t *) bits) + 3 * x;
    int i;
    for (i = 0; i < width; ++i)
    {
        uint32_t val = values[i];
#ifdef WORDS_BIGENDIAN
        WRITE (image, pixel++, (val & 0x000000ff) >>  0);
        WRITE (image, pixel++, (val & 0x0000ff00) >>  8);
        WRITE (image, pixel++, (val & 0x00ff0000) >> 16);
#else
        WRITE (image, pixel++, (val & 0x00ff0000) >> 16);
        WRITE (image, pixel++, (val & 0x0000ff00) >>  8);
        WRITE (image, pixel++, (val & 0x000000ff) >>  0);
#endif
    }
}
 
/* Store a scanline as r5g6b5: channels are truncated (high bits kept)
 * and packed into 16 bits; alpha is discarded. */
static void
store_scanline_r5g6b5 (bits_image_t *  image,
                       int             x,
                       int             y,
                       int             width,
                       const uint32_t *values)
{
    uint32_t *bits = image->bits + image->rowstride * y;
    uint16_t *pixel = ((uint16_t *) bits) + x;
    int i;
    for (i = 0; i < width; ++i)
    {
        uint32_t s = values[i];
        WRITE (image, pixel++,
               ((s >> 3) & 0x001f) |
               ((s >> 5) & 0x07e0) |
               ((s >> 8) & 0xf800));
    }
}
 
/* Store a scanline as b5g6r5 (red/blue swapped r5g6b5); channels are
 * truncated to 5/6/5 bits and alpha discarded. */
static void
store_scanline_b5g6r5 (bits_image_t *  image,
                       int             x,
                       int             y,
                       int             width,
                       const uint32_t *values)
{
    uint32_t *bits = image->bits + image->rowstride * y;
    uint16_t *pixel = ((uint16_t *) bits) + x;
    int i;
    for (i = 0; i < width; ++i)
    {
        SPLIT (values[i]);
        WRITE (image, pixel++,
               ((b << 8) & 0xf800) |
               ((g << 3) & 0x07e0) |
               ((r >> 3)         ));
    }
}
 
/* Store a scanline as a1r5g5b5: alpha keeps only its top bit, each
 * color channel its top 5 bits. */
static void
store_scanline_a1r5g5b5 (bits_image_t *  image,
                         int             x,
                         int             y,
                         int             width,
                         const uint32_t *values)
{
    uint32_t *bits = image->bits + image->rowstride * y;
    uint16_t *pixel = ((uint16_t *) bits) + x;
    int i;
    for (i = 0; i < width; ++i)
    {
        SPLIT_A (values[i]);
        WRITE (image, pixel++,
               ((a << 8) & 0x8000) |
               ((r << 7) & 0x7c00) |
               ((g << 2) & 0x03e0) |
               ((b >> 3)         ));
    }
}
 
/* Store a scanline as x1r5g5b5: 5 bits per color channel, alpha and
 * the padding bit discarded. */
static void
store_scanline_x1r5g5b5 (bits_image_t *  image,
                         int             x,
                         int             y,
                         int             width,
                         const uint32_t *values)
{
    uint32_t *bits = image->bits + image->rowstride * y;
    uint16_t *pixel = ((uint16_t *) bits) + x;
    int i;
    for (i = 0; i < width; ++i)
    {
        SPLIT (values[i]);
        WRITE (image, pixel++,
               ((r << 7) & 0x7c00) |
               ((g << 2) & 0x03e0) |
               ((b >> 3)         ));
    }
}
 
/* Store a scanline as a1b5g5r5 (BGR order): top alpha bit plus the top
 * 5 bits of each color channel. */
static void
store_scanline_a1b5g5r5 (bits_image_t *  image,
                         int             x,
                         int             y,
                         int             width,
                         const uint32_t *values)
{
    uint32_t *bits = image->bits + image->rowstride * y;
    uint16_t *pixel = ((uint16_t *) bits) + x;
    int i;
    for (i = 0; i < width; ++i)
    {
        SPLIT_A (values[i]);
        WRITE (image, pixel++,
               ((a << 8) & 0x8000) |
               ((b << 7) & 0x7c00) |
               ((g << 2) & 0x03e0) |
               ((r >> 3)         ));
    }
}
 
/* Store a scanline as x1b5g5r5 (BGR order, no alpha): top 5 bits of
 * each color channel. */
static void
store_scanline_x1b5g5r5 (bits_image_t *  image,
                         int             x,
                         int             y,
                         int             width,
                         const uint32_t *values)
{
    uint32_t *bits = image->bits + image->rowstride * y;
    uint16_t *pixel = ((uint16_t *) bits) + x;
    int i;
    for (i = 0; i < width; ++i)
    {
        SPLIT (values[i]);
        WRITE (image, pixel++, ((b << 7) & 0x7c00) |
               ((g << 2) & 0x03e0) |
               ((r >> 3)         ));
    }
}
 
/* Store a scanline as a4r4g4b4: each channel keeps its top nibble. */
static void
store_scanline_a4r4g4b4 (bits_image_t *  image,
                         int             x,
                         int             y,
                         int             width,
                         const uint32_t *values)
{
    uint32_t *bits = image->bits + image->rowstride * y;
    uint16_t *pixel = ((uint16_t *) bits) + x;
    int i;
    for (i = 0; i < width; ++i)
    {
        SPLIT_A (values[i]);
        WRITE (image, pixel++,
               ((a << 8) & 0xf000) |
               ((r << 4) & 0x0f00) |
               ((g     ) & 0x00f0) |
               ((b >> 4)         ));
    }
}
 
/* Store a scanline as x4r4g4b4: top nibble of each color channel,
 * alpha discarded. */
static void
store_scanline_x4r4g4b4 (bits_image_t *  image,
                         int             x,
                         int             y,
                         int             width,
                         const uint32_t *values)
{
    uint32_t *bits = image->bits + image->rowstride * y;
    uint16_t *pixel = ((uint16_t *) bits) + x;
    int i;
    for (i = 0; i < width; ++i)
    {
        SPLIT (values[i]);
        WRITE (image, pixel++,
               ((r << 4) & 0x0f00) |
               ((g     ) & 0x00f0) |
               ((b >> 4)         ));
    }
}
 
/* Store a scanline as a4b4g4r4 (BGR nibble order): top nibble of each
 * channel. */
static void
store_scanline_a4b4g4r4 (bits_image_t *  image,
                         int             x,
                         int             y,
                         int             width,
                         const uint32_t *values)
{
    uint32_t *bits = image->bits + image->rowstride * y;
    uint16_t *pixel = ((uint16_t *) bits) + x;
    int i;
    for (i = 0; i < width; ++i)
    {
        SPLIT_A (values[i]);
        WRITE (image, pixel++, ((a << 8) & 0xf000) |
               ((b << 4) & 0x0f00) |
               ((g     ) & 0x00f0) |
               ((r >> 4)         ));
    }
}
 
/* Store a scanline as x4b4g4r4 (BGR nibble order, no alpha): top
 * nibble of each color channel. */
static void
store_scanline_x4b4g4r4 (bits_image_t *  image,
                         int             x,
                         int             y,
                         int             width,
                         const uint32_t *values)
{
    uint32_t *bits = image->bits + image->rowstride * y;
    uint16_t *pixel = ((uint16_t *) bits) + x;
    int i;
    for (i = 0; i < width; ++i)
    {
        SPLIT (values[i]);
        WRITE (image, pixel++,
               ((b << 4) & 0x0f00) |
               ((g     ) & 0x00f0) |
               ((r >> 4)         ));
    }
}
 
/* Store a scanline as a8: only the alpha byte of each source pixel is
 * kept; color information is discarded. */
static void
store_scanline_a8 (bits_image_t *  image,
                   int             x,
                   int             y,
                   int             width,
                   const uint32_t *values)
{
    uint32_t *row = image->bits + image->rowstride * y;
    uint8_t *dst = (uint8_t *) row + x;
    int i;

    for (i = 0; i < width; ++i)
        WRITE (image, dst + i, values[i] >> 24);
}
 
/* Store a scanline as r3g3b2: red and green keep their top 3 bits,
 * blue its top 2; alpha is discarded. */
static void
store_scanline_r3g3b2 (bits_image_t *  image,
                       int             x,
                       int             y,
                       int             width,
                       const uint32_t *values)
{
    uint32_t *bits = image->bits + image->rowstride * y;
    uint8_t *pixel = ((uint8_t *) bits) + x;
    int i;
    for (i = 0; i < width; ++i)
    {
        SPLIT (values[i]);
        WRITE (image, pixel++,
               ((r     ) & 0xe0) |
               ((g >> 3) & 0x1c) |
               ((b >> 6)       ));
    }
}
 
/* Store a scanline as b2g3r3 (BGR order): blue keeps its top 2 bits,
 * green and red their top 3; alpha is discarded. */
static void
store_scanline_b2g3r3 (bits_image_t *  image,
                       int             x,
                       int             y,
                       int             width,
                       const uint32_t *values)
{
    uint32_t *bits = image->bits + image->rowstride * y;
    uint8_t *pixel = ((uint8_t *) bits) + x;
    int i;
    for (i = 0; i < width; ++i)
    {
        SPLIT (values[i]);
        WRITE (image, pixel++,
               ((b     ) & 0xc0) |
               ((g >> 2) & 0x38) |
               ((r >> 5)       ));
    }
}
 
/* Store a scanline as a2r2g2b2: each channel keeps its top 2 bits. */
static void
store_scanline_a2r2g2b2 (bits_image_t *  image,
                         int             x,
                         int             y,
                         int             width,
                         const uint32_t *values)
{
    uint32_t *bits = image->bits + image->rowstride * y;
    uint8_t *pixel = ((uint8_t *) bits) + x;
    int i;
    for (i = 0; i < width; ++i)
    {
        SPLIT_A (values[i]);
        WRITE (image, pixel++,
               ((a     ) & 0xc0) |
               ((r >> 2) & 0x30) |
               ((g >> 4) & 0x0c) |
               ((b >> 6)       ));
    }
}
 
/* Store a scanline as a2b2g2r2 (BGR order): each channel keeps its
 * top 2 bits. */
static void
store_scanline_a2b2g2r2 (bits_image_t *  image,
                         int             x,
                         int             y,
                         int             width,
                         const uint32_t *values)
{
    uint32_t *bits = image->bits + image->rowstride * y;
    uint8_t *pixel = ((uint8_t *) bits) + x;
    int i;
    for (i = 0; i < width; ++i)
    {
        SPLIT_A (values[i]);
        WRITE (image, pixel++,
               ((a     ) & 0xc0) |
               ((b >> 2) & 0x30) |
               ((g >> 4) & 0x0c) |
               ((r >> 6)       ));
    }
}
 
/* Store a scanline as 8-bit palette indices: each RGB value is mapped
 * back to the nearest palette entry via RGB24_TO_ENTRY. */
static void
store_scanline_c8 (bits_image_t *  image,
                   int             x,
                   int             y,
                   int             width,
                   const uint32_t *values)
{
    uint32_t *bits = image->bits + image->rowstride * y;
    uint8_t *pixel = ((uint8_t *) bits) + x;
    const pixman_indexed_t *indexed = image->indexed;
    int i;
    for (i = 0; i < width; ++i)
        WRITE (image, pixel++, RGB24_TO_ENTRY (indexed,values[i]));
}
 
/* Store a scanline as 8-bit grayscale palette indices: each RGB value
 * is mapped via the luminance-based RGB24_TO_ENTRY_Y lookup. */
static void
store_scanline_g8 (bits_image_t *  image,
                   int             x,
                   int             y,
                   int             width,
                   const uint32_t *values)
{
    uint32_t *bits = image->bits + image->rowstride * y;
    uint8_t *pixel = ((uint8_t *) bits) + x;
    const pixman_indexed_t *indexed = image->indexed;
    int i;

    for (i = 0; i < width; ++i)
        WRITE (image, pixel++, RGB24_TO_ENTRY_Y (indexed,values[i]));
}
 
/* Store a scanline as x4a4: only the top nibble of each source pixel's
 * alpha is kept; the padding nibble is written as zero. */
static void
store_scanline_x4a4 (bits_image_t *  image,
                     int             x,
                     int             y,
                     int             width,
                     const uint32_t *values)
{
    uint32_t *row = image->bits + image->rowstride * y;
    uint8_t *dst = (uint8_t *) row + x;
    int i;

    for (i = 0; i < width; ++i)
        WRITE (image, dst + i, values[i] >> 28);
}
 
/* Write one byte at bit offset o (o is in bits, hence >> 3). */
#define STORE_8(img,l,o,v)  (WRITE (img, (uint8_t *)(l) + ((o) >> 3), (v)))
#ifdef WORDS_BIGENDIAN

/* Write one 4-bit value at pixel offset o: read-modify-write of the
 * containing byte, choosing the nibble according to endianness. */
#define STORE_4(img,l,o,v)                          \
    do                                  \
    {                                   \
	int bo = 4 * (o);                       \
	int v4 = (v) & 0x0f;                        \
                                    \
	STORE_8 (img, l, bo, (                      \
		     bo & 4 ?                       \
		     (FETCH_8 (img, l, bo) & 0xf0) | (v4) :     \
		     (FETCH_8 (img, l, bo) & 0x0f) | (v4 << 4))); \
    } while (0)
#else

/* Little-endian variant: low nibble first within each byte. */
#define STORE_4(img,l,o,v)                          \
    do                                  \
    {                                   \
	int bo = 4 * (o);                       \
	int v4 = (v) & 0x0f;                        \
                                    \
	STORE_8 (img, l, bo, (                      \
		     bo & 4 ?                       \
		     (FETCH_8 (img, l, bo) & 0x0f) | (v4 << 4) :    \
		     (FETCH_8 (img, l, bo) & 0xf0) | (v4)));        \
    } while (0)
#endif
 
/* Store a scanline as a4: the top nibble of each pixel's alpha is
 * packed two-per-byte via STORE_4. */
static void
store_scanline_a4 (bits_image_t *  image,
                   int             x,
                   int             y,
                   int             width,
                   const uint32_t *values)
{
    uint32_t *bits = image->bits + image->rowstride * y;
    int i;

    for (i = 0; i < width; ++i)
        STORE_4 (image, bits, i + x, values[i] >> 28);
}
 
/* Store a scanline as r1g2b1 (4 bpp): red and blue keep their top bit,
 * green its top 2 bits; packed two pixels per byte. */
static void
store_scanline_r1g2b1 (bits_image_t *  image,
                       int             x,
                       int             y,
                       int             width,
                       const uint32_t *values)
{
    uint32_t *bits = image->bits + image->rowstride * y;
    int i;

    for (i = 0; i < width; ++i)
    {
        uint32_t pixel;

        SPLIT (values[i]);
        pixel = (((r >> 4) & 0x8) |
                 ((g >> 5) & 0x6) |
                 ((b >> 7)      ));
        STORE_4 (image, bits, i + x, pixel);
    }
}
 
/* Store a scanline as b1g2r1 (4 bpp, BGR order): blue and red keep
 * their top bit, green its top 2 bits. */
static void
store_scanline_b1g2r1 (bits_image_t *  image,
                       int             x,
                       int             y,
                       int             width,
                       const uint32_t *values)
{
    uint32_t *bits = image->bits + image->rowstride * y;
    int i;

    for (i = 0; i < width; ++i)
    {
        uint32_t pixel;

        SPLIT (values[i]);
        pixel = (((b >> 4) & 0x8) |
                 ((g >> 5) & 0x6) |
                 ((r >> 7)      ));
        STORE_4 (image, bits, i + x, pixel);
    }
}
 
/* Store a scanline as a1r1g1b1 (4 bpp): each channel keeps only its
 * top bit. */
static void
store_scanline_a1r1g1b1 (bits_image_t *  image,
                         int             x,
                         int             y,
                         int             width,
                         const uint32_t *values)
{
    uint32_t *bits = image->bits + image->rowstride * y;
    int i;

    for (i = 0; i < width; ++i)
    {
        uint32_t pixel;

        SPLIT_A (values[i]);
        pixel = (((a >> 4) & 0x8) |
                 ((r >> 5) & 0x4) |
                 ((g >> 6) & 0x2) |
                 ((b >> 7)      ));

        STORE_4 (image, bits, i + x, pixel);
    }
}
 
/* Store a scanline as a1b1g1r1 (4 bpp, BGR order): each channel keeps
 * only its top bit. */
static void
store_scanline_a1b1g1r1 (bits_image_t *  image,
                         int             x,
                         int             y,
                         int             width,
                         const uint32_t *values)
{
    uint32_t *bits = image->bits + image->rowstride * y;
    int i;

    for (i = 0; i < width; ++i)
    {
        uint32_t pixel;

        SPLIT_A (values[i]);
        pixel = (((a >> 4) & 0x8) |
                 ((b >> 5) & 0x4) |
                 ((g >> 6) & 0x2) |
                 ((r >> 7)      ));

        STORE_4 (image, bits, i + x, pixel);
    }
}
 
/* Store a scanline as 4-bit palette indices via RGB24_TO_ENTRY. */
static void
store_scanline_c4 (bits_image_t *  image,
                   int             x,
                   int             y,
                   int             width,
                   const uint32_t *values)
{
    uint32_t *bits = image->bits + image->rowstride * y;
    const pixman_indexed_t *indexed = image->indexed;
    int i;
    for (i = 0; i < width; ++i)
    {
        uint32_t pixel;
        pixel = RGB24_TO_ENTRY (indexed, values[i]);
        STORE_4 (image, bits, i + x, pixel);
    }
}
 
/* Store a scanline as 4-bit grayscale indices via the luminance-based
 * RGB24_TO_ENTRY_Y lookup. */
static void
store_scanline_g4 (bits_image_t *  image,
                   int             x,
                   int             y,
                   int             width,
                   const uint32_t *values)
{
    uint32_t *bits = image->bits + image->rowstride * y;
    const pixman_indexed_t *indexed = image->indexed;
    int i;
    for (i = 0; i < width; ++i)
    {
        uint32_t pixel;
        pixel = RGB24_TO_ENTRY_Y (indexed, values[i]);
        STORE_4 (image, bits, i + x, pixel);
    }
}
 
/* Store a scanline as a1: the top bit of each pixel's alpha is written
 * into the bitmap with a read-modify-write of the containing 32-bit
 * word; bit position within the word depends on endianness. */
static void
store_scanline_a1 (bits_image_t *  image,
                   int             x,
                   int             y,
                   int             width,
                   const uint32_t *values)
{
    uint32_t *bits = image->bits + image->rowstride * y;
    int i;
    for (i = 0; i < width; ++i)
    {
        uint32_t  *pixel = ((uint32_t *) bits) + ((i + x) >> 5);
        uint32_t mask, v;
#ifdef WORDS_BIGENDIAN
        mask = 1 << (0x1f - ((i + x) & 0x1f));
#else
        mask = 1 << ((i + x) & 0x1f);
#endif
        v = values[i] & 0x80000000 ? mask : 0;
        WRITE (image, pixel, (READ (image, pixel) & ~mask) | v);
    }
}
 
/* Store a scanline as g1: each pixel is mapped to a 1-bit grayscale
 * palette index (RGB24_TO_ENTRY_Y), then that bit is written with a
 * read-modify-write of the containing word, endian-aware. */
static void
store_scanline_g1 (bits_image_t *  image,
                   int             x,
                   int             y,
                   int             width,
                   const uint32_t *values)
{
    uint32_t *bits = image->bits + image->rowstride * y;
    const pixman_indexed_t *indexed = image->indexed;
    int i;
    for (i = 0; i < width; ++i)
    {
        uint32_t  *pixel = ((uint32_t *) bits) + ((i + x) >> 5);
        uint32_t mask, v;
#ifdef WORDS_BIGENDIAN
        mask = 1 << (0x1f - ((i + x) & 0x1f));
#else
        mask = 1 << ((i + x) & 0x1f);
#endif
        v = RGB24_TO_ENTRY_Y (indexed, values[i]) & 0x1 ? mask : 0;
        WRITE (image, pixel, (READ (image, pixel) & ~mask) | v);
    }
}
 
/*
* Contracts a 64bpp image to 32bpp and then stores it using a regular 32-bit
* store proc. Despite the type, this function expects a uint64_t buffer.
*/
/*
 * Contracts a 64bpp image to 32bpp and then stores it using a regular 32-bit
 * store proc. Despite the type, this function expects a uint64_t buffer.
 *
 * NOTE(review): on allocation failure the scanline is silently dropped;
 * callers have no way to detect this — presumably acceptable for a
 * rendering fallback path, but worth confirming.
 */
static void
store_scanline_generic_64 (bits_image_t *  image,
                           int             x,
                           int             y,
                           int             width,
                           const uint32_t *values)
{
    uint32_t *argb8_pixels;
    assert (image->common.type == BITS);
    argb8_pixels = pixman_malloc_ab (width, sizeof(uint32_t));
    if (!argb8_pixels)
        return;
    /* Contract the scanline. We could do this in place if values weren't
     * const.
     */
    pixman_contract (argb8_pixels, (uint64_t *)values, width);
    image->store_scanline_32 (image, x, y, width, argb8_pixels);
    free (argb8_pixels);
}
 
/* Despite the type, this function expects both buffer
* and mask to be uint64_t
*/
/* Despite the type, this function expects both buffer
 * and mask to be uint64_t.
 *
 * Fetches at 32 bpp into the front of 'buffer', then widens in place to
 * 64 bpp with pixman_expand (safe because expansion proceeds from the
 * last pixel backwards). */
static void
fetch_scanline_generic_64 (pixman_image_t *image,
                           int             x,
                           int             y,
                           int             width,
                           uint32_t *      buffer,
                           const uint32_t *mask)
{
    pixman_format_code_t format;
    /* Fetch the pixels into the first half of buffer and then expand them in
     * place.
     */
    image->bits.fetch_scanline_32 (image, x, y, width, buffer, NULL);

    format = image->bits.format;
    if (PIXMAN_FORMAT_TYPE (format) == PIXMAN_TYPE_COLOR ||
        PIXMAN_FORMAT_TYPE (format) == PIXMAN_TYPE_GRAY)
    {
        /* Indexed formats are mapped to a8r8g8b8 with full
         * precision, so when expanding we shouldn't correct
         * for the width of the channels
         */
        format = PIXMAN_a8r8g8b8;
    }
    pixman_expand ((uint64_t *)buffer, buffer, format, width);
}
 
/* Despite the type, this function expects a uint64_t *buffer */
/* Despite the type, this function expects a uint64_t *buffer.
 *
 * Fetches one pixel through the 32-bit accessor and widens it to the
 * 64-bit intermediate format with pixman_expand. */
static uint64_t
fetch_pixel_generic_64 (bits_image_t *image,
                        int            offset,
                        int            line)
{
    uint32_t pixel32 = image->fetch_pixel_32 (image, offset, line);
    uint64_t result;
    pixman_format_code_t format;

    format = image->format;
    if (PIXMAN_FORMAT_TYPE (format) == PIXMAN_TYPE_COLOR ||
        PIXMAN_FORMAT_TYPE (format) == PIXMAN_TYPE_GRAY)
    {
        /* Indexed formats are mapped to a8r8g8b8 with full
         * precision, so when expanding we shouldn't correct
         * for the width of the channels
         */
        format = PIXMAN_a8r8g8b8;
    }
    pixman_expand ((uint64_t *)&result, &pixel32, format, 1);

    return result;
}
 
/*
* XXX: The transformed fetch path only works at 32-bpp so far. When all
* paths have wide versions, this can be removed.
*
* WARNING: This function loses precision!
*/
/*
 * XXX: The transformed fetch path only works at 32-bpp so far. When all
 * paths have wide versions, this can be removed.
 *
 * WARNING: This function loses precision!
 *
 * Fetches through the 64-bit accessor, then contracts to a8r8g8b8. */
static uint32_t
fetch_pixel_generic_lossy_32 (bits_image_t *image,
                              int            offset,
                              int            line)
{
    uint64_t pixel64 = image->fetch_pixel_64 (image, offset, line);
    uint32_t result;
    pixman_contract (&result, &pixel64, 1);

    return result;
}
 
/* Accessor table entry: per-format fetch/store callbacks, in both the
 * 32-bit (a8r8g8b8) and 64-bit (widened) variants. NULL slots mean the
 * generic wrapper must be used for that width. */
typedef struct
{
    pixman_format_code_t format;
    fetch_scanline_t     fetch_scanline_32;
    fetch_scanline_t     fetch_scanline_64;
    fetch_pixel_32_t     fetch_pixel_32;
    fetch_pixel_64_t     fetch_pixel_64;
    store_scanline_t     store_scanline_32;
    store_scanline_t     store_scanline_64;
} format_info_t;
 
/* Build a table entry for a format with native 32-bit accessors and
 * generic (expand/contract) 64-bit wrappers. */
#define FORMAT_INFO(format)                     \
    {                                   \
	PIXMAN_ ## format,                      \
	    fetch_scanline_ ## format,                  \
	    fetch_scanline_generic_64,                  \
	    fetch_pixel_ ## format, fetch_pixel_generic_64,     \
	    store_scanline_ ## format, store_scanline_generic_64    \
    }

/* Master table mapping each pixman format to its accessors; terminated
 * by PIXMAN_null. The interleaved #defines alias palette/grayscale
 * formats to the shared c8/c4 implementations. */
static const format_info_t accessors[] =
{
/* 32 bpp formats */
    FORMAT_INFO (a8r8g8b8),
    FORMAT_INFO (x8r8g8b8),
    FORMAT_INFO (a8b8g8r8),
    FORMAT_INFO (x8b8g8r8),
    FORMAT_INFO (b8g8r8a8),
    FORMAT_INFO (b8g8r8x8),
    FORMAT_INFO (x14r6g6b6),

/* 24bpp formats */
    FORMAT_INFO (r8g8b8),
    FORMAT_INFO (b8g8r8),
/* 16bpp formats */
    FORMAT_INFO (r5g6b5),
    FORMAT_INFO (b5g6r5),
    FORMAT_INFO (a1r5g5b5),
    FORMAT_INFO (x1r5g5b5),
    FORMAT_INFO (a1b5g5r5),
    FORMAT_INFO (x1b5g5r5),
    FORMAT_INFO (a4r4g4b4),
    FORMAT_INFO (x4r4g4b4),
    FORMAT_INFO (a4b4g4r4),
    FORMAT_INFO (x4b4g4r4),
/* 8bpp formats */
    FORMAT_INFO (a8),
    FORMAT_INFO (r3g3b2),
    FORMAT_INFO (b2g3r3),
    FORMAT_INFO (a2r2g2b2),
    FORMAT_INFO (a2b2g2r2),
    FORMAT_INFO (c8),
#define fetch_scanline_g8 fetch_scanline_c8
#define fetch_pixel_g8 fetch_pixel_c8
    FORMAT_INFO (g8),
#define fetch_scanline_x4c4 fetch_scanline_c8
#define fetch_pixel_x4c4 fetch_pixel_c8
#define store_scanline_x4c4 store_scanline_c8
    FORMAT_INFO (x4c4),
#define fetch_scanline_x4g4 fetch_scanline_c8
#define fetch_pixel_x4g4 fetch_pixel_c8
#define store_scanline_x4g4 store_scanline_g8
    FORMAT_INFO (x4g4),
    FORMAT_INFO (x4a4),
/* 4bpp formats */
    FORMAT_INFO (a4),
    FORMAT_INFO (r1g2b1),
    FORMAT_INFO (b1g2r1),
    FORMAT_INFO (a1r1g1b1),
    FORMAT_INFO (a1b1g1r1),
    FORMAT_INFO (c4),
#define fetch_scanline_g4 fetch_scanline_c4
#define fetch_pixel_g4 fetch_pixel_c4
    FORMAT_INFO (g4),
/* 1bpp formats */
    FORMAT_INFO (a1),
    FORMAT_INFO (g1),
/* Wide formats: native 64-bit accessors, lossy 32-bit pixel fetch. */
    { PIXMAN_a2r10g10b10,
      NULL, fetch_scanline_a2r10g10b10,
      fetch_pixel_generic_lossy_32, fetch_pixel_a2r10g10b10,
      NULL, store_scanline_a2r10g10b10 },
    { PIXMAN_x2r10g10b10,
      NULL, fetch_scanline_x2r10g10b10,
      fetch_pixel_generic_lossy_32, fetch_pixel_x2r10g10b10,
      NULL, store_scanline_x2r10g10b10 },
    { PIXMAN_a2b10g10r10,
      NULL, fetch_scanline_a2b10g10r10,
      fetch_pixel_generic_lossy_32, fetch_pixel_a2b10g10r10,
      NULL, store_scanline_a2b10g10r10 },
    { PIXMAN_x2b10g10r10,
      NULL, fetch_scanline_x2b10g10r10,
      fetch_pixel_generic_lossy_32, fetch_pixel_x2b10g10r10,
      NULL, store_scanline_x2b10g10r10 },
/* YUV formats: read-only, no store procs. */
    { PIXMAN_yuy2,
      fetch_scanline_yuy2, fetch_scanline_generic_64,
      fetch_pixel_yuy2, fetch_pixel_generic_64,
      NULL, NULL },
    { PIXMAN_yv12,
      fetch_scanline_yv12, fetch_scanline_generic_64,
      fetch_pixel_yv12, fetch_pixel_generic_64,
      NULL, NULL },
    { PIXMAN_null },
};
 
/* Install the fetch/store callbacks for the image's pixel format by a
 * linear search of the accessor table. An unknown format leaves the
 * image's accessors untouched. */
static void
setup_accessors (bits_image_t *image)
{
    const format_info_t *info;

    for (info = accessors; info->format != PIXMAN_null; ++info)
    {
        if (info->format == image->format)
        {
            image->fetch_scanline_32 = info->fetch_scanline_32;
            image->fetch_scanline_64 = info->fetch_scanline_64;
            image->fetch_pixel_32 = info->fetch_pixel_32;
            image->fetch_pixel_64 = info->fetch_pixel_64;
            image->store_scanline_32 = info->store_scanline_32;
            image->store_scanline_64 = info->store_scanline_64;
            return;
        }
    }
}
 
/* This file is compiled twice: once normally and once with
 * PIXMAN_FB_ACCESSORS defined, producing a second set of accessors that
 * route every pixel access through the image's read_func/write_func.
 * The plain build dispatches to whichever variant the image needs. */
#ifndef PIXMAN_FB_ACCESSORS
void
_pixman_bits_image_setup_accessors_accessors (bits_image_t *image);

void
_pixman_bits_image_setup_accessors (bits_image_t *image)
{
    if (image->read_func || image->write_func)
	_pixman_bits_image_setup_accessors_accessors (image);
    else
	setup_accessors (image);
}

#else

void
_pixman_bits_image_setup_accessors_accessors (bits_image_t *image)
{
    setup_accessors (image);
}

#endif
/programs/develop/libraries/pixman/pixman-accessor.h
0,0 → 1,40
/* When building the "accessors" variant of pixman-access.c, every pixel
 * read/write goes through the image's user-supplied callbacks and ACCESS()
 * renames each symbol with an _accessors suffix; otherwise the macros
 * collapse to direct memory access. */
#ifdef PIXMAN_FB_ACCESSORS

#define ACCESS(sym) sym##_accessors

#define READ(img, ptr)							\
    (((bits_image_t *)(img))->read_func ((ptr), sizeof(*(ptr))))
#define WRITE(img, ptr,val)						\
    (((bits_image_t *)(img))->write_func ((ptr), (val), sizeof (*(ptr))))

/* Byte-at-a-time copy through the callbacks.
 * NOTE(review): 'size' is expanded unparenthesized in the loop condition;
 * fine for the simple expressions current callers pass, but a compound
 * argument would misbind — confirm before passing anything else. */
#define MEMCPY_WRAPPED(img, dst, src, size)				\
    do {								\
	size_t _i;							\
	uint8_t *_dst = (uint8_t*)(dst), *_src = (uint8_t*)(src);	\
	for(_i = 0; _i < size; _i++) {					\
	    WRITE((img), _dst +_i, READ((img), _src + _i));		\
	}								\
    } while (0)

#define MEMSET_WRAPPED(img, dst, val, size)				\
    do {								\
	size_t _i;							\
	uint8_t *_dst = (uint8_t*)(dst);				\
	for(_i = 0; _i < (size_t) size; _i++) {				\
	    WRITE((img), _dst +_i, (val));				\
	}								\
    } while (0)

#else

#define ACCESS(sym) sym

/* Direct-memory variants used by the normal build. */
#define READ(img, ptr)		(*(ptr))
#define WRITE(img, ptr, val)	(*(ptr) = (val))
#define MEMCPY_WRAPPED(img, dst, src, size)				\
    memcpy(dst, src, size)
#define MEMSET_WRAPPED(img, dst, val, size)				\
    memset(dst, val, size)

#endif
 
/programs/develop/libraries/pixman/pixman-bits-image.c
0,0 → 1,1331
/*
* Copyright © 2000 Keith Packard, member of The XFree86 Project, Inc.
* 2005 Lars Knoll & Zack Rusin, Trolltech
* 2008 Aaron Plattner, NVIDIA Corporation
* Copyright © 2000 SuSE, Inc.
* Copyright © 2007, 2009 Red Hat, Inc.
* Copyright © 2008 André Tupinambá <andrelrt@gmail.com>
*
* Permission to use, copy, modify, distribute, and sell this software and its
* documentation for any purpose is hereby granted without fee, provided that
* the above copyright notice appear in all copies and that both that
* copyright notice and this permission notice appear in supporting
* documentation, and that the name of Keith Packard not be used in
* advertising or publicity pertaining to distribution of the software without
* specific, written prior permission. Keith Packard makes no
* representations about the suitability of this software for any purpose. It
* is provided "as is" without express or implied warranty.
*
* THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS
* SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS, IN NO EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY
* SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN
* AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING
* OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS
* SOFTWARE.
*/
 
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "pixman-private.h"
#include "pixman-combine32.h"
 
/* Store functions */
/* Store a scanline of 8-bit-per-channel pixels into @image at (x, y),
 * and mirror the store into the separate alpha map, if one is attached,
 * after translating into the alpha map's coordinate space.
 */
void
_pixman_image_store_scanline_32 (bits_image_t * image,
                                 int             x,
                                 int             y,
                                 int             width,
                                 const uint32_t *buffer)
{
    bits_image_t *alpha = image->common.alpha_map;

    image->store_scanline_32 (image, x, y, width, buffer);

    if (alpha)
    {
	alpha->store_scanline_32 (alpha,
				  x - image->common.alpha_origin_x,
				  y - image->common.alpha_origin_y,
				  width, buffer);
    }
}
 
/* Wide (16-bit-per-channel) variant of _pixman_image_store_scanline_32.
 * @buffer is declared uint32_t* for symmetry with the narrow path but
 * actually carries 64-bit pixel data, as in the rest of this file.
 */
void
_pixman_image_store_scanline_64 (bits_image_t * image,
                                 int             x,
                                 int             y,
                                 int             width,
                                 const uint32_t *buffer)
{
    bits_image_t *alpha = image->common.alpha_map;

    image->store_scanline_64 (image, x, y, width, buffer);

    if (alpha)
    {
	alpha->store_scanline_64 (alpha,
				  x - image->common.alpha_origin_x,
				  y - image->common.alpha_origin_y,
				  width, buffer);
    }
}
 
/* Fetch functions */
 
/* Fetch one pixel, ignoring any attached alpha map.  When @check_bounds
 * is set, out-of-image coordinates yield transparent black (0).
 */
static force_inline uint32_t
fetch_pixel_no_alpha (bits_image_t *image,
		      int x, int y, pixman_bool_t check_bounds)
{
    pixman_bool_t outside =
	(x < 0 || y < 0 || x >= image->width || y >= image->height);

    if (check_bounds && outside)
	return 0;

    return image->fetch_pixel_32 (image, x, y);
}
 
/* Single-pixel fetcher used by the filtered fetch paths below; the
 * check_bounds flag selects whether out-of-image reads return 0.
 */
typedef uint32_t (* get_pixel_t) (bits_image_t *image,
				  int x, int y, pixman_bool_t check_bounds);
 
/* Map *coord into [0, size) according to the image's repeat mode.
 * PIXMAN_REPEAT_NONE (and any unknown value) leaves *coord unchanged;
 * callers handle out-of-bounds coordinates themselves in that case.
 */
static force_inline void
repeat (pixman_repeat_t repeat, int size, int *coord)
{
    if (repeat == PIXMAN_REPEAT_NORMAL)
    {
	*coord = MOD (*coord, size);
    }
    else if (repeat == PIXMAN_REPEAT_PAD)
    {
	*coord = CLIP (*coord, 0, size - 1);
    }
    else if (repeat == PIXMAN_REPEAT_REFLECT)
    {
	/* Fold a period of 2*size into a forward and a mirrored half */
	*coord = MOD (*coord, size * 2);

	if (*coord >= size)
	    *coord = size * 2 - *coord - 1;
    }
}
 
/* Nearest-neighbour sample at fixed-point position (x, y).  Subtracting
 * pixman_fixed_e (the smallest representable increment) before rounding
 * down makes pixel centres, not pixel edges, the rounding boundaries.
 */
static force_inline uint32_t
bits_image_fetch_pixel_nearest (bits_image_t   *image,
				pixman_fixed_t  x,
				pixman_fixed_t  y,
				get_pixel_t     get_pixel)
{
    int x0 = pixman_fixed_to_int (x - pixman_fixed_e);
    int y0 = pixman_fixed_to_int (y - pixman_fixed_e);

    if (image->common.repeat != PIXMAN_REPEAT_NONE)
    {
	/* Repeat modes wrap the coordinates in-range, so no bounds
	 * check is needed in the fetch itself.
	 */
	repeat (image->common.repeat, image->width, &x0);
	repeat (image->common.repeat, image->height, &y0);

	return get_pixel (image, x0, y0, FALSE);
    }
    else
    {
	return get_pixel (image, x0, y0, TRUE);
    }
}
 
#if SIZEOF_LONG > 4

/* Bilinearly blend the four a8r8g8b8 corner pixels (top-left, top-right,
 * bottom-left, bottom-right) with 8-bit fractional weights distx/disty
 * (0..255).  64-bit variant: the four channels are processed as two
 * pairs packed into 64-bit lanes so each pair is blended with a single
 * multiply-accumulate chain.
 */
static force_inline uint32_t
bilinear_interpolation (uint32_t tl, uint32_t tr,
			uint32_t bl, uint32_t br,
			int distx, int disty)
{
    uint64_t distxy, distxiy, distixy, distixiy;
    uint64_t tl64, tr64, bl64, br64;
    uint64_t f, r;

    /* The four corner weights; they sum to 256*256 */
    distxy = distx * disty;
    distxiy = distx * (256 - disty);
    distixy = (256 - distx) * disty;
    distixiy = (256 - distx) * (256 - disty);

    /* Alpha and Blue */
    tl64 = tl & 0xff0000ff;
    tr64 = tr & 0xff0000ff;
    bl64 = bl & 0xff0000ff;
    br64 = br & 0xff0000ff;

    f = tl64 * distixiy + tr64 * distxiy + bl64 * distixy + br64 * distxy;
    r = f & 0x0000ff0000ff0000ull;

    /* Red and Green, spread apart so the 16-bit weighted sums cannot
     * carry into each other.
     */
    tl64 = tl;
    tl64 = ((tl64 << 16) & 0x000000ff00000000ull) | (tl64 & 0x0000ff00ull);

    tr64 = tr;
    tr64 = ((tr64 << 16) & 0x000000ff00000000ull) | (tr64 & 0x0000ff00ull);

    bl64 = bl;
    bl64 = ((bl64 << 16) & 0x000000ff00000000ull) | (bl64 & 0x0000ff00ull);

    br64 = br;
    br64 = ((br64 << 16) & 0x000000ff00000000ull) | (br64 & 0x0000ff00ull);

    f = tl64 * distixiy + tr64 * distxiy + bl64 * distixy + br64 * distxy;
    r |= ((f >> 16) & 0x000000ff00000000ull) | (f & 0xff000000ull);

    /* Divide by 256*256 and repack */
    return (uint32_t)(r >> 16);
}

#else

/* 32-bit variant of the blend above: channels are interleaved into two
 * uint32_t accumulators, two channels at a time, again relying on the
 * weights summing to 65536 so results land back in 8 bits per channel.
 */
static force_inline uint32_t
bilinear_interpolation (uint32_t tl, uint32_t tr,
			uint32_t bl, uint32_t br,
			int distx, int disty)
{
    int distxy, distxiy, distixy, distixiy;
    uint32_t f, r;

    distxy = distx * disty;
    distxiy = (distx << 8) - distxy; /* distx * (256 - disty) */
    distixy = (disty << 8) - distxy; /* disty * (256 - distx) */
    distixiy =
	256 * 256 - (disty << 8) -
	(distx << 8) + distxy; /* (256 - distx) * (256 - disty) */

    /* Blue */
    r = (tl & 0x000000ff) * distixiy + (tr & 0x000000ff) * distxiy
	+ (bl & 0x000000ff) * distixy + (br & 0x000000ff) * distxy;

    /* Green; its weighted sum occupies the top byte of f */
    f = (tl & 0x0000ff00) * distixiy + (tr & 0x0000ff00) * distxiy
	+ (bl & 0x0000ff00) * distixy + (br & 0x0000ff00) * distxy;
    r |= f & 0xff000000;

    /* Shift the high halves (alpha, red) down and reuse the same code */
    tl >>= 16;
    tr >>= 16;
    bl >>= 16;
    br >>= 16;
    r >>= 16;

    /* Red */
    f = (tl & 0x000000ff) * distixiy + (tr & 0x000000ff) * distxiy
	+ (bl & 0x000000ff) * distixy + (br & 0x000000ff) * distxy;
    r |= f & 0x00ff0000;

    /* Alpha */
    f = (tl & 0x0000ff00) * distixiy + (tr & 0x0000ff00) * distxiy
	+ (bl & 0x0000ff00) * distixy + (br & 0x0000ff00) * distxy;
    r |= f & 0xff000000;

    return r;
}

#endif
 
/* Bilinearly filtered sample at fixed-point position (x, y): fetch the
 * 2x2 pixel neighbourhood around the sample point and blend it with the
 * 8-bit sub-pixel fractions.
 */
static force_inline uint32_t
bits_image_fetch_pixel_bilinear (bits_image_t   *image,
				 pixman_fixed_t  x,
				 pixman_fixed_t  y,
				 get_pixel_t     get_pixel)
{
    pixman_repeat_t repeat_mode = image->common.repeat;
    int width = image->width;
    int height = image->height;
    int x1, y1, x2, y2;
    uint32_t tl, tr, bl, br;
    int32_t distx, disty;

    /* Shift so that integer coordinates fall on pixel centres */
    x1 = x - pixman_fixed_1 / 2;
    y1 = y - pixman_fixed_1 / 2;

    /* Top 8 bits of the 16-bit fraction -> blend weights 0..255 */
    distx = (x1 >> 8) & 0xff;
    disty = (y1 >> 8) & 0xff;

    x1 = pixman_fixed_to_int (x1);
    y1 = pixman_fixed_to_int (y1);
    x2 = x1 + 1;
    y2 = y1 + 1;

    if (repeat_mode != PIXMAN_REPEAT_NONE)
    {
	/* Wrap each corner coordinate; fetches are then always in range */
	repeat (repeat_mode, width, &x1);
	repeat (repeat_mode, height, &y1);
	repeat (repeat_mode, width, &x2);
	repeat (repeat_mode, height, &y2);

	tl = get_pixel (image, x1, y1, FALSE);
	bl = get_pixel (image, x1, y2, FALSE);
	tr = get_pixel (image, x2, y1, FALSE);
	br = get_pixel (image, x2, y2, FALSE);
    }
    else
    {
	/* Out-of-image corners read as transparent black */
	tl = get_pixel (image, x1, y1, TRUE);
	tr = get_pixel (image, x2, y1, TRUE);
	bl = get_pixel (image, x1, y2, TRUE);
	br = get_pixel (image, x2, y2, TRUE);
    }

    return bilinear_interpolation (tl, tr, bl, br, distx, disty);
}
 
/* Fast path: bilinear filtering of an a8r8g8b8/x8r8g8b8 image with an
 * affine transform whose y-increment is zero (FAST_PATH_Y_UNIT_ZERO) and
 * PIXMAN_REPEAT_NONE.  The scanline is split into five phases: zero fill
 * left of the image, left edge, interior, right edge, zero fill right of
 * the image.
 */
static void
bits_image_fetch_bilinear_no_repeat_8888 (pixman_image_t * ima,
					  int              offset,
					  int              line,
					  int              width,
					  uint32_t *       buffer,
					  const uint32_t * mask)
{
    bits_image_t *bits = &ima->bits;
    pixman_fixed_t x_top, x_bottom, x;
    pixman_fixed_t ux_top, ux_bottom, ux;
    pixman_vector_t v;
    uint32_t top_mask, bottom_mask;
    uint32_t *top_row;
    uint32_t *bottom_row;
    uint32_t *end;
    uint32_t zero[2] = { 0, 0 };      /* dummy row used for off-image lines */
    uint32_t one = 1;                 /* dummy "always on" mask entry */
    int y, y1, y2;
    int disty;
    int mask_inc;
    int w;

    /* reference point is the center of the pixel */
    v.vector[0] = pixman_int_to_fixed (offset) + pixman_fixed_1 / 2;
    v.vector[1] = pixman_int_to_fixed (line) + pixman_fixed_1 / 2;
    v.vector[2] = pixman_fixed_1;

    if (!pixman_transform_point_3d (bits->common.transform, &v))
	return;

    ux = ux_top = ux_bottom = bits->common.transform->matrix[0][0];
    x = x_top = x_bottom = v.vector[0] - pixman_fixed_1/2;

    y = v.vector[1] - pixman_fixed_1/2;
    disty = (y >> 8) & 0xff;          /* vertical blend weight, 0..255 */

    /* Load the pointers to the first and second lines from the source
     * image that bilinear code must read.
     *
     * The main trick in this code is about the check if any line are
     * outside of the image;
     *
     * When I realize that a line (any one) is outside, I change
     * the pointer to a dummy area with zeros. Once I change this, I
     * must be sure the pointer will not change, so I set the
     * variables to each pointer increments inside the loop.
     */
    y1 = pixman_fixed_to_int (y);
    y2 = y1 + 1;

    if (y1 < 0 || y1 >= bits->height)
    {
	/* Top line off-image: read zeros and never advance */
	top_row = zero;
	x_top = 0;
	ux_top = 0;
    }
    else
    {
	top_row = bits->bits + y1 * bits->rowstride;
	x_top = x;
	ux_top = ux;
    }

    if (y2 < 0 || y2 >= bits->height)
    {
	bottom_row = zero;
	x_bottom = 0;
	ux_bottom = 0;
    }
    else
    {
	bottom_row = bits->bits + y2 * bits->rowstride;
	x_bottom = x;
	ux_bottom = ux;
    }

    /* Instead of checking whether the operation uses the mask in
     * each loop iteration, verify this only once and prepare the
     * variables to make the code smaller inside the loop.
     */
    if (!mask)
    {
	/* No mask: point at a permanent non-zero value and never step */
	mask_inc = 0;
	mask = &one;
    }
    else
    {
	/* If have a mask, prepare the variables to check it */
	mask_inc = 1;
    }

    /* If both are zero, then the whole thing is zero */
    if (top_row == zero && bottom_row == zero)
    {
	memset (buffer, 0, width * sizeof (uint32_t));
	return;
    }
    else if (bits->format == PIXMAN_x8r8g8b8)
    {
	/* x8r8g8b8 has undefined alpha bits; force alpha to 0xff on the
	 * in-image row(s) only, so the dummy zero row stays transparent.
	 */
	if (top_row == zero)
	{
	    top_mask = 0;
	    bottom_mask = 0xff000000;
	}
	else if (bottom_row == zero)
	{
	    top_mask = 0xff000000;
	    bottom_mask = 0;
	}
	else
	{
	    top_mask = 0xff000000;
	    bottom_mask = 0xff000000;
	}
    }
    else
    {
	top_mask = 0;
	bottom_mask = 0;
    }

    end = buffer + width;

    /* Zero fill to the left of the image */
    while (buffer < end && x < pixman_fixed_minus_1)
    {
	*buffer++ = 0;
	x += ux;
	x_top += ux_top;
	x_bottom += ux_bottom;
	mask += mask_inc;
    }

    /* Left edge: the left pair of source pixels is off-image (reads 0) */
    while (buffer < end && x < 0)
    {
	uint32_t tr, br;
	int32_t distx;

	tr = top_row[pixman_fixed_to_int (x_top) + 1] | top_mask;
	br = bottom_row[pixman_fixed_to_int (x_bottom) + 1] | bottom_mask;

	distx = (x >> 8) & 0xff;

	*buffer++ = bilinear_interpolation (0, tr, 0, br, distx, disty);

	x += ux;
	x_top += ux_top;
	x_bottom += ux_bottom;
	mask += mask_inc;
    }

    /* Main part: all four source pixels are inside the image */
    w = pixman_int_to_fixed (bits->width - 1);

    while (buffer < end && x < w)
    {
	if (*mask)
	{
	    uint32_t tl, tr, bl, br;
	    int32_t distx;

	    tl = top_row [pixman_fixed_to_int (x_top)] | top_mask;
	    tr = top_row [pixman_fixed_to_int (x_top) + 1] | top_mask;
	    bl = bottom_row [pixman_fixed_to_int (x_bottom)] | bottom_mask;
	    br = bottom_row [pixman_fixed_to_int (x_bottom) + 1] | bottom_mask;

	    distx = (x >> 8) & 0xff;

	    *buffer = bilinear_interpolation (tl, tr, bl, br, distx, disty);
	}

	buffer++;
	x += ux;
	x_top += ux_top;
	x_bottom += ux_bottom;
	mask += mask_inc;
    }

    /* Right Edge: the right pair of source pixels is off-image */
    w = pixman_int_to_fixed (bits->width);
    while (buffer < end && x < w)
    {
	if (*mask)
	{
	    uint32_t tl, bl;
	    int32_t distx;

	    tl = top_row [pixman_fixed_to_int (x_top)] | top_mask;
	    bl = bottom_row [pixman_fixed_to_int (x_bottom)] | bottom_mask;

	    distx = (x >> 8) & 0xff;

	    *buffer = bilinear_interpolation (tl, 0, bl, 0, distx, disty);
	}

	buffer++;
	x += ux;
	x_top += ux_top;
	x_bottom += ux_bottom;
	mask += mask_inc;
    }

    /* Zero fill to the right of the image */
    while (buffer < end)
	*buffer++ = 0;
}
 
/* Apply a convolution filter at fixed-point position (x, y).
 * filter_params layout: params[0] = kernel width, params[1] = kernel
 * height (both as pixman_fixed_t), followed by width*height fixed-point
 * kernel weights.  Channel sums are accumulated in 16.16 and clamped to
 * 0..255.
 */
static force_inline uint32_t
bits_image_fetch_pixel_convolution (bits_image_t   *image,
				    pixman_fixed_t  x,
				    pixman_fixed_t  y,
				    get_pixel_t     get_pixel)
{
    pixman_fixed_t *params = image->common.filter_params;
    int x_off = (params[0] - pixman_fixed_1) >> 1;  /* center the kernel */
    int y_off = (params[1] - pixman_fixed_1) >> 1;
    int32_t cwidth = pixman_fixed_to_int (params[0]);
    int32_t cheight = pixman_fixed_to_int (params[1]);
    int32_t srtot, sgtot, sbtot, satot;
    int32_t i, j, x1, x2, y1, y2;
    pixman_repeat_t repeat_mode = image->common.repeat;
    int width = image->width;
    int height = image->height;

    params += 2;    /* skip the size header; weights follow */

    x1 = pixman_fixed_to_int (x - pixman_fixed_e - x_off);
    y1 = pixman_fixed_to_int (y - pixman_fixed_e - y_off);
    x2 = x1 + cwidth;
    y2 = y1 + cheight;

    srtot = sgtot = sbtot = satot = 0;

    for (i = y1; i < y2; ++i)
    {
	for (j = x1; j < x2; ++j)
	{
	    int rx = j;
	    int ry = i;

	    pixman_fixed_t f = *params;

	    if (f)
	    {
		uint32_t pixel;

		if (repeat_mode != PIXMAN_REPEAT_NONE)
		{
		    repeat (repeat_mode, width, &rx);
		    repeat (repeat_mode, height, &ry);

		    pixel = get_pixel (image, rx, ry, FALSE);
		}
		else
		{
		    /* Off-image taps contribute transparent black */
		    pixel = get_pixel (image, rx, ry, TRUE);
		}

		srtot += RED_8 (pixel) * f;
		sgtot += GREEN_8 (pixel) * f;
		sbtot += BLUE_8 (pixel) * f;
		satot += ALPHA_8 (pixel) * f;
	    }

	    params++;
	}
    }

    /* Drop the 16 fractional bits of the fixed-point weights */
    satot >>= 16;
    srtot >>= 16;
    sgtot >>= 16;
    sbtot >>= 16;

    /* Kernels need not be normalized, so clamp each channel */
    satot = CLIP (satot, 0, 0xff);
    srtot = CLIP (srtot, 0, 0xff);
    sgtot = CLIP (sgtot, 0, 0xff);
    sbtot = CLIP (sbtot, 0, 0xff);

    return ((satot << 24) | (srtot << 16) | (sgtot << 8) | (sbtot));
}
 
/* Dispatch a single-pixel fetch to the sampler matching the image's
 * filter setting.  Unknown filters yield transparent black.
 */
static force_inline uint32_t
bits_image_fetch_pixel_filtered (bits_image_t   *image,
				 pixman_fixed_t  x,
				 pixman_fixed_t  y,
				 get_pixel_t     get_pixel)
{
    switch (image->common.filter)
    {
    case PIXMAN_FILTER_NEAREST:
    case PIXMAN_FILTER_FAST:
	return bits_image_fetch_pixel_nearest (image, x, y, get_pixel);

    case PIXMAN_FILTER_BILINEAR:
    case PIXMAN_FILTER_GOOD:
    case PIXMAN_FILTER_BEST:
	return bits_image_fetch_pixel_bilinear (image, x, y, get_pixel);

    case PIXMAN_FILTER_CONVOLUTION:
	return bits_image_fetch_pixel_convolution (image, x, y, get_pixel);

    default:
	return 0;
    }
}
 
/* Scanline fetcher for images with an affine (or identity) transform and
 * no alpha map: walk the source in fixed-point steps (ux, uy) per output
 * pixel and sample through the configured filter.
 */
static void
bits_image_fetch_affine_no_alpha (pixman_image_t * image,
				  int              offset,
				  int              line,
				  int              width,
				  uint32_t *       buffer,
				  const uint32_t * mask)
{
    pixman_fixed_t x, y;
    pixman_fixed_t ux, uy;
    pixman_vector_t v;
    int i;

    /* reference point is the center of the pixel */
    v.vector[0] = pixman_int_to_fixed (offset) + pixman_fixed_1 / 2;
    v.vector[1] = pixman_int_to_fixed (line) + pixman_fixed_1 / 2;
    v.vector[2] = pixman_fixed_1;

    if (image->common.transform)
    {
	if (!pixman_transform_point_3d (image->common.transform, &v))
	    return;

	/* Affine: per-pixel step is the transform's first column */
	ux = image->common.transform->matrix[0][0];
	uy = image->common.transform->matrix[1][0];
    }
    else
    {
	/* Identity transform */
	ux = pixman_fixed_1;
	uy = 0;
    }

    x = v.vector[0];
    y = v.vector[1];

    for (i = 0; i < width; ++i)
    {
	/* Skip pixels the (alpha) mask zeroes out entirely */
	if (!mask || mask[i])
	{
	    buffer[i] = bits_image_fetch_pixel_filtered (
		&image->bits, x, y, fetch_pixel_no_alpha);
	}

	x += ux;
	y += uy;
    }
}
 
/* General fetcher */
/* General fetcher */
/* Fetch one pixel, replacing its alpha channel with the value from the
 * attached alpha map (if any), translated by the alpha origin.  Reads
 * outside the alpha map yield alpha 0.
 */
static force_inline uint32_t
fetch_pixel_general (bits_image_t *image, int x, int y, pixman_bool_t check_bounds)
{
    uint32_t pixel;

    if (check_bounds &&
	(x < 0 || x >= image->width || y < 0 || y >= image->height))
    {
	return 0;
    }

    pixel = image->fetch_pixel_32 (image, x, y);

    if (image->common.alpha_map)
    {
	uint32_t pixel_a;

	x -= image->common.alpha_origin_x;
	y -= image->common.alpha_origin_y;

	if (x < 0 || x >= image->common.alpha_map->width ||
	    y < 0 || y >= image->common.alpha_map->height)
	{
	    pixel_a = 0;
	}
	else
	{
	    pixel_a = image->common.alpha_map->fetch_pixel_32 (
		image->common.alpha_map, x, y);

	    pixel_a = ALPHA_8 (pixel_a);
	}

	/* Splice the alpha-map value into the pixel's top byte */
	pixel &= 0x00ffffff;
	pixel |= (pixel_a << 24);
    }

    return pixel;
}
 
/* Fully general scanline fetcher: handles projective transforms (the
 * homogeneous coordinate w varies) and alpha maps.  Slowest path; used
 * when no faster fetcher in fetcher_info matches.
 */
static void
bits_image_fetch_general (pixman_image_t * image,
			  int              offset,
			  int              line,
			  int              width,
			  uint32_t *       buffer,
			  const uint32_t * mask)
{
    pixman_fixed_t x, y, w;
    pixman_fixed_t ux, uy, uw;
    pixman_vector_t v;
    int i;

    /* reference point is the center of the pixel */
    v.vector[0] = pixman_int_to_fixed (offset) + pixman_fixed_1 / 2;
    v.vector[1] = pixman_int_to_fixed (line) + pixman_fixed_1 / 2;
    v.vector[2] = pixman_fixed_1;

    if (image->common.transform)
    {
	if (!pixman_transform_point_3d (image->common.transform, &v))
	    return;

	ux = image->common.transform->matrix[0][0];
	uy = image->common.transform->matrix[1][0];
	uw = image->common.transform->matrix[2][0];
    }
    else
    {
	ux = pixman_fixed_1;
	uy = 0;
	uw = 0;
    }

    x = v.vector[0];
    y = v.vector[1];
    w = v.vector[2];

    for (i = 0; i < width; ++i)
    {
	pixman_fixed_t x0, y0;

	if (!mask || mask[i])
	{
	    if (w != 0)
	    {
		/* Perspective divide in 48.16 fixed point */
		x0 = ((pixman_fixed_48_16_t)x << 16) / w;
		y0 = ((pixman_fixed_48_16_t)y << 16) / w;
	    }
	    else
	    {
		x0 = 0;
		y0 = 0;
	    }

	    buffer[i] = bits_image_fetch_pixel_filtered (
		&image->bits, x0, y0, fetch_pixel_general);
	}

	x += ux;
	y += uy;
	w += uw;
    }
}
 
/* Dummy all-zero "row" read in place of off-image rows below */
static const uint8_t zero[8] = { 0, 0, 0, 0, 0, 0, 0, 0 };

/* Converts the pixel at index x of a raw scanline to a8r8g8b8 */
typedef uint32_t (* convert_pixel_t) (const uint8_t *row, int x);
 
static force_inline void
bits_image_fetch_bilinear_affine (pixman_image_t * image,
int offset,
int line,
int width,
uint32_t * buffer,
const uint32_t * mask,
 
convert_pixel_t convert_pixel,
pixman_format_code_t format,
pixman_repeat_t repeat_mode)
{
pixman_fixed_t x, y;
pixman_fixed_t ux, uy;
pixman_vector_t v;
bits_image_t *bits = &image->bits;
int i;
 
/* reference point is the center of the pixel */
v.vector[0] = pixman_int_to_fixed (offset) + pixman_fixed_1 / 2;
v.vector[1] = pixman_int_to_fixed (line) + pixman_fixed_1 / 2;
v.vector[2] = pixman_fixed_1;
 
if (!pixman_transform_point_3d (image->common.transform, &v))
return;
 
ux = image->common.transform->matrix[0][0];
uy = image->common.transform->matrix[1][0];
 
x = v.vector[0];
y = v.vector[1];
 
for (i = 0; i < width; ++i)
{
int x1, y1, x2, y2;
uint32_t tl, tr, bl, br;
int32_t distx, disty;
int width = image->bits.width;
int height = image->bits.height;
const uint8_t *row1;
const uint8_t *row2;
 
if (mask && !mask[i])
goto next;
 
x1 = x - pixman_fixed_1 / 2;
y1 = y - pixman_fixed_1 / 2;
 
distx = (x1 >> 8) & 0xff;
disty = (y1 >> 8) & 0xff;
 
y1 = pixman_fixed_to_int (y1);
y2 = y1 + 1;
x1 = pixman_fixed_to_int (x1);
x2 = x1 + 1;
 
if (repeat_mode != PIXMAN_REPEAT_NONE)
{
uint32_t mask;
 
mask = PIXMAN_FORMAT_A (format)? 0 : 0xff000000;
 
repeat (repeat_mode, width, &x1);
repeat (repeat_mode, height, &y1);
repeat (repeat_mode, width, &x2);
repeat (repeat_mode, height, &y2);
 
row1 = (uint8_t *)bits->bits + bits->rowstride * 4 * y1;
row2 = (uint8_t *)bits->bits + bits->rowstride * 4 * y2;
 
tl = convert_pixel (row1, x1) | mask;
tr = convert_pixel (row1, x2) | mask;
bl = convert_pixel (row2, x1) | mask;
br = convert_pixel (row2, x2) | mask;
}
else
{
uint32_t mask1, mask2;
int bpp;
 
/* Note: PIXMAN_FORMAT_BPP() returns an unsigned value,
* which means if you use it in expressions, those
* expressions become unsigned themselves. Since
* the variables below can be negative in some cases,
* that will lead to crashes on 64 bit architectures.
*
* So this line makes sure bpp is signed
*/
bpp = PIXMAN_FORMAT_BPP (format);
 
if (x1 >= width || x2 < 0 || y1 >= height || y2 < 0)
{
buffer[i] = 0;
goto next;
}
 
if (y2 == 0)
{
row1 = zero;
mask1 = 0;
}
else
{
row1 = (uint8_t *)bits->bits + bits->rowstride * 4 * y1;
row1 += bpp / 8 * x1;
 
mask1 = PIXMAN_FORMAT_A (format)? 0 : 0xff000000;
}
 
if (y1 == height - 1)
{
row2 = zero;
mask2 = 0;
}
else
{
row2 = (uint8_t *)bits->bits + bits->rowstride * 4 * y2;
row2 += bpp / 8 * x1;
 
mask2 = PIXMAN_FORMAT_A (format)? 0 : 0xff000000;
}
 
if (x2 == 0)
{
tl = 0;
bl = 0;
}
else
{
tl = convert_pixel (row1, 0) | mask1;
bl = convert_pixel (row2, 0) | mask2;
}
 
if (x1 == width - 1)
{
tr = 0;
br = 0;
}
else
{
tr = convert_pixel (row1, 1) | mask1;
br = convert_pixel (row2, 1) | mask2;
}
}
 
buffer[i] = bilinear_interpolation (
tl, tr, bl, br, distx, disty);
 
next:
x += ux;
y += uy;
}
}
 
/* a8r8g8b8 rows are already in the canonical format: plain 32-bit load */
static force_inline uint32_t
convert_a8r8g8b8 (const uint8_t *row, int x)
{
    const uint32_t *pixels = (const uint32_t *)row;

    return pixels[x];
}
 
/* x8r8g8b8: raw 32-bit load; callers OR in opaque alpha separately */
static force_inline uint32_t
convert_x8r8g8b8 (const uint8_t *row, int x)
{
    const uint32_t *pixels = (const uint32_t *)row;

    return pixels[x];
}
 
/* a8: the byte becomes the alpha channel; color channels stay zero */
static force_inline uint32_t
convert_a8 (const uint8_t *row, int x)
{
    return (uint32_t)row[x] << 24;
}
 
/* r5g6b5: 16-bit load, expanded to 888 by the shared conversion macro */
static force_inline uint32_t
convert_r5g6b5 (const uint8_t *row, int x)
{
    uint16_t pixel = ((const uint16_t *)row)[x];

    return CONVERT_0565_TO_0888 (pixel);
}
 
/* Stamp out a scanline fetcher specialized for one (format, repeat mode)
 * pair by instantiating the force_inline template above with constant
 * arguments.  The trailing `extern int no_such_variable' declaration
 * only exists to swallow the semicolon at the macro's use site.
 */
#define MAKE_BILINEAR_FETCHER(name, format, repeat_mode)		\
    static void								\
    bits_image_fetch_bilinear_affine_ ## name (pixman_image_t *image,	\
					       int              offset,	\
					       int              line,	\
					       int              width,	\
					       uint32_t *       buffer,	\
					       const uint32_t * mask)	\
    {									\
	bits_image_fetch_bilinear_affine (image, offset, line, width, buffer, mask, \
					  convert_ ## format,		\
					  PIXMAN_ ## format,		\
					  repeat_mode);			\
    }									\
    extern int no_such_variable

MAKE_BILINEAR_FETCHER (pad_a8r8g8b8,     a8r8g8b8, PIXMAN_REPEAT_PAD);
MAKE_BILINEAR_FETCHER (none_a8r8g8b8,    a8r8g8b8, PIXMAN_REPEAT_NONE);
MAKE_BILINEAR_FETCHER (reflect_a8r8g8b8, a8r8g8b8, PIXMAN_REPEAT_REFLECT);
MAKE_BILINEAR_FETCHER (normal_a8r8g8b8,  a8r8g8b8, PIXMAN_REPEAT_NORMAL);
MAKE_BILINEAR_FETCHER (pad_x8r8g8b8,     x8r8g8b8, PIXMAN_REPEAT_PAD);
MAKE_BILINEAR_FETCHER (none_x8r8g8b8,    x8r8g8b8, PIXMAN_REPEAT_NONE);
MAKE_BILINEAR_FETCHER (reflect_x8r8g8b8, x8r8g8b8, PIXMAN_REPEAT_REFLECT);
MAKE_BILINEAR_FETCHER (normal_x8r8g8b8,  x8r8g8b8, PIXMAN_REPEAT_NORMAL);
MAKE_BILINEAR_FETCHER (pad_a8,           a8,       PIXMAN_REPEAT_PAD);
MAKE_BILINEAR_FETCHER (none_a8,          a8,       PIXMAN_REPEAT_NONE);
MAKE_BILINEAR_FETCHER (reflect_a8,       a8,       PIXMAN_REPEAT_REFLECT);
MAKE_BILINEAR_FETCHER (normal_a8,        a8,       PIXMAN_REPEAT_NORMAL);
MAKE_BILINEAR_FETCHER (pad_r5g6b5,       r5g6b5,   PIXMAN_REPEAT_PAD);
MAKE_BILINEAR_FETCHER (none_r5g6b5,      r5g6b5,   PIXMAN_REPEAT_NONE);
MAKE_BILINEAR_FETCHER (reflect_r5g6b5,   r5g6b5,   PIXMAN_REPEAT_REFLECT);
MAKE_BILINEAR_FETCHER (normal_r5g6b5,    r5g6b5,   PIXMAN_REPEAT_NORMAL);
 
/* Scanline fetcher for PIXMAN_solid images: replicate the single stored
 * pixel (fetched at (0, 0)) across the whole scanline.
 */
static void
bits_image_fetch_solid_32 (pixman_image_t * image,
                           int              x,
                           int              y,
                           int              width,
                           uint32_t *       buffer,
                           const uint32_t * mask)
{
    uint32_t color = image->bits.fetch_pixel_32 (&image->bits, 0, 0);
    int i;

    for (i = 0; i < width; ++i)
	buffer[i] = color;
}
 
/* Wide variant of bits_image_fetch_solid_32; the output buffer holds
 * 64-bit pixels even though it is declared uint32_t*.
 */
static void
bits_image_fetch_solid_64 (pixman_image_t * image,
                           int              x,
                           int              y,
                           int              width,
                           uint32_t *       b,
                           const uint32_t * unused)
{
    uint64_t *buffer = (uint64_t *)b;
    uint64_t color = image->bits.fetch_pixel_64 (&image->bits, 0, 0);
    int i;

    for (i = 0; i < width; ++i)
	buffer[i] = color;
}
 
/* Untransformed fetch with PIXMAN_REPEAT_NONE: pixels outside the image
 * read as zero.  @wide selects 64-bit (8-byte) vs 32-bit (4-byte)
 * pixels; @buffer is stepped in uint32_t units, hence the (wide? 2 : 1)
 * factors.
 */
static void
bits_image_fetch_untransformed_repeat_none (bits_image_t *image,
                                            pixman_bool_t wide,
                                            int           x,
                                            int           y,
                                            int           width,
                                            uint32_t *    buffer)
{
    uint32_t w;

    /* Entire scanline above or below the image */
    if (y < 0 || y >= image->height)
    {
	memset (buffer, 0, width * (wide? 8 : 4));
	return;
    }

    /* Zero-fill the part left of the image */
    if (x < 0)
    {
	w = MIN (width, -x);

	memset (buffer, 0, w * (wide ? 8 : 4));

	width -= w;
	buffer += w * (wide? 2 : 1);
	x += w;
    }

    /* Copy the in-image span */
    if (x < image->width)
    {
	w = MIN (width, image->width - x);

	if (wide)
	    image->fetch_scanline_64 ((pixman_image_t *)image, x, y, w, buffer, NULL);
	else
	    image->fetch_scanline_32 ((pixman_image_t *)image, x, y, w, buffer, NULL);

	width -= w;
	buffer += w * (wide? 2 : 1);
	x += w;
    }

    /* Zero-fill whatever remains right of the image */
    memset (buffer, 0, width * (wide ? 8 : 4));
}
 
/* Untransformed fetch with PIXMAN_REPEAT_NORMAL: wrap (x, y) into the
 * image and copy the scanline in image-width-sized chunks, re-wrapping x
 * after each chunk.  @wide selects 64-bit vs 32-bit pixels.
 */
static void
bits_image_fetch_untransformed_repeat_normal (bits_image_t *image,
                                              pixman_bool_t wide,
                                              int           x,
                                              int           y,
                                              int           width,
                                              uint32_t *    buffer)
{
    uint32_t w;

    /* Wrap y into [0, height) */
    while (y < 0)
	y += image->height;

    while (y >= image->height)
	y -= image->height;

    while (width)
    {
	/* Wrap x into [0, width), then copy up to the image's right edge */
	while (x < 0)
	    x += image->width;
	while (x >= image->width)
	    x -= image->width;

	w = MIN (width, image->width - x);

	if (wide)
	    image->fetch_scanline_64 ((pixman_image_t *)image, x, y, w, buffer, NULL);
	else
	    image->fetch_scanline_32 ((pixman_image_t *)image, x, y, w, buffer, NULL);

	buffer += w * (wide? 2 : 1);
	x += w;
	width -= w;
    }
}
 
/* 32-bit untransformed scanline fetch: dispatch on the repeat mode.
 * Modes other than NONE are handled by the NORMAL helper (PAD/REFLECT
 * are excluded by the fast-path flags in fetcher_info).
 */
static void
bits_image_fetch_untransformed_32 (pixman_image_t * image,
                                   int              x,
                                   int              y,
                                   int              width,
                                   uint32_t *       buffer,
                                   const uint32_t * mask)
{
    bits_image_t *bits = &image->bits;

    if (image->common.repeat == PIXMAN_REPEAT_NONE)
	bits_image_fetch_untransformed_repeat_none (bits, FALSE, x, y, width, buffer);
    else
	bits_image_fetch_untransformed_repeat_normal (bits, FALSE, x, y, width, buffer);
}
 
/* 64-bit (wide) counterpart of bits_image_fetch_untransformed_32 */
static void
bits_image_fetch_untransformed_64 (pixman_image_t * image,
                                   int              x,
                                   int              y,
                                   int              width,
                                   uint32_t *       buffer,
                                   const uint32_t * unused)
{
    bits_image_t *bits = &image->bits;

    if (image->common.repeat == PIXMAN_REPEAT_NONE)
	bits_image_fetch_untransformed_repeat_none (bits, TRUE, x, y, width, buffer);
    else
	bits_image_fetch_untransformed_repeat_normal (bits, TRUE, x, y, width, buffer);
}
 
/* One fetcher selection entry: an image matches when its (extended)
 * format equals `format' (or `format' is PIXMAN_any) and all bits of
 * `flags' are set in the image's fast-path flags.
 */
typedef struct
{
    pixman_format_code_t	format;
    uint32_t			flags;
    fetch_scanline_t		fetch_32;
    fetch_scanline_t		fetch_64;
} fetcher_info_t;

/* Ordered fastest-first; bits_image_property_changed() picks the first
 * matching entry, ending with the fully general fallback.
 */
static const fetcher_info_t fetcher_info[] =
{
    { PIXMAN_solid,
      FAST_PATH_NO_ALPHA_MAP,
      bits_image_fetch_solid_32,
      bits_image_fetch_solid_64
    },

    { PIXMAN_any,
      (FAST_PATH_NO_ALPHA_MAP		|
       FAST_PATH_ID_TRANSFORM		|
       FAST_PATH_NO_CONVOLUTION_FILTER	|
       FAST_PATH_NO_PAD_REPEAT		|
       FAST_PATH_NO_REFLECT_REPEAT),
      bits_image_fetch_untransformed_32,
      bits_image_fetch_untransformed_64
    },

#define FAST_BILINEAR_FLAGS						\
    (FAST_PATH_NO_ALPHA_MAP		|				\
     FAST_PATH_NO_ACCESSORS		|				\
     FAST_PATH_HAS_TRANSFORM		|				\
     FAST_PATH_AFFINE_TRANSFORM		|				\
     FAST_PATH_X_UNIT_POSITIVE		|				\
     FAST_PATH_Y_UNIT_ZERO		|				\
     FAST_PATH_NONE_REPEAT		|				\
     FAST_PATH_BILINEAR_FILTER)

    { PIXMAN_a8r8g8b8,
      FAST_BILINEAR_FLAGS,
      bits_image_fetch_bilinear_no_repeat_8888,
      _pixman_image_get_scanline_generic_64
    },

    { PIXMAN_x8r8g8b8,
      FAST_BILINEAR_FLAGS,
      bits_image_fetch_bilinear_no_repeat_8888,
      _pixman_image_get_scanline_generic_64
    },

#define GENERAL_BILINEAR_FLAGS						\
    (FAST_PATH_NO_ALPHA_MAP		|				\
     FAST_PATH_NO_ACCESSORS		|				\
     FAST_PATH_HAS_TRANSFORM		|				\
     FAST_PATH_AFFINE_TRANSFORM		|				\
     FAST_PATH_BILINEAR_FILTER)

#define BILINEAR_AFFINE_FAST_PATH(name, format, repeat)			\
    { PIXMAN_ ## format,						\
      GENERAL_BILINEAR_FLAGS | FAST_PATH_ ## repeat ## _REPEAT,		\
      bits_image_fetch_bilinear_affine_ ## name,			\
      _pixman_image_get_scanline_generic_64				\
    },

    BILINEAR_AFFINE_FAST_PATH (pad_a8r8g8b8, a8r8g8b8, PAD)
    BILINEAR_AFFINE_FAST_PATH (none_a8r8g8b8, a8r8g8b8, NONE)
    BILINEAR_AFFINE_FAST_PATH (reflect_a8r8g8b8, a8r8g8b8, REFLECT)
    BILINEAR_AFFINE_FAST_PATH (normal_a8r8g8b8, a8r8g8b8, NORMAL)
    BILINEAR_AFFINE_FAST_PATH (pad_x8r8g8b8, x8r8g8b8, PAD)
    BILINEAR_AFFINE_FAST_PATH (none_x8r8g8b8, x8r8g8b8, NONE)
    BILINEAR_AFFINE_FAST_PATH (reflect_x8r8g8b8, x8r8g8b8, REFLECT)
    BILINEAR_AFFINE_FAST_PATH (normal_x8r8g8b8, x8r8g8b8, NORMAL)
    BILINEAR_AFFINE_FAST_PATH (pad_a8, a8, PAD)
    BILINEAR_AFFINE_FAST_PATH (none_a8, a8, NONE)
    BILINEAR_AFFINE_FAST_PATH (reflect_a8, a8, REFLECT)
    BILINEAR_AFFINE_FAST_PATH (normal_a8, a8, NORMAL)
    BILINEAR_AFFINE_FAST_PATH (pad_r5g6b5, r5g6b5, PAD)
    BILINEAR_AFFINE_FAST_PATH (none_r5g6b5, r5g6b5, NONE)
    BILINEAR_AFFINE_FAST_PATH (reflect_r5g6b5, r5g6b5, REFLECT)
    BILINEAR_AFFINE_FAST_PATH (normal_r5g6b5, r5g6b5, NORMAL)

    /* Affine, no alpha */
    { PIXMAN_any,
      (FAST_PATH_NO_ALPHA_MAP | FAST_PATH_HAS_TRANSFORM | FAST_PATH_AFFINE_TRANSFORM),
      bits_image_fetch_affine_no_alpha,
      _pixman_image_get_scanline_generic_64
    },

    /* General */
    { PIXMAN_any, 0, bits_image_fetch_general, _pixman_image_get_scanline_generic_64 },

    { PIXMAN_null },
};
 
/* Property-change hook for bits images: refresh the per-format pixel
 * accessors, then pick the first fetcher_info entry whose format and
 * fast-path flags match this image.
 */
static void
bits_image_property_changed (pixman_image_t *image)
{
    uint32_t flags = image->common.flags;
    pixman_format_code_t format = image->common.extended_format_code;
    const fetcher_info_t *info;

    _pixman_bits_image_setup_accessors (&image->bits);

    for (info = fetcher_info; info->format != PIXMAN_null; ++info)
    {
	pixman_bool_t format_matches =
	    (info->format == format || info->format == PIXMAN_any);
	pixman_bool_t flags_match =
	    ((info->flags & flags) == info->flags);

	if (format_matches && flags_match)
	{
	    image->common.get_scanline_32 = info->fetch_32;
	    image->common.get_scanline_64 = info->fetch_64;
	    break;
	}
    }
}
 
/* Allocate a zero-initialized pixel buffer for a width x height image of
 * the given format, rounding each row up to a whole number of uint32_t.
 * Returns NULL if any size computation would overflow an int or the
 * allocation fails.  On success *rowstride_bytes receives the row pitch
 * in bytes.
 */
static uint32_t *
create_bits (pixman_format_code_t format,
             int                  width,
             int                  height,
             int *                rowstride_bytes)
{
    int stride;
    int buf_size;
    int bpp;

    /* what follows is a long-winded way, avoiding any possibility of integer
     * overflows, of saying:
     * stride = ((width * bpp + 0x1f) >> 5) * sizeof (uint32_t);
     */

    bpp = PIXMAN_FORMAT_BPP (format);
    if (pixman_multiply_overflows_int (width, bpp))
	return NULL;

    stride = width * bpp;
    if (pixman_addition_overflows_int (stride, 0x1f))
	return NULL;

    /* Round up to a multiple of 32 bits, then convert to words */
    stride += 0x1f;
    stride >>= 5;

    stride *= sizeof (uint32_t);

    if (pixman_multiply_overflows_int (height, stride))
	return NULL;

    buf_size = height * stride;

    if (rowstride_bytes)
	*rowstride_bytes = stride;

    /* calloc so new images start out fully transparent */
    return calloc (buf_size, 1);
}
 
/* Public constructor for a bits image.  If @bits is NULL (and the size
 * is non-zero) a zeroed buffer is allocated and owned by the image
 * (free_me); otherwise the caller's buffer is used and @rowstride_bytes
 * must be a multiple of sizeof (uint32_t).  Returns NULL on invalid
 * arguments or allocation failure.
 */
PIXMAN_EXPORT pixman_image_t *
pixman_image_create_bits (pixman_format_code_t format,
                          int                  width,
                          int                  height,
                          uint32_t *           bits,
                          int                  rowstride_bytes)
{
    pixman_image_t *image;
    uint32_t *free_me = NULL;

    /* must be a whole number of uint32_t's
     */
    return_val_if_fail (
	bits == NULL || (rowstride_bytes % sizeof (uint32_t)) == 0, NULL);

    return_val_if_fail (PIXMAN_FORMAT_BPP (format) >= PIXMAN_FORMAT_DEPTH (format), NULL);

    if (!bits && width && height)
    {
	free_me = bits = create_bits (format, width, height, &rowstride_bytes);
	if (!bits)
	    return NULL;
    }

    image = _pixman_image_allocate ();

    if (!image)
    {
	/* Don't leak the buffer we allocated above */
	if (free_me)
	    free (free_me);

	return NULL;
    }

    image->type = BITS;
    image->bits.format = format;
    image->bits.width = width;
    image->bits.height = height;
    image->bits.bits = bits;
    image->bits.free_me = free_me;
    image->bits.read_func = NULL;
    image->bits.write_func = NULL;

    /* The rowstride is stored in number of uint32_t */
    image->bits.rowstride = rowstride_bytes / (int) sizeof (uint32_t);

    image->bits.indexed = NULL;

    image->common.property_changed = bits_image_property_changed;

    _pixman_image_reset_clip_region (image);

    return image;
}
/programs/develop/libraries/pixman/pixman-combine32.c
0,0 → 1,2465
/* WARNING: This file is generated by combine.pl from combine.inc.
Please edit one of those files rather than this one. */
 
#line 1 "pixman-combine.c.template"
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif
 
#include <math.h>
#include <string.h>
 
#include "pixman-private.h"
 
#include "pixman-combine32.h"
 
/*** per channel helper functions ***/
 
/* Component-alpha mask combiner: on return, *src holds the source pixel
 * multiplied per-channel by the mask, and *mask holds the source alpha
 * replicated into all four channels and multiplied by the mask.  The
 * !a and a == ~0 branches short-circuit the fully-transparent and
 * fully-opaque mask cases.
 */
static void
combine_mask_ca (uint32_t *src, uint32_t *mask)
{
    uint32_t a = *mask;

    uint32_t x;
    uint16_t xa;

    if (!a)
    {
	*(src) = 0;
	return;
    }

    x = *(src);
    if (a == ~0)
    {
	/* Opaque mask: just replicate the source alpha into *mask */
	x = x >> A_SHIFT;
	x |= x << G_SHIFT;
	x |= x << R_SHIFT;
	*(mask) = x;
	return;
    }

    xa = x >> A_SHIFT;
    UN8x4_MUL_UN8x4 (x, a);
    *(src) = x;
    UN8x4_MUL_UN8 (a, xa);
    *(mask) = a;
}
 
/* Component-alpha helper that only needs the masked source value:
 * multiply *src per-channel by *mask, with shortcuts for the
 * transparent (result 0) and opaque (unchanged) mask cases.
 */
static void
combine_mask_value_ca (uint32_t *src, const uint32_t *mask)
{
    uint32_t a = *mask;
    uint32_t x;

    if (!a)
    {
	*(src) = 0;
	return;
    }

    if (a == ~0)
	return;

    x = *(src);
    UN8x4_MUL_UN8x4 (x, a);
    *(src) = x;
}
 
/* Component-alpha helper that only needs the combined alpha: replace
 * *mask with mask * alpha(src) per channel.  Skips work when the mask
 * is fully transparent or the source alpha is already MASK (0xff).
 */
static void
combine_mask_alpha_ca (const uint32_t *src, uint32_t *mask)
{
    uint32_t a = *(mask);
    uint32_t x;

    if (!a)
	return;

    x = *(src) >> A_SHIFT;
    if (x == MASK)
	return;

    if (a == ~0)
    {
	/* Opaque mask: result is source alpha replicated to all channels */
	x |= x << G_SHIFT;
	x |= x << R_SHIFT;
	*(mask) = x;
	return;
    }

    UN8x4_MUL_UN8 (a, x);
    *(mask) = a;
}
 
/*
* There are two ways of handling alpha -- either as a single unified value or
* a separate value for each component, hence each macro must have two
* versions. The unified alpha version has a 'U' at the end of the name,
* the component version has a 'C'. Similarly, functions which deal with
* this difference will have two versions using the same convention.
*/
 
/*
* All of the composing functions
*/
 
/* Unified-alpha mask application: return src[i] scaled by the alpha
 * channel of mask[i], or src[i] unchanged when there is no mask.
 * A zero mask alpha short-circuits to 0 without reading the source.
 */
static force_inline uint32_t
combine_mask (const uint32_t *src, const uint32_t *mask, int i)
{
    uint32_t s, m;

    if (mask)
    {
	m = *(mask + i) >> A_SHIFT;

	if (!m)
	    return 0;
    }

    s = *(src + i);

    if (mask)
	UN8x4_MUL_UN8 (s, m);

    return s;
}
 
/* CLEAR operator: destination becomes fully transparent regardless of
 * source or mask.
 */
static void
combine_clear (pixman_implementation_t *imp,
               pixman_op_t              op,
               uint32_t *               dest,
               const uint32_t *         src,
               const uint32_t *         mask,
               int                      width)
{
    memset (dest, 0, width * sizeof (*dest));
}
 
/* DST operator: the destination is left untouched, so there is nothing
 * to do.
 */
static void
combine_dst (pixman_implementation_t *imp,
             pixman_op_t              op,
             uint32_t *               dest,
             const uint32_t *         src,
             const uint32_t *         mask,
             int                      width)
{
}
 
/* SRC operator (unified alpha): destination becomes the masked source.
 * Without a mask this is a straight memcpy.
 */
static void
combine_src_u (pixman_implementation_t *imp,
               pixman_op_t              op,
               uint32_t *               dest,
               const uint32_t *         src,
               const uint32_t *         mask,
               int                      width)
{
    int i;

    if (!mask)
    {
	memcpy (dest, src, width * sizeof (uint32_t));
	return;
    }

    for (i = 0; i < width; ++i)
	dest[i] = combine_mask (src, mask, i);
}
 
/* if the Src is opaque, call combine_src_u */
/* OVER operator (unified alpha): d = s + d * (1 - alpha(s)). */
/* if the Src is opaque, call combine_src_u */
static void
combine_over_u (pixman_implementation_t *imp,
                pixman_op_t              op,
                uint32_t *               dest,
                const uint32_t *         src,
                const uint32_t *         mask,
                int                      width)
{
    int i;

    for (i = 0; i < width; ++i)
    {
	uint32_t s = combine_mask (src, mask, i);
	uint32_t d = *(dest + i);
	/* ~s makes ALPHA_8 yield (255 - alpha(s)) */
	uint32_t ia = ALPHA_8 (~s);

	UN8x4_MUL_UN8_ADD_UN8x4 (d, ia, s);
	*(dest + i) = d;
    }
}
 
/* if the Dst is opaque, this is a noop */
/* OVER_REVERSE operator (unified alpha): d = d + s * (1 - alpha(d)). */
/* if the Dst is opaque, this is a noop */
static void
combine_over_reverse_u (pixman_implementation_t *imp,
                        pixman_op_t              op,
                        uint32_t *               dest,
                        const uint32_t *         src,
                        const uint32_t *         mask,
                        int                      width)
{
    int i;

    for (i = 0; i < width; ++i)
    {
	uint32_t s = combine_mask (src, mask, i);
	uint32_t d = *(dest + i);
	uint32_t ia = ALPHA_8 (~*(dest + i));
	UN8x4_MUL_UN8_ADD_UN8x4 (s, ia, d);
	*(dest + i) = s;
    }
}
 
/* if the Dst is opaque, call combine_src_u */
/* IN: result = src * dest.alpha. */
static void
combine_in_u (pixman_implementation_t *imp,
              pixman_op_t              op,
              uint32_t *               dest,
              const uint32_t *         src,
              const uint32_t *         mask,
              int                      width)
{
    int i;

    for (i = 0; i < width; ++i)
    {
	uint32_t s = combine_mask (src, mask, i);
	uint32_t dest_a = ALPHA_8 (dest[i]);

	UN8x4_MUL_UN8 (s, dest_a);
	dest[i] = s;
    }
}
 
/* if the Src is opaque, this is a noop */
/* IN_REVERSE: result = dest * src.alpha. */
static void
combine_in_reverse_u (pixman_implementation_t *imp,
                      pixman_op_t              op,
                      uint32_t *               dest,
                      const uint32_t *         src,
                      const uint32_t *         mask,
                      int                      width)
{
    int i;

    for (i = 0; i < width; ++i)
    {
	uint32_t s = combine_mask (src, mask, i);
	uint32_t d = dest[i];
	uint32_t src_a = ALPHA_8 (s);

	UN8x4_MUL_UN8 (d, src_a);
	dest[i] = d;
    }
}
 
/* if the Dst is opaque, call combine_clear */
/* OUT: result = src * (1 - dest.alpha). */
static void
combine_out_u (pixman_implementation_t *imp,
               pixman_op_t              op,
               uint32_t *               dest,
               const uint32_t *         src,
               const uint32_t *         mask,
               int                      width)
{
    int i;

    for (i = 0; i < width; ++i)
    {
	uint32_t s = combine_mask (src, mask, i);
	uint32_t dest_ia = ALPHA_8 (~dest[i]);

	UN8x4_MUL_UN8 (s, dest_ia);
	dest[i] = s;
    }
}
 
/* if the Src is opaque, call combine_clear */
/* OUT_REVERSE: result = dest * (1 - src.alpha). */
static void
combine_out_reverse_u (pixman_implementation_t *imp,
                       pixman_op_t              op,
                       uint32_t *               dest,
                       const uint32_t *         src,
                       const uint32_t *         mask,
                       int                      width)
{
    int i;

    for (i = 0; i < width; ++i)
    {
	uint32_t s = combine_mask (src, mask, i);
	uint32_t d = dest[i];
	uint32_t src_ia = ALPHA_8 (~s);

	UN8x4_MUL_UN8 (d, src_ia);
	dest[i] = d;
    }
}
 
/* if the Src is opaque, call combine_in_u */
/* if the Dst is opaque, call combine_over_u */
/* if both the Src and Dst are opaque, call combine_src_u */
/* ATOP: result = src * dest.alpha + dest * (1 - src.alpha). */
static void
combine_atop_u (pixman_implementation_t *imp,
                pixman_op_t              op,
                uint32_t *               dest,
                const uint32_t *         src,
                const uint32_t *         mask,
                int                      width)
{
    int i;

    for (i = 0; i < width; ++i)
    {
	uint32_t s = combine_mask (src, mask, i);
	uint32_t d = dest[i];
	uint32_t da = ALPHA_8 (d);
	uint32_t sia = ALPHA_8 (~s);

	UN8x4_MUL_UN8_ADD_UN8x4_MUL_UN8 (s, da, d, sia);
	dest[i] = s;
    }
}
 
/* if the Src is opaque, call combine_over_reverse_u */
/* if the Dst is opaque, call combine_in_reverse_u */
/* if both the Src and Dst are opaque, call combine_dst_u */
/* ATOP_REVERSE: result = src * (1 - dest.alpha) + dest * src.alpha. */
static void
combine_atop_reverse_u (pixman_implementation_t *imp,
                        pixman_op_t              op,
                        uint32_t *               dest,
                        const uint32_t *         src,
                        const uint32_t *         mask,
                        int                      width)
{
    int i;

    for (i = 0; i < width; ++i)
    {
	uint32_t s = combine_mask (src, mask, i);
	uint32_t d = dest[i];
	uint32_t sa = ALPHA_8 (s);
	uint32_t dia = ALPHA_8 (~d);

	UN8x4_MUL_UN8_ADD_UN8x4_MUL_UN8 (s, dia, d, sa);
	dest[i] = s;
    }
}
 
/* if the Src is opaque, call combine_over_u */
/* if the Dst is opaque, call combine_over_reverse_u */
/* if both the Src and Dst are opaque, call combine_clear */
/* XOR: result = src * (1 - dest.alpha) + dest * (1 - src.alpha). */
static void
combine_xor_u (pixman_implementation_t *imp,
               pixman_op_t              op,
               uint32_t *               dest,
               const uint32_t *         src,
               const uint32_t *         mask,
               int                      width)
{
    int i;

    for (i = 0; i < width; ++i)
    {
	uint32_t s = combine_mask (src, mask, i);
	uint32_t d = dest[i];
	uint32_t sia = ALPHA_8 (~s);
	uint32_t dia = ALPHA_8 (~d);

	UN8x4_MUL_UN8_ADD_UN8x4_MUL_UN8 (s, dia, d, sia);
	dest[i] = s;
    }
}
 
/* ADD: per-channel saturating addition of source onto destination. */
static void
combine_add_u (pixman_implementation_t *imp,
               pixman_op_t              op,
               uint32_t *               dest,
               const uint32_t *         src,
               const uint32_t *         mask,
               int                      width)
{
    int i;

    for (i = 0; i < width; ++i)
    {
	uint32_t s = combine_mask (src, mask, i);
	uint32_t d = dest[i];

	UN8x4_ADD_UN8x4 (d, s);
	dest[i] = d;
    }
}
 
/* if the Src is opaque, call combine_add_u */
/* if the Dst is opaque, call combine_add_u */
/* if both the Src and Dst are opaque, call combine_add_u */
/* SATURATE: add as much of the source as fits into the remaining alpha
 * headroom of the destination.  When the source alpha exceeds the
 * destination's inverse alpha, the source is scaled down first so the
 * per-channel addition cannot overflow.
 *
 * Fix: removed a stray empty statement (';') left after the if block.
 */
static void
combine_saturate_u (pixman_implementation_t *imp,
                    pixman_op_t              op,
                    uint32_t *               dest,
                    const uint32_t *         src,
                    const uint32_t *         mask,
                    int                      width)
{
    int i;

    for (i = 0; i < width; ++i)
    {
	uint32_t s = combine_mask (src, mask, i);
	uint32_t d = *(dest + i);
	uint16_t sa, da;

	sa = s >> A_SHIFT;
	da = ~d >> A_SHIFT;     /* remaining alpha headroom in dest */
	if (sa > da)
	{
	    /* Scale source by da/sa so that adding it saturates exactly
	     * at full alpha.
	     */
	    sa = DIV_UN8 (da, sa);
	    UN8x4_MUL_UN8 (s, sa);
	}

	UN8x4_ADD_UN8x4 (d, s);
	*(dest + i) = d;
    }
}
 
/*
* PDF blend modes:
* The following blend modes have been taken from the PDF ISO 32000
* specification, which at this point in time is available from
* http://www.adobe.com/devnet/acrobat/pdfs/PDF32000_2008.pdf
* The relevant chapters are 11.3.5 and 11.3.6.
* The formula for computing the final pixel color given in 11.3.6 is:
* αr × Cr = (1 – αs) × αb × Cb + (1 – αb) × αs × Cs + αb × αs × B(Cb, Cs)
* with B() being the blend function.
* Note that OVER is a special case of this operation, using B(Cb, Cs) = Cs
*
* These blend modes should match the SVG filter draft specification, as
* it has been designed to mirror ISO 32000. Note that at the current point
* no released draft exists that shows this, as the formulas have not been
* updated yet after the release of ISO 32000.
*
* The default implementation here uses the PDF_SEPARABLE_BLEND_MODE and
* PDF_NON_SEPARABLE_BLEND_MODE macros, which take the blend function as an
* argument. Note that this implementation operates on premultiplied colors,
* while the PDF specification does not. Therefore the code uses the formula
* ar.Cra = (1 – as) . Dca + (1 – ad) . Sca + B(Dca, ad, Sca, as)
*/
 
/*
* Multiply
* B(Dca, ad, Sca, as) = Dca.Sca
*/
 
/* MULTIPLY (unified alpha):
 * result = (1 - dest.alpha) * src + (1 - src.alpha) * dest + src * dest
 */
static void
combine_multiply_u (pixman_implementation_t *imp,
                    pixman_op_t              op,
                    uint32_t *               dest,
                    const uint32_t *         src,
                    const uint32_t *         mask,
                    int                      width)
{
    int i;

    for (i = 0; i < width; ++i)
    {
	uint32_t s = combine_mask (src, mask, i);
	uint32_t d = dest[i];
	uint32_t over = s;
	uint32_t src_ia = ALPHA_8 (~s);
	uint32_t dest_ia = ALPHA_8 (~d);

	/* "Over" part: (1 - ad).Sca + (1 - as).Dca */
	UN8x4_MUL_UN8_ADD_UN8x4_MUL_UN8 (over, dest_ia, d, src_ia);

	/* Blend part: Sca.Dca, then add both contributions. */
	UN8x4_MUL_UN8x4 (d, s);
	UN8x4_ADD_UN8x4 (d, over);

	dest[i] = d;
    }
}
 
/* MULTIPLY (component alpha): same blend as combine_multiply_u, but the
 * mask provides a separate alpha for each channel.
 */
static void
combine_multiply_ca (pixman_implementation_t *imp,
                     pixman_op_t              op,
                     uint32_t *               dest,
                     const uint32_t *         src,
                     const uint32_t *         mask,
                     int                      width)
{
    int i;

    for (i = 0; i < width; ++i)
    {
	uint32_t m = mask[i];
	uint32_t s = src[i];
	uint32_t d = dest[i];
	uint32_t over = d;
	uint32_t dest_ia = ALPHA_8 (~d);

	combine_mask_value_ca (&s, &m);

	/* "Over" part, with per-channel source alpha from the mask. */
	UN8x4_MUL_UN8x4_ADD_UN8x4_MUL_UN8 (over, ~m, s, dest_ia);

	/* Blend part: Sca.Dca, then add both contributions. */
	UN8x4_MUL_UN8x4 (d, s);
	UN8x4_ADD_UN8x4 (over, d);

	dest[i] = over;
    }
}
 
/* Generate the unified-alpha (_u) and component-alpha (_ca) combiners for
 * a separable PDF blend mode.  blend_<name> computes the per-channel
 * B(Dca, ad, Sca, as) term; the macro wraps it with the common
 * (1 - as).Dca + (1 - ad).Sca "over" portion and the combined alpha.
 */
#define PDF_SEPARABLE_BLEND_MODE(name) \
    static void \
    combine_ ## name ## _u (pixman_implementation_t *imp, \
			    pixman_op_t              op, \
                            uint32_t *               dest, \
			    const uint32_t *         src, \
			    const uint32_t *         mask, \
			    int                      width) \
    { \
	int i; \
	for (i = 0; i < width; ++i) { \
	    uint32_t s = combine_mask (src, mask, i); \
	    uint32_t d = *(dest + i); \
	    uint8_t sa = ALPHA_8 (s); \
	    uint8_t isa = ~sa; \
	    uint8_t da = ALPHA_8 (d); \
	    uint8_t ida = ~da; \
	    uint32_t result; \
	    \
	    result = d; \
	    UN8x4_MUL_UN8_ADD_UN8x4_MUL_UN8 (result, isa, s, ida); \
	    \
	    *(dest + i) = result + \
		(DIV_ONE_UN8 (sa * da) << A_SHIFT) + \
		(blend_ ## name (RED_8 (d), da, RED_8 (s), sa) << R_SHIFT) + \
		(blend_ ## name (GREEN_8 (d), da, GREEN_8 (s), sa) << G_SHIFT) + \
		(blend_ ## name (BLUE_8 (d), da, BLUE_8 (s), sa)); \
	} \
    } \
    \
    static void \
    combine_ ## name ## _ca (pixman_implementation_t *imp, \
			     pixman_op_t              op, \
                             uint32_t *               dest, \
			     const uint32_t *         src, \
			     const uint32_t *         mask, \
			     int                      width) \
    { \
	int i; \
	for (i = 0; i < width; ++i) { \
	    uint32_t m = *(mask + i); \
	    uint32_t s = *(src + i); \
	    uint32_t d = *(dest + i); \
	    uint8_t da = ALPHA_8 (d); \
	    uint8_t ida = ~da; \
	    uint32_t result; \
            \
	    combine_mask_value_ca (&s, &m); \
            \
	    result = d; \
	    UN8x4_MUL_UN8x4_ADD_UN8x4_MUL_UN8 (result, ~m, s, ida); \
            \
	    result += \
	        (DIV_ONE_UN8 (ALPHA_8 (m) * da) << A_SHIFT) + \
	        (blend_ ## name (RED_8 (d), da, RED_8 (s), RED_8 (m)) << R_SHIFT) + \
	        (blend_ ## name (GREEN_8 (d), da, GREEN_8 (s), GREEN_8 (m)) << G_SHIFT) + \
	        (blend_ ## name (BLUE_8 (d), da, BLUE_8 (s), BLUE_8 (m))); \
	    \
	    *(dest + i) = result; \
	} \
    }
 
/*
 * Screen
 * B(Dca, ad, Sca, as) = Dca.sa + Sca.da - Dca.Sca
 */
static inline uint32_t
blend_screen (uint32_t dca, uint32_t da, uint32_t sca, uint32_t sa)
{
    /* Products are in units of MASK^2; DIV_ONE_UN8 rescales to [0, MASK]. */
    return DIV_ONE_UN8 (sca * da + dca * sa - dca * sca);
}

PDF_SEPARABLE_BLEND_MODE (screen)
 
/*
 * Overlay
 * B(Dca, Da, Sca, Sa) =
 * if 2.Dca < Da
 * 2.Sca.Dca
 * otherwise
 * Sa.Da - 2.(Da - Dca).(Sa - Sca)
 */
static inline uint32_t
blend_overlay (uint32_t dca, uint32_t da, uint32_t sca, uint32_t sa)
{
    uint32_t result;

    if (dca * 2 < da)
	result = 2 * sca * dca;
    else
	result = sa * da - 2 * (da - dca) * (sa - sca);

    return DIV_ONE_UN8 (result);
}

PDF_SEPARABLE_BLEND_MODE (overlay)
 
/*
 * Darken
 * B(Dca, Da, Sca, Sa) = min (Sca.Da, Dca.Sa)
 */
static inline uint32_t
blend_darken (uint32_t dca, uint32_t da, uint32_t sca, uint32_t sa)
{
    uint32_t sd = sca * da;
    uint32_t ds = dca * sa;

    return DIV_ONE_UN8 (sd < ds ? sd : ds);
}

PDF_SEPARABLE_BLEND_MODE (darken)
 
/*
 * Lighten
 * B(Dca, Da, Sca, Sa) = max (Sca.Da, Dca.Sa)
 */
static inline uint32_t
blend_lighten (uint32_t dca, uint32_t da, uint32_t sca, uint32_t sa)
{
    uint32_t sd = sca * da;
    uint32_t ds = dca * sa;

    return DIV_ONE_UN8 (sd < ds ? ds : sd);
}

PDF_SEPARABLE_BLEND_MODE (lighten)
 
/*
 * Color dodge
 * B(Dca, Da, Sca, Sa) =
 * if Dca == 0
 * 0
 * if Sca == Sa
 * Sa.Da
 * otherwise
 * Sa.Da. min (1, Dca / Da / (1 - Sca/Sa))
 */
static inline uint32_t
blend_color_dodge (uint32_t dca, uint32_t da, uint32_t sca, uint32_t sa)
{
    if (sca < sa)
    {
	uint32_t rca = dca * sa / (sa - sca);

	return DIV_ONE_UN8 (sa * MIN (rca, da));
    }

    /* Sca >= Sa: result is 0 for black dest, Sa.Da otherwise. */
    return dca == 0 ? 0 : DIV_ONE_UN8 (sa * da);
}

PDF_SEPARABLE_BLEND_MODE (color_dodge)
 
/*
 * Color burn
 * B(Dca, Da, Sca, Sa) =
 * if Dca == Da
 * Sa.Da
 * if Sca == 0
 * 0
 * otherwise
 * Sa.Da.(1 - min (1, (1 - Dca/Da).Sa / Sca))
 */
static inline uint32_t
blend_color_burn (uint32_t dca, uint32_t da, uint32_t sca, uint32_t sa)
{
    if (sca != 0)
    {
	uint32_t rca = (da - dca) * sa / sca;

	return DIV_ONE_UN8 (sa * (MAX (rca, da) - rca));
    }

    /* Sca == 0: result is Sa.Da for fully covered dest, 0 otherwise. */
    return dca < da ? 0 : DIV_ONE_UN8 (sa * da);
}

PDF_SEPARABLE_BLEND_MODE (color_burn)
 
/*
 * Hard light
 * B(Dca, Da, Sca, Sa) =
 * if 2.Sca < Sa
 * 2.Sca.Dca
 * otherwise
 * Sa.Da - 2.(Da - Dca).(Sa - Sca)
 */
static inline uint32_t
blend_hard_light (uint32_t dca, uint32_t da, uint32_t sca, uint32_t sa)
{
    if (sca * 2 < sa)
	return DIV_ONE_UN8 (2 * sca * dca);

    return DIV_ONE_UN8 (sa * da - 2 * (da - dca) * (sa - sca));
}

PDF_SEPARABLE_BLEND_MODE (hard_light)
 
/*
 * Soft light
 * B(Dca, Da, Sca, Sa) =
 * if (2.Sca <= Sa)
 * Dca.(Sa - (1 - Dca/Da).(2.Sca - Sa))
 * otherwise if Dca.4 <= Da
 * Dca.(Sa + (2.Sca - Sa).((16.Dca/Da - 12).Dca/Da + 3)
 * otherwise
 * (Dca.Sa + (SQRT (Dca/Da).Da - Dca).(2.Sca - Sa))
 */
/* Unlike the other separable blend functions this one works in floating
 * point: the channels are normalized to [0, 1], blended, then scaled
 * back to [0, MASK] with round-to-nearest.
 */
static inline uint32_t
blend_soft_light (uint32_t dca_org,
                  uint32_t da_org,
                  uint32_t sca_org,
                  uint32_t sa_org)
{
    double dca = dca_org * (1.0 / MASK);
    double da = da_org * (1.0 / MASK);
    double sca = sca_org * (1.0 / MASK);
    double sa = sa_org * (1.0 / MASK);
    double rca;

    if (2 * sca < sa)
    {
	/* Darkening branch; guard against division by a zero dest alpha. */
	if (da == 0)
	    rca = dca * sa;
	else
	    rca = dca * sa - dca * (da - dca) * (sa - 2 * sca) / da;
    }
    else if (da == 0)
    {
	rca = 0;
    }
    else if (4 * dca <= da)
    {
	/* Lightening branch for dark destinations (polynomial segment). */
	rca = dca * sa +
	    (2 * sca - sa) * dca * ((16 * dca / da - 12) * dca / da + 3);
    }
    else
    {
	/* Lightening branch for bright destinations (sqrt segment). */
	rca = dca * sa + (sqrt (dca * da) - dca) * (2 * sca - sa);
    }
    return rca * MASK + 0.5;
}

PDF_SEPARABLE_BLEND_MODE (soft_light)
 
/*
 * Difference
 * B(Dca, Da, Sca, Sa) = abs (Dca.Sa - Sca.Da)
 */
static inline uint32_t
blend_difference (uint32_t dca, uint32_t da, uint32_t sca, uint32_t sa)
{
    uint32_t ds = dca * sa;
    uint32_t sd = sca * da;

    /* Absolute difference, computed without signed arithmetic. */
    return (sd < ds) ? DIV_ONE_UN8 (ds - sd) : DIV_ONE_UN8 (sd - ds);
}

PDF_SEPARABLE_BLEND_MODE (difference)
 
/*
 * Exclusion
 * B(Dca, Da, Sca, Sa) = (Sca.Da + Dca.Sa - 2.Sca.Dca)
 */

/* This can be made faster by writing it directly and not using
 * PDF_SEPARABLE_BLEND_MODE, but that's a performance optimization */

static inline uint32_t
blend_exclusion (uint32_t dca, uint32_t da, uint32_t sca, uint32_t sa)
{
    return DIV_ONE_UN8 (sca * da + sa * dca - 2 * sca * dca);
}

PDF_SEPARABLE_BLEND_MODE (exclusion)
 
#undef PDF_SEPARABLE_BLEND_MODE
 
/*
 * PDF non-separable blend modes are implemented using the following functions
* to operate in Hsl space, with Cmax, Cmid, Cmin referring to the max, mid
* and min value of the red, green and blue components.
*
* LUM (C) = 0.3 × Cred + 0.59 × Cgreen + 0.11 × Cblue
*
* clip_color (C):
* l = LUM (C)
* min = Cmin
* max = Cmax
* if n < 0.0
* C = l + ( ( ( C – l ) × l ) ⁄ ( l – min ) )
* if x > 1.0
* C = l + ( ( ( C – l ) × ( 1 – l ) ) ⁄ ( max – l ) )
* return C
*
* set_lum (C, l):
* d = l – LUM (C)
* C += d
* return clip_color (C)
*
* SAT (C) = CH_MAX (C) - CH_MIN (C)
*
* set_sat (C, s):
* if Cmax > Cmin
* Cmid = ( ( ( Cmid – Cmin ) × s ) ⁄ ( Cmax – Cmin ) )
* Cmax = s
* else
* Cmid = Cmax = 0.0
* Cmin = 0.0
* return C
*/
 
/* For premultiplied colors, we need to know what happens when C is
* multiplied by a real number. LUM and SAT are linear:
*
* LUM (r × C) = r × LUM (C) SAT (r * C) = r * SAT (C)
*
* If we extend clip_color with an extra argument a and change
*
* if x >= 1.0
*
* into
*
* if x >= a
*
* then clip_color is also linear:
*
* r * clip_color (C, a) = clip_color (r_c, ra);
*
* for positive r.
*
* Similarly, we can extend set_lum with an extra argument that is just passed
* on to clip_color:
*
* r * set_lum ( C, l, a)
*
* = r × clip_color ( C + l - LUM (C), a)
*
* = clip_color ( r * C + r × l - r * LUM (C), r * a)
*
* = set_lum ( r * C, r * l, r * a)
*
* Finally, set_sat:
*
* r * set_sat (C, s) = set_sat (x * C, r * s)
*
 * The above holds for all non-zero x, because the x's in the fraction for
* C_mid cancel out. Specifically, it holds for x = r:
*
* r * set_sat (C, s) = set_sat (r_c, rs)
*
*/
 
/* So, for the non-separable PDF blend modes, we have (using s, d for
* non-premultiplied colors, and S, D for premultiplied:
*
* Color:
*
* a_s * a_d * B(s, d)
* = a_s * a_d * set_lum (S/a_s, LUM (D/a_d), 1)
* = set_lum (S * a_d, a_s * LUM (D), a_s * a_d)
*
*
* Luminosity:
*
* a_s * a_d * B(s, d)
* = a_s * a_d * set_lum (D/a_d, LUM(S/a_s), 1)
* = set_lum (a_s * D, a_d * LUM(S), a_s * a_d)
*
*
* Saturation:
*
* a_s * a_d * B(s, d)
* = a_s * a_d * set_lum (set_sat (D/a_d, SAT (S/a_s)), LUM (D/a_d), 1)
* = set_lum (a_s * a_d * set_sat (D/a_d, SAT (S/a_s)),
* a_s * LUM (D), a_s * a_d)
* = set_lum (set_sat (a_s * D, a_d * SAT (S), a_s * LUM (D), a_s * a_d))
*
* Hue:
*
* a_s * a_d * B(s, d)
* = a_s * a_d * set_lum (set_sat (S/a_s, SAT (D/a_d)), LUM (D/a_d), 1)
* = a_s * a_d * set_lum (set_sat (a_d * S, a_s * SAT (D)),
* a_s * LUM (D), a_s * a_d)
*
*/
 
/* Channel helpers for 3-element color arrays (r, g, b in c[0..2]):
 * min/max channel, ITU-style luminosity (30/59/11 weights), saturation.
 */
#define CH_MIN(c) (c[0] < c[1] ? (c[0] < c[2] ? c[0] : c[2]) : (c[1] < c[2] ? c[1] : c[2]))
#define CH_MAX(c) (c[0] > c[1] ? (c[0] > c[2] ? c[0] : c[2]) : (c[1] > c[2] ? c[1] : c[2]))
#define LUM(c) ((c[0] * 30 + c[1] * 59 + c[2] * 11) / 100)
#define SAT(c) (CH_MAX (c) - CH_MIN (c))
 
/* Generate the unified-alpha combiner for a non-separable (HSL) PDF blend
 * mode.  blend_<name> fills c[] with the blended channels scaled by
 * sa*da; the macro adds the common "over" portion and the result alpha.
 */
#define PDF_NON_SEPARABLE_BLEND_MODE(name) \
    static void \
    combine_ ## name ## _u (pixman_implementation_t *imp, \
			    pixman_op_t op, \
                            uint32_t *dest, \
			    const uint32_t *src, \
			    const uint32_t *mask, \
			    int width) \
    { \
	int i; \
	for (i = 0; i < width; ++i) \
	{ \
	    uint32_t s = combine_mask (src, mask, i); \
	    uint32_t d = *(dest + i); \
	    uint8_t sa = ALPHA_8 (s); \
	    uint8_t isa = ~sa; \
	    uint8_t da = ALPHA_8 (d); \
	    uint8_t ida = ~da; \
	    uint32_t result; \
	    uint32_t sc[3], dc[3], c[3]; \
            \
	    result = d; \
	    UN8x4_MUL_UN8_ADD_UN8x4_MUL_UN8 (result, isa, s, ida); \
	    dc[0] = RED_8 (d); \
	    sc[0] = RED_8 (s); \
	    dc[1] = GREEN_8 (d); \
	    sc[1] = GREEN_8 (s); \
	    dc[2] = BLUE_8 (d); \
	    sc[2] = BLUE_8 (s); \
	    blend_ ## name (c, dc, da, sc, sa); \
	    \
	    *(dest + i) = result + \
		(DIV_ONE_UN8 (sa * da) << A_SHIFT) + \
		(DIV_ONE_UN8 (c[0]) << R_SHIFT) + \
		(DIV_ONE_UN8 (c[1]) << G_SHIFT) + \
		(DIV_ONE_UN8 (c[2])); \
	} \
    }
 
/* Set the luminosity of the premultiplied color src (alpha sa) to lum,
 * then clip every channel into [0, sa].  Implements the extended
 * set_lum/clip_color from the derivation in the comments above.  Works
 * in doubles normalized to [0, 1]; results are scaled back to [0, MASK]
 * with round-to-nearest.
 */
static void
set_lum (uint32_t dest[3], uint32_t src[3], uint32_t sa, uint32_t lum)
{
    double a, l, min, max;
    double tmp[3];

    a = sa * (1.0 / MASK);

    l = lum * (1.0 / MASK);
    tmp[0] = src[0] * (1.0 / MASK);
    tmp[1] = src[1] * (1.0 / MASK);
    tmp[2] = src[2] * (1.0 / MASK);

    /* Shift all channels by the difference between the requested and the
     * current luminosity.
     */
    l = l - LUM (tmp);
    tmp[0] += l;
    tmp[1] += l;
    tmp[2] += l;

    /* clip_color */
    l = LUM (tmp);
    min = CH_MIN (tmp);
    max = CH_MAX (tmp);

    if (min < 0)
    {
	if (l - min == 0.0)
	{
	    /* Degenerate case: all channels equal the (negative) minimum. */
	    tmp[0] = 0;
	    tmp[1] = 0;
	    tmp[2] = 0;
	}
	else
	{
	    /* Compress channels toward l so the minimum lands on 0. */
	    tmp[0] = l + (tmp[0] - l) * l / (l - min);
	    tmp[1] = l + (tmp[1] - l) * l / (l - min);
	    tmp[2] = l + (tmp[2] - l) * l / (l - min);
	}
    }
    if (max > a)
    {
	if (max - l == 0.0)
	{
	    /* Degenerate case: all channels equal the maximum. */
	    tmp[0] = a;
	    tmp[1] = a;
	    tmp[2] = a;
	}
	else
	{
	    /* Compress channels toward l so the maximum lands on a. */
	    tmp[0] = l + (tmp[0] - l) * (a - l) / (max - l);
	    tmp[1] = l + (tmp[1] - l) * (a - l) / (max - l);
	    tmp[2] = l + (tmp[2] - l) * (a - l) / (max - l);
	}
    }

    dest[0] = tmp[0] * MASK + 0.5;
    dest[1] = tmp[1] * MASK + 0.5;
    dest[2] = tmp[2] * MASK + 0.5;
}
 
/* Set the saturation (max - min channel) of dest to sat, keeping the
 * relative ordering of the channels.  src supplies the ordering: id[0],
 * id[1], id[2] index the maximum, middle and minimum channel of src
 * respectively.  Implements set_sat from the comments above.
 */
static void
set_sat (uint32_t dest[3], uint32_t src[3], uint32_t sat)
{
    int id[3];
    uint32_t min, max;

    /* Sort the three channel indices of src by value (descending). */
    if (src[0] > src[1])
    {
	if (src[0] > src[2])
	{
	    id[0] = 0;
	    if (src[1] > src[2])
	    {
		id[1] = 1;
		id[2] = 2;
	    }
	    else
	    {
		id[1] = 2;
		id[2] = 1;
	    }
	}
	else
	{
	    id[0] = 2;
	    id[1] = 0;
	    id[2] = 1;
	}
    }
    else
    {
	if (src[0] > src[2])
	{
	    id[0] = 1;
	    id[1] = 0;
	    id[2] = 2;
	}
	else
	{
	    id[2] = 0;
	    if (src[1] > src[2])
	    {
		id[0] = 1;
		id[1] = 2;
	    }
	    else
	    {
		id[0] = 2;
		id[1] = 1;
	    }
	}
    }

    max = dest[id[0]];
    min = dest[id[2]];
    if (max > min)
    {
	/* Rescale the middle channel proportionally, pin max to sat and
	 * min to 0 so the new saturation is exactly sat.
	 */
	dest[id[1]] = (dest[id[1]] - min) * sat / (max - min);
	dest[id[0]] = sat;
	dest[id[2]] = 0;
    }
    else
    {
	/* Zero saturation: gray input maps to black. */
	dest[0] = dest[1] = dest[2] = 0;
    }
}
 
/*
 * Hue:
 * B(Cb, Cs) = set_lum (set_sat (Cs, SAT (Cb)), LUM (Cb))
 */
static inline void
blend_hsl_hue (uint32_t c[3],
               uint32_t dc[3],
               uint32_t da,
               uint32_t sc[3],
               uint32_t sa)
{
    int i;

    /* Start from the source channels scaled by the destination alpha. */
    for (i = 0; i < 3; ++i)
	c[i] = sc[i] * da;

    set_sat (c, c, SAT (dc) * sa);
    set_lum (c, c, sa * da, LUM (dc) * sa);
}

PDF_NON_SEPARABLE_BLEND_MODE (hsl_hue)
 
/*
 * Saturation:
 * B(Cb, Cs) = set_lum (set_sat (Cb, SAT (Cs)), LUM (Cb))
 */
static inline void
blend_hsl_saturation (uint32_t c[3],
                      uint32_t dc[3],
                      uint32_t da,
                      uint32_t sc[3],
                      uint32_t sa)
{
    int i;

    /* Start from the destination channels scaled by the source alpha. */
    for (i = 0; i < 3; ++i)
	c[i] = dc[i] * sa;

    set_sat (c, c, SAT (sc) * da);
    set_lum (c, c, sa * da, LUM (dc) * sa);
}

PDF_NON_SEPARABLE_BLEND_MODE (hsl_saturation)
 
/*
 * Color:
 * B(Cb, Cs) = set_lum (Cs, LUM (Cb))
 */
static inline void
blend_hsl_color (uint32_t c[3],
                 uint32_t dc[3],
                 uint32_t da,
                 uint32_t sc[3],
                 uint32_t sa)
{
    int i;

    /* Start from the source channels scaled by the destination alpha. */
    for (i = 0; i < 3; ++i)
	c[i] = sc[i] * da;

    set_lum (c, c, sa * da, LUM (dc) * sa);
}

PDF_NON_SEPARABLE_BLEND_MODE (hsl_color)
 
/*
 * Luminosity:
 * B(Cb, Cs) = set_lum (Cb, LUM (Cs))
 */
static inline void
blend_hsl_luminosity (uint32_t c[3],
                      uint32_t dc[3],
                      uint32_t da,
                      uint32_t sc[3],
                      uint32_t sa)
{
    int i;

    /* Start from the destination channels scaled by the source alpha. */
    for (i = 0; i < 3; ++i)
	c[i] = dc[i] * sa;

    set_lum (c, c, sa * da, LUM (sc) * da);
}

PDF_NON_SEPARABLE_BLEND_MODE (hsl_luminosity)
 
#undef SAT
#undef LUM
#undef CH_MAX
#undef CH_MIN
#undef PDF_NON_SEPARABLE_BLEND_MODE
 
/*
*
* All of the disjoint composing functions
*
* The four entries in the first column indicate what source contributions
* come from each of the four areas of the picture -- areas covered by neither
* A nor B, areas covered only by A, areas covered only by B and finally
* areas covered by both A and B.
*
* Disjoint Conjoint
* Fa Fb Fa Fb
* (0,0,0,0) 0 0 0 0
* (0,A,0,A) 1 0 1 0
* (0,0,B,B) 0 1 0 1
* (0,A,B,A) 1 min((1-a)/b,1) 1 max(1-a/b,0)
* (0,A,B,B) min((1-b)/a,1) 1 max(1-b/a,0) 1
* (0,0,0,A) max(1-(1-b)/a,0) 0 min(1,b/a) 0
* (0,0,0,B) 0 max(1-(1-a)/b,0) 0 min(a/b,1)
* (0,A,0,0) min(1,(1-b)/a) 0 max(1-b/a,0) 0
* (0,0,B,0) 0 min(1,(1-a)/b) 0 max(1-a/b,0)
* (0,0,B,A) max(1-(1-b)/a,0) min(1,(1-a)/b) min(1,b/a) max(1-a/b,0)
* (0,A,0,B) min(1,(1-b)/a) max(1-(1-a)/b,0) max(1-b/a,0) min(1,a/b)
* (0,A,B,0) min(1,(1-b)/a) min(1,(1-a)/b) max(1-b/a,0) max(1-a/b,0)
*/
 
/* Bit flags selecting which contributions the generic disjoint/conjoint
 * combiners include: A = source, B = destination; OUT = uncovered part,
 * IN = overlapping part.  The named combinations below correspond to the
 * rows of the table above.
 */
#define COMBINE_A_OUT 1
#define COMBINE_A_IN 2
#define COMBINE_B_OUT 4
#define COMBINE_B_IN 8

#define COMBINE_CLEAR 0
#define COMBINE_A (COMBINE_A_OUT | COMBINE_A_IN)
#define COMBINE_B (COMBINE_B_OUT | COMBINE_B_IN)
#define COMBINE_A_OVER (COMBINE_A_OUT | COMBINE_B_OUT | COMBINE_A_IN)
#define COMBINE_B_OVER (COMBINE_A_OUT | COMBINE_B_OUT | COMBINE_B_IN)
#define COMBINE_A_ATOP (COMBINE_B_OUT | COMBINE_A_IN)
#define COMBINE_B_ATOP (COMBINE_A_OUT | COMBINE_B_IN)
#define COMBINE_XOR (COMBINE_A_OUT | COMBINE_B_OUT)
 
/* Disjoint: fraction of a not covered by b: min (1, (1 - b) / a). */
static uint8_t
combine_disjoint_out_part (uint8_t a, uint8_t b)
{
    uint8_t ib = ~b;                    /* 1 - b */

    if (ib >= a)                        /* (1 - b) / a >= 1 */
	return MASK;

    return DIV_UN8 (ib, a);             /* (1 - b) / a */
}

/* Disjoint: fraction of a covered by b: max (1 - (1 - b) / a, 0). */
static uint8_t
combine_disjoint_in_part (uint8_t a, uint8_t b)
{
    uint8_t ib = ~b;                    /* 1 - b */

    if (ib >= a)                        /* (1 - b) / a >= 1 */
	return 0;

    return ~DIV_UN8 (ib, a);            /* 1 - (1 - b) / a */
}

/* Conjoint: fraction of a not covered by b: max (1 - b / a, 0). */
static uint8_t
combine_conjoint_out_part (uint8_t a, uint8_t b)
{
    if (b >= a)                         /* b / a >= 1 */
	return 0x00;

    return ~DIV_UN8 (b, a);             /* 1 - b / a */
}

/* Conjoint: fraction of a covered by b: min (1, b / a). */
static uint8_t
combine_conjoint_in_part (uint8_t a, uint8_t b)
{
    if (b >= a)                         /* b / a >= 1 */
	return MASK;

    return DIV_UN8 (b, a);              /* b / a */
}
 
/* GET_COMP extracts the 8-bit channel at shift i.  ADD produces the
 * saturating sum of one channel; GENERIC computes the weighted sum
 * Fa*x + Fb*y for one channel, saturating at MASK.  Both return the
 * result already shifted back into position i.
 */
#define GET_COMP(v, i) ((uint16_t) (uint8_t) ((v) >> i))

#define ADD(x, y, i, t) \
    ((t) = GET_COMP (x, i) + GET_COMP (y, i), \
     (uint32_t) ((uint8_t) ((t) | (0 - ((t) >> G_SHIFT)))) << (i))

#define GENERIC(x, y, i, ax, ay, t, u, v) \
    ((t) = (MUL_UN8 (GET_COMP (y, i), ay, (u)) + \
            MUL_UN8 (GET_COMP (x, i), ax, (v))), \
     (uint32_t) ((uint8_t) ((t) | \
                            (0 - ((t) >> G_SHIFT)))) << (i))
 
/* Generic disjoint combiner: for each pixel, pick the source factor Fa
 * and destination factor Fb from the disjoint table above according to
 * the 'combine' flags, then form Fa*src + Fb*dest channel by channel
 * (saturating).  Used by all of the combine_disjoint_*_u entry points.
 */
static void
combine_disjoint_general_u (uint32_t *      dest,
                            const uint32_t *src,
                            const uint32_t *mask,
                            int             width,
                            uint8_t         combine)
{
    int i;

    for (i = 0; i < width; ++i)
    {
	uint32_t s = combine_mask (src, mask, i);
	uint32_t d = *(dest + i);
	uint32_t m, n, o, p;
	uint16_t Fa, Fb, t, u, v;
	uint8_t sa = s >> A_SHIFT;
	uint8_t da = d >> A_SHIFT;

	/* Source factor, selected by the A-related flag bits. */
	switch (combine & COMBINE_A)
	{
	default:
	    Fa = 0;
	    break;

	case COMBINE_A_OUT:
	    Fa = combine_disjoint_out_part (sa, da);
	    break;

	case COMBINE_A_IN:
	    Fa = combine_disjoint_in_part (sa, da);
	    break;

	case COMBINE_A:
	    Fa = MASK;
	    break;
	}

	/* Destination factor, selected by the B-related flag bits. */
	switch (combine & COMBINE_B)
	{
	default:
	    Fb = 0;
	    break;

	case COMBINE_B_OUT:
	    Fb = combine_disjoint_out_part (da, sa);
	    break;

	case COMBINE_B_IN:
	    Fb = combine_disjoint_in_part (da, sa);
	    break;

	case COMBINE_B:
	    Fb = MASK;
	    break;
	}
	/* Fa*s + Fb*d per channel, saturating. */
	m = GENERIC (s, d, 0, Fa, Fb, t, u, v);
	n = GENERIC (s, d, G_SHIFT, Fa, Fb, t, u, v);
	o = GENERIC (s, d, R_SHIFT, Fa, Fb, t, u, v);
	p = GENERIC (s, d, A_SHIFT, Fa, Fb, t, u, v);
	s = m | n | o | p;
	*(dest + i) = s;
    }
}
 
/* Disjoint OVER: dest = src + dest * min (1, (1 - dest.a) / src.a).
 * Pixels with a fully zero source are left untouched.
 */
static void
combine_disjoint_over_u (pixman_implementation_t *imp,
                         pixman_op_t              op,
                         uint32_t *               dest,
                         const uint32_t *         src,
                         const uint32_t *         mask,
                         int                      width)
{
    int i;

    for (i = 0; i < width; ++i)
    {
	uint32_t s = combine_mask (src, mask, i);
	uint16_t sa = s >> A_SHIFT;

	if (s != 0x00)
	{
	    uint32_t d = dest[i];
	    uint16_t f = combine_disjoint_out_part (d >> A_SHIFT, sa);

	    UN8x4_MUL_UN8_ADD_UN8x4 (d, f, s);
	    dest[i] = d;
	}
    }
}
 
/* Disjoint IN: keep the part of the source covered by the destination. */
static void
combine_disjoint_in_u (pixman_implementation_t *imp,
                       pixman_op_t              op,
                       uint32_t *               dest,
                       const uint32_t *         src,
                       const uint32_t *         mask,
                       int                      width)
{
    combine_disjoint_general_u (dest, src, mask, width, COMBINE_A_IN);
}

/* Disjoint IN_REVERSE: keep the part of the destination covered by the source. */
static void
combine_disjoint_in_reverse_u (pixman_implementation_t *imp,
                               pixman_op_t              op,
                               uint32_t *               dest,
                               const uint32_t *         src,
                               const uint32_t *         mask,
                               int                      width)
{
    combine_disjoint_general_u (dest, src, mask, width, COMBINE_B_IN);
}

/* Disjoint OUT: keep the part of the source not covered by the destination. */
static void
combine_disjoint_out_u (pixman_implementation_t *imp,
                        pixman_op_t              op,
                        uint32_t *               dest,
                        const uint32_t *         src,
                        const uint32_t *         mask,
                        int                      width)
{
    combine_disjoint_general_u (dest, src, mask, width, COMBINE_A_OUT);
}

/* Disjoint OUT_REVERSE: keep the part of the destination not covered by the source. */
static void
combine_disjoint_out_reverse_u (pixman_implementation_t *imp,
                                pixman_op_t              op,
                                uint32_t *               dest,
                                const uint32_t *         src,
                                const uint32_t *         mask,
                                int                      width)
{
    combine_disjoint_general_u (dest, src, mask, width, COMBINE_B_OUT);
}

/* Disjoint ATOP: source inside the destination, destination outside the source. */
static void
combine_disjoint_atop_u (pixman_implementation_t *imp,
                         pixman_op_t              op,
                         uint32_t *               dest,
                         const uint32_t *         src,
                         const uint32_t *         mask,
                         int                      width)
{
    combine_disjoint_general_u (dest, src, mask, width, COMBINE_A_ATOP);
}

/* Disjoint ATOP_REVERSE: destination inside the source, source outside the destination. */
static void
combine_disjoint_atop_reverse_u (pixman_implementation_t *imp,
                                 pixman_op_t              op,
                                 uint32_t *               dest,
                                 const uint32_t *         src,
                                 const uint32_t *         mask,
                                 int                      width)
{
    combine_disjoint_general_u (dest, src, mask, width, COMBINE_B_ATOP);
}

/* Disjoint XOR: the non-overlapping parts of source and destination. */
static void
combine_disjoint_xor_u (pixman_implementation_t *imp,
                        pixman_op_t              op,
                        uint32_t *               dest,
                        const uint32_t *         src,
                        const uint32_t *         mask,
                        int                      width)
{
    combine_disjoint_general_u (dest, src, mask, width, COMBINE_XOR);
}
 
/* Generic conjoint combiner: identical in structure to
 * combine_disjoint_general_u, but the Fa/Fb factors come from the
 * conjoint column of the table above.  Used by all of the
 * combine_conjoint_*_u entry points.
 */
static void
combine_conjoint_general_u (uint32_t *      dest,
                            const uint32_t *src,
                            const uint32_t *mask,
                            int             width,
                            uint8_t         combine)
{
    int i;

    for (i = 0; i < width; ++i)
    {
	uint32_t s = combine_mask (src, mask, i);
	uint32_t d = *(dest + i);
	uint32_t m, n, o, p;
	uint16_t Fa, Fb, t, u, v;
	uint8_t sa = s >> A_SHIFT;
	uint8_t da = d >> A_SHIFT;

	/* Source factor, selected by the A-related flag bits. */
	switch (combine & COMBINE_A)
	{
	default:
	    Fa = 0;
	    break;

	case COMBINE_A_OUT:
	    Fa = combine_conjoint_out_part (sa, da);
	    break;

	case COMBINE_A_IN:
	    Fa = combine_conjoint_in_part (sa, da);
	    break;

	case COMBINE_A:
	    Fa = MASK;
	    break;
	}

	/* Destination factor, selected by the B-related flag bits. */
	switch (combine & COMBINE_B)
	{
	default:
	    Fb = 0;
	    break;

	case COMBINE_B_OUT:
	    Fb = combine_conjoint_out_part (da, sa);
	    break;

	case COMBINE_B_IN:
	    Fb = combine_conjoint_in_part (da, sa);
	    break;

	case COMBINE_B:
	    Fb = MASK;
	    break;
	}

	/* Fa*s + Fb*d per channel, saturating. */
	m = GENERIC (s, d, 0, Fa, Fb, t, u, v);
	n = GENERIC (s, d, G_SHIFT, Fa, Fb, t, u, v);
	o = GENERIC (s, d, R_SHIFT, Fa, Fb, t, u, v);
	p = GENERIC (s, d, A_SHIFT, Fa, Fb, t, u, v);

	s = m | n | o | p;

	*(dest + i) = s;
    }
}
 
/* Conjoint OVER: source over destination (conjoint factors). */
static void
combine_conjoint_over_u (pixman_implementation_t *imp,
                         pixman_op_t              op,
                         uint32_t *               dest,
                         const uint32_t *         src,
                         const uint32_t *         mask,
                         int                      width)
{
    combine_conjoint_general_u (dest, src, mask, width, COMBINE_A_OVER);
}

/* Conjoint OVER_REVERSE: destination over source. */
static void
combine_conjoint_over_reverse_u (pixman_implementation_t *imp,
                                 pixman_op_t              op,
                                 uint32_t *               dest,
                                 const uint32_t *         src,
                                 const uint32_t *         mask,
                                 int                      width)
{
    combine_conjoint_general_u (dest, src, mask, width, COMBINE_B_OVER);
}

/* Conjoint IN: keep the part of the source covered by the destination. */
static void
combine_conjoint_in_u (pixman_implementation_t *imp,
                       pixman_op_t              op,
                       uint32_t *               dest,
                       const uint32_t *         src,
                       const uint32_t *         mask,
                       int                      width)
{
    combine_conjoint_general_u (dest, src, mask, width, COMBINE_A_IN);
}

/* Conjoint IN_REVERSE: keep the part of the destination covered by the source. */
static void
combine_conjoint_in_reverse_u (pixman_implementation_t *imp,
                               pixman_op_t              op,
                               uint32_t *               dest,
                               const uint32_t *         src,
                               const uint32_t *         mask,
                               int                      width)
{
    combine_conjoint_general_u (dest, src, mask, width, COMBINE_B_IN);
}

/* Conjoint OUT: keep the part of the source not covered by the destination. */
static void
combine_conjoint_out_u (pixman_implementation_t *imp,
                        pixman_op_t              op,
                        uint32_t *               dest,
                        const uint32_t *         src,
                        const uint32_t *         mask,
                        int                      width)
{
    combine_conjoint_general_u (dest, src, mask, width, COMBINE_A_OUT);
}

/* Conjoint OUT_REVERSE: keep the part of the destination not covered by the source. */
static void
combine_conjoint_out_reverse_u (pixman_implementation_t *imp,
                                pixman_op_t              op,
                                uint32_t *               dest,
                                const uint32_t *         src,
                                const uint32_t *         mask,
                                int                      width)
{
    combine_conjoint_general_u (dest, src, mask, width, COMBINE_B_OUT);
}

/* Conjoint ATOP: source inside the destination, destination outside the source. */
static void
combine_conjoint_atop_u (pixman_implementation_t *imp,
                         pixman_op_t              op,
                         uint32_t *               dest,
                         const uint32_t *         src,
                         const uint32_t *         mask,
                         int                      width)
{
    combine_conjoint_general_u (dest, src, mask, width, COMBINE_A_ATOP);
}

/* Conjoint ATOP_REVERSE: destination inside the source, source outside the destination. */
static void
combine_conjoint_atop_reverse_u (pixman_implementation_t *imp,
                                 pixman_op_t              op,
                                 uint32_t *               dest,
                                 const uint32_t *         src,
                                 const uint32_t *         mask,
                                 int                      width)
{
    combine_conjoint_general_u (dest, src, mask, width, COMBINE_B_ATOP);
}

/* Conjoint XOR: the non-overlapping parts of source and destination. */
static void
combine_conjoint_xor_u (pixman_implementation_t *imp,
                        pixman_op_t              op,
                        uint32_t *               dest,
                        const uint32_t *         src,
                        const uint32_t *         mask,
                        int                      width)
{
    combine_conjoint_general_u (dest, src, mask, width, COMBINE_XOR);
}
 
/************************************************************************/
/*********************** Per Channel functions **************************/
/************************************************************************/
 
/* CLEAR (component alpha): destination becomes fully transparent black. */
static void
combine_clear_ca (pixman_implementation_t *imp,
                  pixman_op_t              op,
                  uint32_t *               dest,
                  const uint32_t *         src,
                  const uint32_t *         mask,
                  int                      width)
{
    memset (dest, 0, width * sizeof *dest);
}
 
/* SRC (component alpha): destination = source multiplied per channel by
 * the mask values.
 */
static void
combine_src_ca (pixman_implementation_t *imp,
                pixman_op_t              op,
                uint32_t *               dest,
                const uint32_t *         src,
                const uint32_t *         mask,
                int                      width)
{
    int i;

    for (i = 0; i < width; ++i)
    {
	uint32_t s = src[i];
	uint32_t m = mask[i];

	combine_mask_value_ca (&s, &m);

	dest[i] = s;
    }
}
 
/* OVER (component alpha): dest = masked src + (1 - per-channel src.a) * dest.
 * Skips the blend when the masked source is fully opaque in every channel.
 */
static void
combine_over_ca (pixman_implementation_t *imp,
                 pixman_op_t              op,
                 uint32_t *               dest,
                 const uint32_t *         src,
                 const uint32_t *         mask,
                 int                      width)
{
    int i;

    for (i = 0; i < width; ++i)
    {
	uint32_t s = src[i];
	uint32_t m = mask[i];
	uint32_t ia;

	combine_mask_ca (&s, &m);

	ia = ~m;                /* per-channel inverse source alpha */
	if (ia)
	{
	    uint32_t d = dest[i];

	    UN8x4_MUL_UN8x4_ADD_UN8x4 (d, ia, s);
	    s = d;
	}

	dest[i] = s;
    }
}
 
/* OVER_REVERSE (component alpha): dest = dest + (1 - dest.a) * masked src.
 * A fully opaque destination pixel is left untouched.
 */
static void
combine_over_reverse_ca (pixman_implementation_t *imp,
                         pixman_op_t              op,
                         uint32_t *               dest,
                         const uint32_t *         src,
                         const uint32_t *         mask,
                         int                      width)
{
    int i;

    for (i = 0; i < width; ++i)
    {
	uint32_t d = dest[i];
	uint32_t ia = ~d >> A_SHIFT;

	if (ia)
	{
	    uint32_t s = src[i];
	    uint32_t m = mask[i];

	    UN8x4_MUL_UN8x4 (s, m);
	    UN8x4_MUL_UN8_ADD_UN8x4 (s, ia, d);

	    dest[i] = s;
	}
    }
}
 
/* IN (component alpha): dest = masked src * dest.alpha.  Transparent
 * destination pixels short-circuit to 0 without reading the source.
 */
static void
combine_in_ca (pixman_implementation_t *imp,
               pixman_op_t              op,
               uint32_t *               dest,
               const uint32_t *         src,
               const uint32_t *         mask,
               int                      width)
{
    int i;

    for (i = 0; i < width; ++i)
    {
	uint32_t d = dest[i];
	uint16_t dest_a = d >> A_SHIFT;
	uint32_t s = 0;

	if (dest_a)
	{
	    uint32_t m = mask[i];

	    s = src[i];
	    combine_mask_value_ca (&s, &m);

	    if (dest_a != MASK)
		UN8x4_MUL_UN8 (s, dest_a);
	}

	dest[i] = s;
    }
}
 
/* IN_REVERSE (component alpha): dest = dest * per-channel source alpha.
 * A fully opaque source alpha leaves the destination untouched.
 */
static void
combine_in_reverse_ca (pixman_implementation_t *imp,
                       pixman_op_t              op,
                       uint32_t *               dest,
                       const uint32_t *         src,
                       const uint32_t *         mask,
                       int                      width)
{
    int i;

    for (i = 0; i < width; ++i)
    {
	uint32_t s = src[i];
	uint32_t m = mask[i];
	uint32_t alpha;

	combine_mask_alpha_ca (&s, &m);

	alpha = m;
	if (alpha != ~0)
	{
	    uint32_t d = 0;

	    if (alpha)
	    {
		d = dest[i];
		UN8x4_MUL_UN8x4 (d, alpha);
	    }

	    dest[i] = d;
	}
    }
}
 
/* OUT (component alpha): dest = masked src * (1 - dest.alpha).  Fully
 * opaque destination pixels short-circuit to 0 without reading the source.
 */
static void
combine_out_ca (pixman_implementation_t *imp,
                pixman_op_t              op,
                uint32_t *               dest,
                const uint32_t *         src,
                const uint32_t *         mask,
                int                      width)
{
    int i;

    for (i = 0; i < width; ++i)
    {
	uint32_t d = dest[i];
	uint16_t dest_ia = ~d >> A_SHIFT;
	uint32_t s = 0;

	if (dest_ia)
	{
	    uint32_t m = mask[i];

	    s = src[i];
	    combine_mask_value_ca (&s, &m);

	    if (dest_ia != MASK)
		UN8x4_MUL_UN8 (s, dest_ia);
	}

	dest[i] = s;
    }
}
 
/* OUT_REVERSE (component alpha): dest = dest * (1 - per-channel source
 * alpha).  A fully transparent source alpha leaves the destination
 * untouched.
 */
static void
combine_out_reverse_ca (pixman_implementation_t *imp,
                        pixman_op_t              op,
                        uint32_t *               dest,
                        const uint32_t *         src,
                        const uint32_t *         mask,
                        int                      width)
{
    int i;

    for (i = 0; i < width; ++i)
    {
	uint32_t s = src[i];
	uint32_t m = mask[i];
	uint32_t alpha;

	combine_mask_alpha_ca (&s, &m);

	alpha = ~m;             /* per-channel inverse source alpha */
	if (alpha != ~0)
	{
	    uint32_t d = 0;

	    if (alpha)
	    {
		d = dest[i];
		UN8x4_MUL_UN8x4 (d, alpha);
	    }

	    dest[i] = d;
	}
    }
}
 
/* ATOP (component alpha):
 * dest = masked src * dest.alpha + dest * (1 - per-channel src.alpha)
 */
static void
combine_atop_ca (pixman_implementation_t *imp,
                 pixman_op_t              op,
                 uint32_t *               dest,
                 const uint32_t *         src,
                 const uint32_t *         mask,
                 int                      width)
{
    int i;

    for (i = 0; i < width; ++i)
    {
	uint32_t d = dest[i];
	uint32_t s = src[i];
	uint32_t m = mask[i];
	uint32_t dest_factor;
	uint16_t src_factor = d >> A_SHIFT;

	combine_mask_ca (&s, &m);

	dest_factor = ~m;

	UN8x4_MUL_UN8x4_ADD_UN8x4_MUL_UN8 (d, dest_factor, s, src_factor);

	dest[i] = d;
    }
}
 
/* ATOP_REVERSE (component alpha):
 * dest = masked src * (1 - dest.alpha) + dest * per-channel src.alpha
 */
static void
combine_atop_reverse_ca (pixman_implementation_t *imp,
                         pixman_op_t              op,
                         uint32_t *               dest,
                         const uint32_t *         src,
                         const uint32_t *         mask,
                         int                      width)
{
    int i;

    for (i = 0; i < width; ++i)
    {
	uint32_t d = dest[i];
	uint32_t s = src[i];
	uint32_t m = mask[i];
	uint32_t dest_factor;
	uint16_t src_factor = ~d >> A_SHIFT;

	combine_mask_ca (&s, &m);

	dest_factor = m;

	UN8x4_MUL_UN8x4_ADD_UN8x4_MUL_UN8 (d, dest_factor, s, src_factor);

	dest[i] = d;
    }
}
 
static void
combine_xor_ca (pixman_implementation_t *imp,
pixman_op_t op,
uint32_t * dest,
const uint32_t * src,
const uint32_t * mask,
int width)
{
int i;
 
for (i = 0; i < width; ++i)
{
uint32_t d = *(dest + i);
uint32_t s = *(src + i);
uint32_t m = *(mask + i);
uint32_t ad;
uint16_t as = ~d >> A_SHIFT;
 
combine_mask_ca (&s, &m);
 
ad = ~m;
 
UN8x4_MUL_UN8x4_ADD_UN8x4_MUL_UN8 (d, ad, s, as);
 
*(dest + i) = d;
}
}
 
static void
combine_add_ca (pixman_implementation_t *imp,
pixman_op_t op,
uint32_t * dest,
const uint32_t * src,
const uint32_t * mask,
int width)
{
int i;
 
for (i = 0; i < width; ++i)
{
uint32_t s = *(src + i);
uint32_t m = *(mask + i);
uint32_t d = *(dest + i);
 
combine_mask_value_ca (&s, &m);
 
UN8x4_ADD_UN8x4 (d, s);
 
*(dest + i) = d;
}
}
 
/* Component-alpha SATURATE: per channel, add as much of the masked source
 * as still fits into the remaining destination alpha.  If the source's
 * per-channel alpha (sb/sg/sr/sa) fits within the free alpha `da`, this is
 * a plain saturating add; otherwise the source channel is scaled by da/s_a
 * first (the GENERIC/ADD macros come from pixman-combine32.h). */
static void
combine_saturate_ca (pixman_implementation_t *imp,
                     pixman_op_t op,
                     uint32_t * dest,
                     const uint32_t * src,
                     const uint32_t * mask,
                     int width)
{
    int i;

    for (i = 0; i < width; ++i)
    {
	uint32_t s, d;
	uint16_t sa, sr, sg, sb, da;
	uint16_t t, u, v;              /* scratch temporaries for the macros */
	uint32_t m, n, o, p;           /* per-channel partial results */

	d = *(dest + i);
	s = *(src + i);
	m = *(mask + i);

	combine_mask_ca (&s, &m);

	/* per-channel source alpha, taken from the combined mask */
	sa = (m >> A_SHIFT);
	sr = (m >> R_SHIFT) & MASK;
	sg = (m >> G_SHIFT) & MASK;
	sb = m & MASK;
	da = ~d >> A_SHIFT;            /* remaining (inverse) destination alpha */

	if (sb <= da)
	    m = ADD (s, d, 0, t);
	else
	    m = GENERIC (s, d, 0, (da << G_SHIFT) / sb, MASK, t, u, v);

	if (sg <= da)
	    n = ADD (s, d, G_SHIFT, t);
	else
	    n = GENERIC (s, d, G_SHIFT, (da << G_SHIFT) / sg, MASK, t, u, v);

	if (sr <= da)
	    o = ADD (s, d, R_SHIFT, t);
	else
	    o = GENERIC (s, d, R_SHIFT, (da << G_SHIFT) / sr, MASK, t, u, v);

	if (sa <= da)
	    p = ADD (s, d, A_SHIFT, t);
	else
	    p = GENERIC (s, d, A_SHIFT, (da << G_SHIFT) / sa, MASK, t, u, v);

	*(dest + i) = m | n | o | p;   /* reassemble the four channels */
    }
}
 
/* Generic disjoint-operator combiner, component alpha.
 *
 * `combine` packs two selectors: the low bits (COMBINE_A) choose the
 * source factor Fa, the high bits (COMBINE_B) the destination factor Fb.
 * Both factors are computed per channel from the per-channel source alpha
 * `sa` (the mask after combine_mask_ca) and the destination alpha `da`,
 * using combine_disjoint_in_part()/combine_disjoint_out_part() (defined
 * earlier in this file).  Each channel is then blended as
 * (s*Fa + d*Fb)/255 by the GENERIC macro. */
static void
combine_disjoint_general_ca (uint32_t * dest,
                             const uint32_t *src,
                             const uint32_t *mask,
                             int width,
                             uint8_t combine)
{
    int i;

    for (i = 0; i < width; ++i)
    {
	uint32_t s, d;
	uint32_t m, n, o, p;           /* per-channel partial results */
	uint32_t Fa, Fb;               /* packed per-channel blend factors */
	uint16_t t, u, v;              /* scratch temporaries for GENERIC */
	uint32_t sa;
	uint8_t da;

	s = *(src + i);
	m = *(mask + i);
	d = *(dest + i);
	da = d >> A_SHIFT;

	combine_mask_ca (&s, &m);

	sa = m;                        /* per-channel source alpha */

	switch (combine & COMBINE_A)
	{
	default:
	    Fa = 0;
	    break;

	case COMBINE_A_OUT:
	    m = (uint32_t)combine_disjoint_out_part ((uint8_t) (sa >> 0), da);
	    n = (uint32_t)combine_disjoint_out_part ((uint8_t) (sa >> G_SHIFT), da) << G_SHIFT;
	    o = (uint32_t)combine_disjoint_out_part ((uint8_t) (sa >> R_SHIFT), da) << R_SHIFT;
	    p = (uint32_t)combine_disjoint_out_part ((uint8_t) (sa >> A_SHIFT), da) << A_SHIFT;
	    Fa = m | n | o | p;
	    break;

	case COMBINE_A_IN:
	    m = (uint32_t)combine_disjoint_in_part ((uint8_t) (sa >> 0), da);
	    n = (uint32_t)combine_disjoint_in_part ((uint8_t) (sa >> G_SHIFT), da) << G_SHIFT;
	    o = (uint32_t)combine_disjoint_in_part ((uint8_t) (sa >> R_SHIFT), da) << R_SHIFT;
	    p = (uint32_t)combine_disjoint_in_part ((uint8_t) (sa >> A_SHIFT), da) << A_SHIFT;
	    Fa = m | n | o | p;
	    break;

	case COMBINE_A:
	    Fa = ~0;                   /* source factor = 1 in every channel */
	    break;
	}

	switch (combine & COMBINE_B)
	{
	default:
	    Fb = 0;
	    break;

	case COMBINE_B_OUT:
	    m = (uint32_t)combine_disjoint_out_part (da, (uint8_t) (sa >> 0));
	    n = (uint32_t)combine_disjoint_out_part (da, (uint8_t) (sa >> G_SHIFT)) << G_SHIFT;
	    o = (uint32_t)combine_disjoint_out_part (da, (uint8_t) (sa >> R_SHIFT)) << R_SHIFT;
	    p = (uint32_t)combine_disjoint_out_part (da, (uint8_t) (sa >> A_SHIFT)) << A_SHIFT;
	    Fb = m | n | o | p;
	    break;

	case COMBINE_B_IN:
	    m = (uint32_t)combine_disjoint_in_part (da, (uint8_t) (sa >> 0));
	    n = (uint32_t)combine_disjoint_in_part (da, (uint8_t) (sa >> G_SHIFT)) << G_SHIFT;
	    o = (uint32_t)combine_disjoint_in_part (da, (uint8_t) (sa >> R_SHIFT)) << R_SHIFT;
	    p = (uint32_t)combine_disjoint_in_part (da, (uint8_t) (sa >> A_SHIFT)) << A_SHIFT;
	    Fb = m | n | o | p;
	    break;

	case COMBINE_B:
	    Fb = ~0;                   /* destination factor = 1 in every channel */
	    break;
	}
	/* blend each channel: (s*Fa + d*Fb)/255 */
	m = GENERIC (s, d, 0, GET_COMP (Fa, 0), GET_COMP (Fb, 0), t, u, v);
	n = GENERIC (s, d, G_SHIFT, GET_COMP (Fa, G_SHIFT), GET_COMP (Fb, G_SHIFT), t, u, v);
	o = GENERIC (s, d, R_SHIFT, GET_COMP (Fa, R_SHIFT), GET_COMP (Fb, R_SHIFT), t, u, v);
	p = GENERIC (s, d, A_SHIFT, GET_COMP (Fa, A_SHIFT), GET_COMP (Fb, A_SHIFT), t, u, v);

	s = m | n | o | p;

	*(dest + i) = s;
    }
}
 
/* Thin wrappers binding combine_disjoint_general_ca to each disjoint
 * Porter-Duff operator code; one per PIXMAN_OP_DISJOINT_* entry. */
static void
combine_disjoint_over_ca (pixman_implementation_t *imp,
                          pixman_op_t op,
                          uint32_t * dest,
                          const uint32_t * src,
                          const uint32_t * mask,
                          int width)
{
    combine_disjoint_general_ca (dest, src, mask, width, COMBINE_A_OVER);
}

static void
combine_disjoint_in_ca (pixman_implementation_t *imp,
                        pixman_op_t op,
                        uint32_t * dest,
                        const uint32_t * src,
                        const uint32_t * mask,
                        int width)
{
    combine_disjoint_general_ca (dest, src, mask, width, COMBINE_A_IN);
}

static void
combine_disjoint_in_reverse_ca (pixman_implementation_t *imp,
                                pixman_op_t op,
                                uint32_t * dest,
                                const uint32_t * src,
                                const uint32_t * mask,
                                int width)
{
    combine_disjoint_general_ca (dest, src, mask, width, COMBINE_B_IN);
}

static void
combine_disjoint_out_ca (pixman_implementation_t *imp,
                         pixman_op_t op,
                         uint32_t * dest,
                         const uint32_t * src,
                         const uint32_t * mask,
                         int width)
{
    combine_disjoint_general_ca (dest, src, mask, width, COMBINE_A_OUT);
}

static void
combine_disjoint_out_reverse_ca (pixman_implementation_t *imp,
                                 pixman_op_t op,
                                 uint32_t * dest,
                                 const uint32_t * src,
                                 const uint32_t * mask,
                                 int width)
{
    combine_disjoint_general_ca (dest, src, mask, width, COMBINE_B_OUT);
}

static void
combine_disjoint_atop_ca (pixman_implementation_t *imp,
                          pixman_op_t op,
                          uint32_t * dest,
                          const uint32_t * src,
                          const uint32_t * mask,
                          int width)
{
    combine_disjoint_general_ca (dest, src, mask, width, COMBINE_A_ATOP);
}

static void
combine_disjoint_atop_reverse_ca (pixman_implementation_t *imp,
                                  pixman_op_t op,
                                  uint32_t * dest,
                                  const uint32_t * src,
                                  const uint32_t * mask,
                                  int width)
{
    combine_disjoint_general_ca (dest, src, mask, width, COMBINE_B_ATOP);
}

static void
combine_disjoint_xor_ca (pixman_implementation_t *imp,
                         pixman_op_t op,
                         uint32_t * dest,
                         const uint32_t * src,
                         const uint32_t * mask,
                         int width)
{
    combine_disjoint_general_ca (dest, src, mask, width, COMBINE_XOR);
}
 
/* Generic conjoint-operator combiner, component alpha.
 *
 * Identical structure to combine_disjoint_general_ca, but the per-channel
 * factors Fa/Fb are computed with combine_conjoint_in_part()/
 * combine_conjoint_out_part() (defined earlier in this file).  `combine`
 * packs the COMBINE_A selector for the source factor and the COMBINE_B
 * selector for the destination factor; each channel is blended as
 * (s*Fa + d*Fb)/255 via GENERIC. */
static void
combine_conjoint_general_ca (uint32_t * dest,
                             const uint32_t *src,
                             const uint32_t *mask,
                             int width,
                             uint8_t combine)
{
    int i;

    for (i = 0; i < width; ++i)
    {
	uint32_t s, d;
	uint32_t m, n, o, p;           /* per-channel partial results */
	uint32_t Fa, Fb;               /* packed per-channel blend factors */
	uint16_t t, u, v;              /* scratch temporaries for GENERIC */
	uint32_t sa;
	uint8_t da;

	s = *(src + i);
	m = *(mask + i);
	d = *(dest + i);
	da = d >> A_SHIFT;

	combine_mask_ca (&s, &m);

	sa = m;                        /* per-channel source alpha */

	switch (combine & COMBINE_A)
	{
	default:
	    Fa = 0;
	    break;

	case COMBINE_A_OUT:
	    m = (uint32_t)combine_conjoint_out_part ((uint8_t) (sa >> 0), da);
	    n = (uint32_t)combine_conjoint_out_part ((uint8_t) (sa >> G_SHIFT), da) << G_SHIFT;
	    o = (uint32_t)combine_conjoint_out_part ((uint8_t) (sa >> R_SHIFT), da) << R_SHIFT;
	    p = (uint32_t)combine_conjoint_out_part ((uint8_t) (sa >> A_SHIFT), da) << A_SHIFT;
	    Fa = m | n | o | p;
	    break;

	case COMBINE_A_IN:
	    m = (uint32_t)combine_conjoint_in_part ((uint8_t) (sa >> 0), da);
	    n = (uint32_t)combine_conjoint_in_part ((uint8_t) (sa >> G_SHIFT), da) << G_SHIFT;
	    o = (uint32_t)combine_conjoint_in_part ((uint8_t) (sa >> R_SHIFT), da) << R_SHIFT;
	    p = (uint32_t)combine_conjoint_in_part ((uint8_t) (sa >> A_SHIFT), da) << A_SHIFT;
	    Fa = m | n | o | p;
	    break;

	case COMBINE_A:
	    Fa = ~0;                   /* source factor = 1 in every channel */
	    break;
	}

	switch (combine & COMBINE_B)
	{
	default:
	    Fb = 0;
	    break;

	case COMBINE_B_OUT:
	    m = (uint32_t)combine_conjoint_out_part (da, (uint8_t) (sa >> 0));
	    n = (uint32_t)combine_conjoint_out_part (da, (uint8_t) (sa >> G_SHIFT)) << G_SHIFT;
	    o = (uint32_t)combine_conjoint_out_part (da, (uint8_t) (sa >> R_SHIFT)) << R_SHIFT;
	    p = (uint32_t)combine_conjoint_out_part (da, (uint8_t) (sa >> A_SHIFT)) << A_SHIFT;
	    Fb = m | n | o | p;
	    break;

	case COMBINE_B_IN:
	    m = (uint32_t)combine_conjoint_in_part (da, (uint8_t) (sa >> 0));
	    n = (uint32_t)combine_conjoint_in_part (da, (uint8_t) (sa >> G_SHIFT)) << G_SHIFT;
	    o = (uint32_t)combine_conjoint_in_part (da, (uint8_t) (sa >> R_SHIFT)) << R_SHIFT;
	    p = (uint32_t)combine_conjoint_in_part (da, (uint8_t) (sa >> A_SHIFT)) << A_SHIFT;
	    Fb = m | n | o | p;
	    break;

	case COMBINE_B:
	    Fb = ~0;                   /* destination factor = 1 in every channel */
	    break;
	}
	/* blend each channel: (s*Fa + d*Fb)/255 */
	m = GENERIC (s, d, 0, GET_COMP (Fa, 0), GET_COMP (Fb, 0), t, u, v);
	n = GENERIC (s, d, G_SHIFT, GET_COMP (Fa, G_SHIFT), GET_COMP (Fb, G_SHIFT), t, u, v);
	o = GENERIC (s, d, R_SHIFT, GET_COMP (Fa, R_SHIFT), GET_COMP (Fb, R_SHIFT), t, u, v);
	p = GENERIC (s, d, A_SHIFT, GET_COMP (Fa, A_SHIFT), GET_COMP (Fb, A_SHIFT), t, u, v);

	s = m | n | o | p;

	*(dest + i) = s;
    }
}
 
/* Thin wrappers binding combine_conjoint_general_ca to each conjoint
 * Porter-Duff operator code; one per PIXMAN_OP_CONJOINT_* entry. */
static void
combine_conjoint_over_ca (pixman_implementation_t *imp,
                          pixman_op_t op,
                          uint32_t * dest,
                          const uint32_t * src,
                          const uint32_t * mask,
                          int width)
{
    combine_conjoint_general_ca (dest, src, mask, width, COMBINE_A_OVER);
}

static void
combine_conjoint_over_reverse_ca (pixman_implementation_t *imp,
                                  pixman_op_t op,
                                  uint32_t * dest,
                                  const uint32_t * src,
                                  const uint32_t * mask,
                                  int width)
{
    combine_conjoint_general_ca (dest, src, mask, width, COMBINE_B_OVER);
}

static void
combine_conjoint_in_ca (pixman_implementation_t *imp,
                        pixman_op_t op,
                        uint32_t * dest,
                        const uint32_t * src,
                        const uint32_t * mask,
                        int width)
{
    combine_conjoint_general_ca (dest, src, mask, width, COMBINE_A_IN);
}

static void
combine_conjoint_in_reverse_ca (pixman_implementation_t *imp,
                                pixman_op_t op,
                                uint32_t * dest,
                                const uint32_t * src,
                                const uint32_t * mask,
                                int width)
{
    combine_conjoint_general_ca (dest, src, mask, width, COMBINE_B_IN);
}

static void
combine_conjoint_out_ca (pixman_implementation_t *imp,
                         pixman_op_t op,
                         uint32_t * dest,
                         const uint32_t * src,
                         const uint32_t * mask,
                         int width)
{
    combine_conjoint_general_ca (dest, src, mask, width, COMBINE_A_OUT);
}

static void
combine_conjoint_out_reverse_ca (pixman_implementation_t *imp,
                                 pixman_op_t op,
                                 uint32_t * dest,
                                 const uint32_t * src,
                                 const uint32_t * mask,
                                 int width)
{
    combine_conjoint_general_ca (dest, src, mask, width, COMBINE_B_OUT);
}

static void
combine_conjoint_atop_ca (pixman_implementation_t *imp,
                          pixman_op_t op,
                          uint32_t * dest,
                          const uint32_t * src,
                          const uint32_t * mask,
                          int width)
{
    combine_conjoint_general_ca (dest, src, mask, width, COMBINE_A_ATOP);
}

static void
combine_conjoint_atop_reverse_ca (pixman_implementation_t *imp,
                                  pixman_op_t op,
                                  uint32_t * dest,
                                  const uint32_t * src,
                                  const uint32_t * mask,
                                  int width)
{
    combine_conjoint_general_ca (dest, src, mask, width, COMBINE_B_ATOP);
}

static void
combine_conjoint_xor_ca (pixman_implementation_t *imp,
                         pixman_op_t op,
                         uint32_t * dest,
                         const uint32_t * src,
                         const uint32_t * mask,
                         int width)
{
    combine_conjoint_general_ca (dest, src, mask, width, COMBINE_XOR);
}
 
/* Fill in the 32-bit combiner dispatch tables of an implementation:
 * combine_32[] holds the unified-alpha combiners, combine_32_ca[] the
 * component-alpha ones, indexed by pixman_op_t. */
void
_pixman_setup_combiner_functions_32 (pixman_implementation_t *imp)
{
    /* Unified alpha */
    imp->combine_32[PIXMAN_OP_CLEAR] = combine_clear;
    imp->combine_32[PIXMAN_OP_SRC] = combine_src_u;
    imp->combine_32[PIXMAN_OP_DST] = combine_dst;
    imp->combine_32[PIXMAN_OP_OVER] = combine_over_u;
    imp->combine_32[PIXMAN_OP_OVER_REVERSE] = combine_over_reverse_u;
    imp->combine_32[PIXMAN_OP_IN] = combine_in_u;
    imp->combine_32[PIXMAN_OP_IN_REVERSE] = combine_in_reverse_u;
    imp->combine_32[PIXMAN_OP_OUT] = combine_out_u;
    imp->combine_32[PIXMAN_OP_OUT_REVERSE] = combine_out_reverse_u;
    imp->combine_32[PIXMAN_OP_ATOP] = combine_atop_u;
    imp->combine_32[PIXMAN_OP_ATOP_REVERSE] = combine_atop_reverse_u;
    imp->combine_32[PIXMAN_OP_XOR] = combine_xor_u;
    imp->combine_32[PIXMAN_OP_ADD] = combine_add_u;
    imp->combine_32[PIXMAN_OP_SATURATE] = combine_saturate_u;

    /* Disjoint, unified */
    imp->combine_32[PIXMAN_OP_DISJOINT_CLEAR] = combine_clear;
    imp->combine_32[PIXMAN_OP_DISJOINT_SRC] = combine_src_u;
    imp->combine_32[PIXMAN_OP_DISJOINT_DST] = combine_dst;
    imp->combine_32[PIXMAN_OP_DISJOINT_OVER] = combine_disjoint_over_u;
    /* DISJOINT_OVER_REVERSE is mathematically the same as SATURATE */
    imp->combine_32[PIXMAN_OP_DISJOINT_OVER_REVERSE] = combine_saturate_u;
    imp->combine_32[PIXMAN_OP_DISJOINT_IN] = combine_disjoint_in_u;
    imp->combine_32[PIXMAN_OP_DISJOINT_IN_REVERSE] = combine_disjoint_in_reverse_u;
    imp->combine_32[PIXMAN_OP_DISJOINT_OUT] = combine_disjoint_out_u;
    imp->combine_32[PIXMAN_OP_DISJOINT_OUT_REVERSE] = combine_disjoint_out_reverse_u;
    imp->combine_32[PIXMAN_OP_DISJOINT_ATOP] = combine_disjoint_atop_u;
    imp->combine_32[PIXMAN_OP_DISJOINT_ATOP_REVERSE] = combine_disjoint_atop_reverse_u;
    imp->combine_32[PIXMAN_OP_DISJOINT_XOR] = combine_disjoint_xor_u;

    /* Conjoint, unified */
    imp->combine_32[PIXMAN_OP_CONJOINT_CLEAR] = combine_clear;
    imp->combine_32[PIXMAN_OP_CONJOINT_SRC] = combine_src_u;
    imp->combine_32[PIXMAN_OP_CONJOINT_DST] = combine_dst;
    imp->combine_32[PIXMAN_OP_CONJOINT_OVER] = combine_conjoint_over_u;
    imp->combine_32[PIXMAN_OP_CONJOINT_OVER_REVERSE] = combine_conjoint_over_reverse_u;
    imp->combine_32[PIXMAN_OP_CONJOINT_IN] = combine_conjoint_in_u;
    imp->combine_32[PIXMAN_OP_CONJOINT_IN_REVERSE] = combine_conjoint_in_reverse_u;
    imp->combine_32[PIXMAN_OP_CONJOINT_OUT] = combine_conjoint_out_u;
    imp->combine_32[PIXMAN_OP_CONJOINT_OUT_REVERSE] = combine_conjoint_out_reverse_u;
    imp->combine_32[PIXMAN_OP_CONJOINT_ATOP] = combine_conjoint_atop_u;
    imp->combine_32[PIXMAN_OP_CONJOINT_ATOP_REVERSE] = combine_conjoint_atop_reverse_u;
    imp->combine_32[PIXMAN_OP_CONJOINT_XOR] = combine_conjoint_xor_u;

    /* PDF blend modes, unified alpha */
    imp->combine_32[PIXMAN_OP_MULTIPLY] = combine_multiply_u;
    imp->combine_32[PIXMAN_OP_SCREEN] = combine_screen_u;
    imp->combine_32[PIXMAN_OP_OVERLAY] = combine_overlay_u;
    imp->combine_32[PIXMAN_OP_DARKEN] = combine_darken_u;
    imp->combine_32[PIXMAN_OP_LIGHTEN] = combine_lighten_u;
    imp->combine_32[PIXMAN_OP_COLOR_DODGE] = combine_color_dodge_u;
    imp->combine_32[PIXMAN_OP_COLOR_BURN] = combine_color_burn_u;
    imp->combine_32[PIXMAN_OP_HARD_LIGHT] = combine_hard_light_u;
    imp->combine_32[PIXMAN_OP_SOFT_LIGHT] = combine_soft_light_u;
    imp->combine_32[PIXMAN_OP_DIFFERENCE] = combine_difference_u;
    imp->combine_32[PIXMAN_OP_EXCLUSION] = combine_exclusion_u;
    imp->combine_32[PIXMAN_OP_HSL_HUE] = combine_hsl_hue_u;
    imp->combine_32[PIXMAN_OP_HSL_SATURATION] = combine_hsl_saturation_u;
    imp->combine_32[PIXMAN_OP_HSL_COLOR] = combine_hsl_color_u;
    imp->combine_32[PIXMAN_OP_HSL_LUMINOSITY] = combine_hsl_luminosity_u;

    /* Component alpha combiners */
    imp->combine_32_ca[PIXMAN_OP_CLEAR] = combine_clear_ca;
    imp->combine_32_ca[PIXMAN_OP_SRC] = combine_src_ca;
    /* dest */
    imp->combine_32_ca[PIXMAN_OP_OVER] = combine_over_ca;
    imp->combine_32_ca[PIXMAN_OP_OVER_REVERSE] = combine_over_reverse_ca;
    imp->combine_32_ca[PIXMAN_OP_IN] = combine_in_ca;
    imp->combine_32_ca[PIXMAN_OP_IN_REVERSE] = combine_in_reverse_ca;
    imp->combine_32_ca[PIXMAN_OP_OUT] = combine_out_ca;
    imp->combine_32_ca[PIXMAN_OP_OUT_REVERSE] = combine_out_reverse_ca;
    imp->combine_32_ca[PIXMAN_OP_ATOP] = combine_atop_ca;
    imp->combine_32_ca[PIXMAN_OP_ATOP_REVERSE] = combine_atop_reverse_ca;
    imp->combine_32_ca[PIXMAN_OP_XOR] = combine_xor_ca;
    imp->combine_32_ca[PIXMAN_OP_ADD] = combine_add_ca;
    imp->combine_32_ca[PIXMAN_OP_SATURATE] = combine_saturate_ca;

    /* Disjoint CA */
    imp->combine_32_ca[PIXMAN_OP_DISJOINT_CLEAR] = combine_clear_ca;
    imp->combine_32_ca[PIXMAN_OP_DISJOINT_SRC] = combine_src_ca;
    imp->combine_32_ca[PIXMAN_OP_DISJOINT_DST] = combine_dst;
    imp->combine_32_ca[PIXMAN_OP_DISJOINT_OVER] = combine_disjoint_over_ca;
    imp->combine_32_ca[PIXMAN_OP_DISJOINT_OVER_REVERSE] = combine_saturate_ca;
    imp->combine_32_ca[PIXMAN_OP_DISJOINT_IN] = combine_disjoint_in_ca;
    imp->combine_32_ca[PIXMAN_OP_DISJOINT_IN_REVERSE] = combine_disjoint_in_reverse_ca;
    imp->combine_32_ca[PIXMAN_OP_DISJOINT_OUT] = combine_disjoint_out_ca;
    imp->combine_32_ca[PIXMAN_OP_DISJOINT_OUT_REVERSE] = combine_disjoint_out_reverse_ca;
    imp->combine_32_ca[PIXMAN_OP_DISJOINT_ATOP] = combine_disjoint_atop_ca;
    imp->combine_32_ca[PIXMAN_OP_DISJOINT_ATOP_REVERSE] = combine_disjoint_atop_reverse_ca;
    imp->combine_32_ca[PIXMAN_OP_DISJOINT_XOR] = combine_disjoint_xor_ca;

    /* Conjoint CA */
    imp->combine_32_ca[PIXMAN_OP_CONJOINT_CLEAR] = combine_clear_ca;
    imp->combine_32_ca[PIXMAN_OP_CONJOINT_SRC] = combine_src_ca;
    imp->combine_32_ca[PIXMAN_OP_CONJOINT_DST] = combine_dst;
    imp->combine_32_ca[PIXMAN_OP_CONJOINT_OVER] = combine_conjoint_over_ca;
    imp->combine_32_ca[PIXMAN_OP_CONJOINT_OVER_REVERSE] = combine_conjoint_over_reverse_ca;
    imp->combine_32_ca[PIXMAN_OP_CONJOINT_IN] = combine_conjoint_in_ca;
    imp->combine_32_ca[PIXMAN_OP_CONJOINT_IN_REVERSE] = combine_conjoint_in_reverse_ca;
    imp->combine_32_ca[PIXMAN_OP_CONJOINT_OUT] = combine_conjoint_out_ca;
    imp->combine_32_ca[PIXMAN_OP_CONJOINT_OUT_REVERSE] = combine_conjoint_out_reverse_ca;
    imp->combine_32_ca[PIXMAN_OP_CONJOINT_ATOP] = combine_conjoint_atop_ca;
    imp->combine_32_ca[PIXMAN_OP_CONJOINT_ATOP_REVERSE] = combine_conjoint_atop_reverse_ca;
    imp->combine_32_ca[PIXMAN_OP_CONJOINT_XOR] = combine_conjoint_xor_ca;

    /* PDF blend modes, component alpha */
    imp->combine_32_ca[PIXMAN_OP_MULTIPLY] = combine_multiply_ca;
    imp->combine_32_ca[PIXMAN_OP_SCREEN] = combine_screen_ca;
    imp->combine_32_ca[PIXMAN_OP_OVERLAY] = combine_overlay_ca;
    imp->combine_32_ca[PIXMAN_OP_DARKEN] = combine_darken_ca;
    imp->combine_32_ca[PIXMAN_OP_LIGHTEN] = combine_lighten_ca;
    imp->combine_32_ca[PIXMAN_OP_COLOR_DODGE] = combine_color_dodge_ca;
    imp->combine_32_ca[PIXMAN_OP_COLOR_BURN] = combine_color_burn_ca;
    imp->combine_32_ca[PIXMAN_OP_HARD_LIGHT] = combine_hard_light_ca;
    imp->combine_32_ca[PIXMAN_OP_SOFT_LIGHT] = combine_soft_light_ca;
    imp->combine_32_ca[PIXMAN_OP_DIFFERENCE] = combine_difference_ca;
    imp->combine_32_ca[PIXMAN_OP_EXCLUSION] = combine_exclusion_ca;

    /* It is not clear that these make sense, so make them noops for now */
    imp->combine_32_ca[PIXMAN_OP_HSL_HUE] = combine_dst;
    imp->combine_32_ca[PIXMAN_OP_HSL_SATURATION] = combine_dst;
    imp->combine_32_ca[PIXMAN_OP_HSL_COLOR] = combine_dst;
    imp->combine_32_ca[PIXMAN_OP_HSL_LUMINOSITY] = combine_dst;
}
 
/programs/develop/libraries/pixman/pixman-combine32.h
0,0 → 1,230
/* WARNING: This file is generated by combine.pl from combine.inc.
Please edit one of those files rather than this one. */
 
#line 1 "pixman-combine.c.template"
 
/* Channel layout for 8-bit-per-component ARGB pixels packed in a uint32_t:
 * alpha in the top byte, then red, green, blue. */
#define COMPONENT_SIZE 8
#define MASK 0xff
#define ONE_HALF 0x80

#define A_SHIFT 8 * 3
#define R_SHIFT 8 * 2
#define G_SHIFT 8
#define A_MASK 0xff000000
#define R_MASK 0xff0000
#define G_MASK 0xff00

#define RB_MASK 0xff00ff
#define AG_MASK 0xff00ff00
#define RB_ONE_HALF 0x800080
#define RB_MASK_PLUS_ONE 0x10000100

/* Extract a single 8-bit channel from a packed pixel. */
#define ALPHA_8(x) ((x) >> A_SHIFT)
#define RED_8(x) (((x) >> R_SHIFT) & MASK)
#define GREEN_8(x) (((x) >> G_SHIFT) & MASK)
#define BLUE_8(x) ((x) & MASK)

/*
 * Helper macros.
 */

/* (a * b) / 255 with correct rounding; t is a caller-supplied scratch
 * temporary.  NOTE: arguments are evaluated more than once. */
#define MUL_UN8(a, b, t) \
    ((t) = (a) * (b) + ONE_HALF, ((((t) >> G_SHIFT ) + (t) ) >> G_SHIFT ))

#define DIV_UN8(a, b) \
    (((uint16_t) (a) * MASK) / (b))

/* min (x + y, 255); t is a scratch temporary. */
#define ADD_UN8(x, y, t) \
    ((t) = (x) + (y), \
     (uint32_t) (uint8_t) ((t) | (0 - ((t) >> G_SHIFT))))

/* x / 255 with rounding. */
#define DIV_ONE_UN8(x) \
    (((x) + ONE_HALF + (((x) + ONE_HALF) >> G_SHIFT)) >> G_SHIFT)

/*
 * The methods below use some tricks to be able to do two color
 * components at the same time.
 */

/*
 * x_rb = (x_rb * a) / 255
 */
#define UN8_rb_MUL_UN8(x, a, t) \
    do \
    { \
	t = ((x) & RB_MASK) * (a); \
	t += RB_ONE_HALF; \
	x = (t + ((t >> G_SHIFT) & RB_MASK)) >> G_SHIFT; \
	x &= RB_MASK; \
    } while (0)

/*
 * x_rb = min (x_rb + y_rb, 255)
 */
#define UN8_rb_ADD_UN8_rb(x, y, t) \
    do \
    { \
	t = ((x) + (y)); \
	t |= RB_MASK_PLUS_ONE - ((t >> G_SHIFT) & RB_MASK); \
	x = (t & RB_MASK); \
    } while (0)

/*
 * x_rb = (x_rb * a_rb) / 255
 */
#define UN8_rb_MUL_UN8_rb(x, a, t) \
    do \
    { \
	t = (x & MASK) * (a & MASK); \
	t |= (x & R_MASK) * ((a >> R_SHIFT) & MASK); \
	t += RB_ONE_HALF; \
	t = (t + ((t >> G_SHIFT) & RB_MASK)) >> G_SHIFT; \
	x = t & RB_MASK; \
    } while (0)

/* The UN8x4_* macros below operate on all four channels of a packed
 * pixel at once, by splitting it into its red/blue and alpha/green
 * halves and using the UN8_rb_* helpers on each half.  They modify
 * their first argument in place, so arguments must be simple lvalues
 * without side effects. */

/*
 * x_c = (x_c * a) / 255
 */
#define UN8x4_MUL_UN8(x, a) \
    do \
    { \
	uint32_t r1__, r2__, t__; \
 \
	r1__ = (x); \
	UN8_rb_MUL_UN8 (r1__, (a), t__); \
 \
	r2__ = (x) >> G_SHIFT; \
	UN8_rb_MUL_UN8 (r2__, (a), t__); \
 \
	(x) = r1__ | (r2__ << G_SHIFT); \
    } while (0)

/*
 * x_c = (x_c * a) / 255 + y_c
 */
#define UN8x4_MUL_UN8_ADD_UN8x4(x, a, y) \
    do \
    { \
	uint32_t r1__, r2__, r3__, t__; \
 \
	r1__ = (x); \
	r2__ = (y) & RB_MASK; \
	UN8_rb_MUL_UN8 (r1__, (a), t__); \
	UN8_rb_ADD_UN8_rb (r1__, r2__, t__); \
 \
	r2__ = (x) >> G_SHIFT; \
	r3__ = ((y) >> G_SHIFT) & RB_MASK; \
	UN8_rb_MUL_UN8 (r2__, (a), t__); \
	UN8_rb_ADD_UN8_rb (r2__, r3__, t__); \
 \
	(x) = r1__ | (r2__ << G_SHIFT); \
    } while (0)

/*
 * x_c = (x_c * a + y_c * b) / 255
 */
#define UN8x4_MUL_UN8_ADD_UN8x4_MUL_UN8(x, a, y, b) \
    do \
    { \
	uint32_t r1__, r2__, r3__, t__; \
 \
	r1__ = (x); \
	r2__ = (y); \
	UN8_rb_MUL_UN8 (r1__, (a), t__); \
	UN8_rb_MUL_UN8 (r2__, (b), t__); \
	UN8_rb_ADD_UN8_rb (r1__, r2__, t__); \
 \
	r2__ = ((x) >> G_SHIFT); \
	r3__ = ((y) >> G_SHIFT); \
	UN8_rb_MUL_UN8 (r2__, (a), t__); \
	UN8_rb_MUL_UN8 (r3__, (b), t__); \
	UN8_rb_ADD_UN8_rb (r2__, r3__, t__); \
 \
	(x) = r1__ | (r2__ << G_SHIFT); \
    } while (0)

/*
 * x_c = (x_c * a_c) / 255
 */
#define UN8x4_MUL_UN8x4(x, a) \
    do \
    { \
	uint32_t r1__, r2__, r3__, t__; \
 \
	r1__ = (x); \
	r2__ = (a); \
	UN8_rb_MUL_UN8_rb (r1__, r2__, t__); \
 \
	r2__ = (x) >> G_SHIFT; \
	r3__ = (a) >> G_SHIFT; \
	UN8_rb_MUL_UN8_rb (r2__, r3__, t__); \
 \
	(x) = r1__ | (r2__ << G_SHIFT); \
    } while (0)

/*
 * x_c = (x_c * a_c) / 255 + y_c
 */
#define UN8x4_MUL_UN8x4_ADD_UN8x4(x, a, y) \
    do \
    { \
	uint32_t r1__, r2__, r3__, t__; \
 \
	r1__ = (x); \
	r2__ = (a); \
	UN8_rb_MUL_UN8_rb (r1__, r2__, t__); \
	r2__ = (y) & RB_MASK; \
	UN8_rb_ADD_UN8_rb (r1__, r2__, t__); \
 \
	r2__ = ((x) >> G_SHIFT); \
	r3__ = ((a) >> G_SHIFT); \
	UN8_rb_MUL_UN8_rb (r2__, r3__, t__); \
	r3__ = ((y) >> G_SHIFT) & RB_MASK; \
	UN8_rb_ADD_UN8_rb (r2__, r3__, t__); \
 \
	(x) = r1__ | (r2__ << G_SHIFT); \
    } while (0)

/*
 * x_c = (x_c * a_c + y_c * b) / 255
 */
#define UN8x4_MUL_UN8x4_ADD_UN8x4_MUL_UN8(x, a, y, b) \
    do \
    { \
	uint32_t r1__, r2__, r3__, t__; \
 \
	r1__ = (x); \
	r2__ = (a); \
	UN8_rb_MUL_UN8_rb (r1__, r2__, t__); \
	r2__ = (y); \
	UN8_rb_MUL_UN8 (r2__, (b), t__); \
	UN8_rb_ADD_UN8_rb (r1__, r2__, t__); \
 \
	r2__ = (x) >> G_SHIFT; \
	r3__ = (a) >> G_SHIFT; \
	UN8_rb_MUL_UN8_rb (r2__, r3__, t__); \
	r3__ = (y) >> G_SHIFT; \
	UN8_rb_MUL_UN8 (r3__, (b), t__); \
	UN8_rb_ADD_UN8_rb (r2__, r3__, t__); \
 \
	x = r1__ | (r2__ << G_SHIFT); \
    } while (0)

/*
  x_c = min(x_c + y_c, 255)
 */
#define UN8x4_ADD_UN8x4(x, y) \
    do \
    { \
	uint32_t r1__, r2__, r3__, t__; \
 \
	r1__ = (x) & RB_MASK; \
	r2__ = (y) & RB_MASK; \
	UN8_rb_ADD_UN8_rb (r1__, r2__, t__); \
 \
	r2__ = ((x) >> G_SHIFT) & RB_MASK; \
	r3__ = ((y) >> G_SHIFT) & RB_MASK; \
	UN8_rb_ADD_UN8_rb (r2__, r3__, t__); \
 \
	x = r1__ | (r2__ << G_SHIFT); \
    } while (0)
/programs/develop/libraries/pixman/pixman-combine64.c
0,0 → 1,2465
/* WARNING: This file is generated by combine.pl from combine.inc.
Please edit one of those files rather than this one. */
 
#line 1 "pixman-combine.c.template"
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif
 
#include <math.h>
#include <string.h>
 
#include "pixman-private.h"
 
#include "pixman-combine64.h"
 
/*** per channel helper functions ***/
 
/* Component-alpha helper (64-bit, 16 bits per channel): multiply the
 * source pixel by the per-channel mask and replace the mask with the
 * per-channel source alpha.  On return *src = src * mask and
 * *mask = mask * src.alpha, both per channel. */
static void
combine_mask_ca (uint64_t *src, uint64_t *mask)
{
    uint64_t a = *mask;

    uint64_t x;
    uint32_t xa;

    if (!a)                        /* fully transparent mask: kill the source */
    {
	*(src) = 0;
	return;
    }

    x = *(src);
    if (a == ~0)                   /* mask fully opaque in every channel */
    {
	/* replicate the source alpha into all four mask channels */
	x = x >> A_SHIFT;
	x |= x << G_SHIFT;
	x |= x << R_SHIFT;
	*(mask) = x;
	return;
    }

    xa = x >> A_SHIFT;             /* source alpha */
    UN16x4_MUL_UN16x4 (x, a);
    *(src) = x;
    UN16x4_MUL_UN16 (a, xa);
    *(mask) = a;
}

/* Component-alpha helper: scale the source color by the per-channel
 * mask, leaving the mask itself untouched. */
static void
combine_mask_value_ca (uint64_t *src, const uint64_t *mask)
{
    uint64_t a = *mask;
    uint64_t x;

    if (!a)
    {
	*(src) = 0;
	return;
    }

    if (a == ~0)                   /* opaque mask: source unchanged */
	return;

    x = *(src);
    UN16x4_MUL_UN16x4 (x, a);
    *(src) = x;
}

/* Component-alpha helper: fold the source alpha into the mask, i.e.
 * *mask = mask * src.alpha per channel.  The source is untouched. */
static void
combine_mask_alpha_ca (const uint64_t *src, uint64_t *mask)
{
    uint64_t a = *(mask);
    uint64_t x;

    if (!a)
	return;

    x = *(src) >> A_SHIFT;         /* source alpha */
    if (x == MASK)                 /* source fully opaque: mask unchanged */
	return;

    if (a == ~0)                   /* opaque mask: just replicate src alpha */
    {
	x |= x << G_SHIFT;
	x |= x << R_SHIFT;
	*(mask) = x;
	return;
    }

    UN16x4_MUL_UN16 (a, x);
    *(mask) = a;
}
 
/*
* There are two ways of handling alpha -- either as a single unified value or
* a separate value for each component, hence each macro must have two
* versions. The unified alpha version has a 'U' at the end of the name,
* the component version has a 'C'. Similarly, functions which deal with
* this difference will have two versions using the same convention.
*/
 
/*
* All of the composing functions
*/
 
/* Unified-alpha helper: fetch source pixel i, scaled by the alpha of
 * mask pixel i when a mask is present.  `mask` may be NULL (no mask). */
static force_inline uint64_t
combine_mask (const uint64_t *src, const uint64_t *mask, int i)
{
    uint64_t s, m;

    if (mask)
    {
	m = *(mask + i) >> A_SHIFT;    /* mask alpha only */

	if (!m)                        /* fully transparent: short-circuit */
	    return 0;
    }

    s = *(src + i);

    if (mask)
	UN16x4_MUL_UN16 (s, m);

    return s;
}
 
/* CLEAR: dest = 0. */
static void
combine_clear (pixman_implementation_t *imp,
               pixman_op_t op,
               uint64_t * dest,
               const uint64_t * src,
               const uint64_t * mask,
               int width)
{
    memset (dest, 0, width * sizeof(uint64_t));
}

/* DST: destination is left untouched. */
static void
combine_dst (pixman_implementation_t *imp,
             pixman_op_t op,
             uint64_t * dest,
             const uint64_t * src,
             const uint64_t * mask,
             int width)
{
    return;
}

/* SRC: dest = src (scaled by the mask alpha when a mask is present). */
static void
combine_src_u (pixman_implementation_t *imp,
               pixman_op_t op,
               uint64_t * dest,
               const uint64_t * src,
               const uint64_t * mask,
               int width)
{
    int i;

    if (!mask)
	memcpy (dest, src, width * sizeof (uint64_t));
    else
    {
	for (i = 0; i < width; ++i)
	{
	    uint64_t s = combine_mask (src, mask, i);

	    *(dest + i) = s;
	}
    }
}

/* OVER: dest = src + dest * (1 - src.alpha). */
/* if the Src is opaque, call combine_src_u */
static void
combine_over_u (pixman_implementation_t *imp,
                pixman_op_t op,
                uint64_t * dest,
                const uint64_t * src,
                const uint64_t * mask,
                int width)
{
    int i;

    for (i = 0; i < width; ++i)
    {
	uint64_t s = combine_mask (src, mask, i);
	uint64_t d = *(dest + i);
	uint64_t ia = ALPHA_16 (~s);    /* inverse source alpha */

	UN16x4_MUL_UN16_ADD_UN16x4 (d, ia, s);
	*(dest + i) = d;
    }
}

/* OVER_REVERSE: dest = dest + src * (1 - dest.alpha). */
/* if the Dst is opaque, this is a noop */
static void
combine_over_reverse_u (pixman_implementation_t *imp,
                        pixman_op_t op,
                        uint64_t * dest,
                        const uint64_t * src,
                        const uint64_t * mask,
                        int width)
{
    int i;

    for (i = 0; i < width; ++i)
    {
	uint64_t s = combine_mask (src, mask, i);
	uint64_t d = *(dest + i);
	uint64_t ia = ALPHA_16 (~*(dest + i));   /* inverse dest alpha */
	UN16x4_MUL_UN16_ADD_UN16x4 (s, ia, d);
	*(dest + i) = s;
    }
}

/* IN: dest = src * dest.alpha. */
/* if the Dst is opaque, call combine_src_u */
static void
combine_in_u (pixman_implementation_t *imp,
              pixman_op_t op,
              uint64_t * dest,
              const uint64_t * src,
              const uint64_t * mask,
              int width)
{
    int i;

    for (i = 0; i < width; ++i)
    {
	uint64_t s = combine_mask (src, mask, i);
	uint64_t a = ALPHA_16 (*(dest + i));
	UN16x4_MUL_UN16 (s, a);
	*(dest + i) = s;
    }
}

/* IN_REVERSE: dest = dest * src.alpha. */
/* if the Src is opaque, this is a noop */
static void
combine_in_reverse_u (pixman_implementation_t *imp,
                      pixman_op_t op,
                      uint64_t * dest,
                      const uint64_t * src,
                      const uint64_t * mask,
                      int width)
{
    int i;

    for (i = 0; i < width; ++i)
    {
	uint64_t s = combine_mask (src, mask, i);
	uint64_t d = *(dest + i);
	uint64_t a = ALPHA_16 (s);
	UN16x4_MUL_UN16 (d, a);
	*(dest + i) = d;
    }
}

/* OUT: dest = src * (1 - dest.alpha). */
/* if the Dst is opaque, call combine_clear */
static void
combine_out_u (pixman_implementation_t *imp,
               pixman_op_t op,
               uint64_t * dest,
               const uint64_t * src,
               const uint64_t * mask,
               int width)
{
    int i;

    for (i = 0; i < width; ++i)
    {
	uint64_t s = combine_mask (src, mask, i);
	uint64_t a = ALPHA_16 (~*(dest + i));
	UN16x4_MUL_UN16 (s, a);
	*(dest + i) = s;
    }
}
 
/* OUT_REVERSE: dest = dest * (1 - src.alpha). */
/* if the Src is opaque, call combine_clear */
static void
combine_out_reverse_u (pixman_implementation_t *imp,
                       pixman_op_t op,
                       uint64_t * dest,
                       const uint64_t * src,
                       const uint64_t * mask,
                       int width)
{
    int i;

    for (i = 0; i < width; ++i)
    {
	uint64_t s = combine_mask (src, mask, i);
	uint64_t d = *(dest + i);
	uint64_t a = ALPHA_16 (~s);
	UN16x4_MUL_UN16 (d, a);
	*(dest + i) = d;
    }
}

/* ATOP: dest = src * dest.alpha + dest * (1 - src.alpha). */
/* if the Src is opaque, call combine_in_u */
/* if the Dst is opaque, call combine_over_u */
/* if both the Src and Dst are opaque, call combine_src_u */
static void
combine_atop_u (pixman_implementation_t *imp,
                pixman_op_t op,
                uint64_t * dest,
                const uint64_t * src,
                const uint64_t * mask,
                int width)
{
    int i;

    for (i = 0; i < width; ++i)
    {
	uint64_t s = combine_mask (src, mask, i);
	uint64_t d = *(dest + i);
	uint64_t dest_a = ALPHA_16 (d);
	uint64_t src_ia = ALPHA_16 (~s);

	UN16x4_MUL_UN16_ADD_UN16x4_MUL_UN16 (s, dest_a, d, src_ia);
	*(dest + i) = s;
    }
}

/* ATOP_REVERSE: dest = src * (1 - dest.alpha) + dest * src.alpha. */
/* if the Src is opaque, call combine_over_reverse_u */
/* if the Dst is opaque, call combine_in_reverse_u */
/* if both the Src and Dst are opaque, call combine_dst_u */
static void
combine_atop_reverse_u (pixman_implementation_t *imp,
                        pixman_op_t op,
                        uint64_t * dest,
                        const uint64_t * src,
                        const uint64_t * mask,
                        int width)
{
    int i;

    for (i = 0; i < width; ++i)
    {
	uint64_t s = combine_mask (src, mask, i);
	uint64_t d = *(dest + i);
	uint64_t src_a = ALPHA_16 (s);
	uint64_t dest_ia = ALPHA_16 (~d);

	UN16x4_MUL_UN16_ADD_UN16x4_MUL_UN16 (s, dest_ia, d, src_a);
	*(dest + i) = s;
    }
}

/* XOR: dest = src * (1 - dest.alpha) + dest * (1 - src.alpha). */
/* if the Src is opaque, call combine_over_u */
/* if the Dst is opaque, call combine_over_reverse_u */
/* if both the Src and Dst are opaque, call combine_clear */
static void
combine_xor_u (pixman_implementation_t *imp,
               pixman_op_t op,
               uint64_t * dest,
               const uint64_t * src,
               const uint64_t * mask,
               int width)
{
    int i;

    for (i = 0; i < width; ++i)
    {
	uint64_t s = combine_mask (src, mask, i);
	uint64_t d = *(dest + i);
	uint64_t src_ia = ALPHA_16 (~s);
	uint64_t dest_ia = ALPHA_16 (~d);

	UN16x4_MUL_UN16_ADD_UN16x4_MUL_UN16 (s, dest_ia, d, src_ia);
	*(dest + i) = s;
    }
}

/* ADD: dest = clamp (dest + src), per channel. */
static void
combine_add_u (pixman_implementation_t *imp,
               pixman_op_t op,
               uint64_t * dest,
               const uint64_t * src,
               const uint64_t * mask,
               int width)
{
    int i;

    for (i = 0; i < width; ++i)
    {
	uint64_t s = combine_mask (src, mask, i);
	uint64_t d = *(dest + i);
	UN16x4_ADD_UN16x4 (d, s);
	*(dest + i) = d;
    }
}

/* SATURATE: add as much of the source as fits into the remaining
 * destination alpha; scale the source down by da/sa first if needed. */
/* if the Src is opaque, call combine_add_u */
/* if the Dst is opaque, call combine_add_u */
/* if both the Src and Dst are opaque, call combine_add_u */
static void
combine_saturate_u (pixman_implementation_t *imp,
                    pixman_op_t op,
                    uint64_t * dest,
                    const uint64_t * src,
                    const uint64_t * mask,
                    int width)
{
    int i;

    for (i = 0; i < width; ++i)
    {
	uint64_t s = combine_mask (src, mask, i);
	uint64_t d = *(dest + i);
	uint32_t sa, da;

	sa = s >> A_SHIFT;             /* source alpha */
	da = ~d >> A_SHIFT;            /* remaining (inverse) dest alpha */
	if (sa > da)
	{
	    sa = DIV_UN16 (da, sa);    /* scale factor da/sa */
	    UN16x4_MUL_UN16 (s, sa);
	}
	;                              /* (stray empty statement from the generator) */
	UN16x4_ADD_UN16x4 (d, s);
	*(dest + i) = d;
    }
}
 
/*
* PDF blend modes:
* The following blend modes have been taken from the PDF ISO 32000
* specification, which at this point in time is available from
* http://www.adobe.com/devnet/acrobat/pdfs/PDF32000_2008.pdf
* The relevant chapters are 11.3.5 and 11.3.6.
* The formula for computing the final pixel color given in 11.3.6 is:
* αr × Cr = (1 – αs) × αb × Cb + (1 – αb) × αs × Cs + αb × αs × B(Cb, Cs)
* with B() being the blend function.
* Note that OVER is a special case of this operation, using B(Cb, Cs) = Cs
*
* These blend modes should match the SVG filter draft specification, as
* it has been designed to mirror ISO 32000. Note that at the current point
* no released draft exists that shows this, as the formulas have not been
* updated yet after the release of ISO 32000.
*
* The default implementation here uses the PDF_SEPARABLE_BLEND_MODE and
* PDF_NON_SEPARABLE_BLEND_MODE macros, which take the blend function as an
* argument. Note that this implementation operates on premultiplied colors,
* while the PDF specification does not. Therefore the code uses the formula
* ar.Cra = (1 – as) . Dca + (1 – ad) . Sca + B(Dca, ad, Sca, as)
*/
 
/*
 * Multiply
 * B(Dca, ad, Sca, as) = Dca.Sca
 */

/* Unified-alpha multiply: dest = s*(1-da) + d*(1-sa) + d*s, following the
 * premultiplied PDF blend formula described in the comment block above. */
static void
combine_multiply_u (pixman_implementation_t *imp,
                    pixman_op_t op,
                    uint64_t * dest,
                    const uint64_t * src,
                    const uint64_t * mask,
                    int width)
{
    int i;

    for (i = 0; i < width; ++i)
    {
	uint64_t s = combine_mask (src, mask, i);
	uint64_t d = *(dest + i);
	uint64_t ss = s;
	uint64_t src_ia = ALPHA_16 (~s);
	uint64_t dest_ia = ALPHA_16 (~d);

	/* ss = s*(1-da) + d*(1-sa): the non-overlapping contributions */
	UN16x4_MUL_UN16_ADD_UN16x4_MUL_UN16 (ss, dest_ia, d, src_ia);
	UN16x4_MUL_UN16x4 (d, s);      /* d = d*s: the blend term */
	UN16x4_ADD_UN16x4 (d, ss);

	*(dest + i) = d;
    }
}

/* Component-alpha multiply: same formula, but the source alpha is the
 * per-channel mask m rather than a single alpha value. */
static void
combine_multiply_ca (pixman_implementation_t *imp,
                     pixman_op_t op,
                     uint64_t * dest,
                     const uint64_t * src,
                     const uint64_t * mask,
                     int width)
{
    int i;

    for (i = 0; i < width; ++i)
    {
	uint64_t m = *(mask + i);
	uint64_t s = *(src + i);
	uint64_t d = *(dest + i);
	uint64_t r = d;
	uint64_t dest_ia = ALPHA_16 (~d);

	combine_mask_value_ca (&s, &m);

	/* r = d*(1-m) + s*(1-da): the non-overlapping contributions */
	UN16x4_MUL_UN16x4_ADD_UN16x4_MUL_UN16 (r, ~m, s, dest_ia);
	UN16x4_MUL_UN16x4 (d, s);      /* d = d*s: the blend term */
	UN16x4_ADD_UN16x4 (r, d);

	*(dest + i) = r;
    }
}
 
/* Generate the unified-alpha (combine_<name>_u) and component-alpha
 * (combine_<name>_ca) combiners for a separable PDF blend mode, given a
 * per-channel blend function blend_<name> (dca, da, sca, sa).  Each pixel
 * is computed as s*(1-da) + d*(1-sa) plus the blended term per channel,
 * with the result alpha sa*da added on top (see the PDF blend-mode
 * comment earlier in this file). */
#define PDF_SEPARABLE_BLEND_MODE(name) \
    static void \
    combine_ ## name ## _u (pixman_implementation_t *imp, \
			    pixman_op_t op, \
			    uint64_t * dest, \
			    const uint64_t * src, \
			    const uint64_t * mask, \
			    int width) \
    { \
	int i; \
	for (i = 0; i < width; ++i) { \
	    uint64_t s = combine_mask (src, mask, i); \
	    uint64_t d = *(dest + i); \
	    uint16_t sa = ALPHA_16 (s); \
	    uint16_t isa = ~sa; \
	    uint16_t da = ALPHA_16 (d); \
	    uint16_t ida = ~da; \
	    uint64_t result; \
 \
	    result = d; \
	    UN16x4_MUL_UN16_ADD_UN16x4_MUL_UN16 (result, isa, s, ida); \
 \
	    *(dest + i) = result + \
		(DIV_ONE_UN16 (sa * da) << A_SHIFT) + \
		(blend_ ## name (RED_16 (d), da, RED_16 (s), sa) << R_SHIFT) + \
		(blend_ ## name (GREEN_16 (d), da, GREEN_16 (s), sa) << G_SHIFT) + \
		(blend_ ## name (BLUE_16 (d), da, BLUE_16 (s), sa)); \
	} \
    } \
 \
    static void \
    combine_ ## name ## _ca (pixman_implementation_t *imp, \
			     pixman_op_t op, \
			     uint64_t * dest, \
			     const uint64_t * src, \
			     const uint64_t * mask, \
			     int width) \
    { \
	int i; \
	for (i = 0; i < width; ++i) { \
	    uint64_t m = *(mask + i); \
	    uint64_t s = *(src + i); \
	    uint64_t d = *(dest + i); \
	    uint16_t da = ALPHA_16 (d); \
	    uint16_t ida = ~da; \
	    uint64_t result; \
 \
	    combine_mask_value_ca (&s, &m); \
 \
	    result = d; \
	    UN16x4_MUL_UN16x4_ADD_UN16x4_MUL_UN16 (result, ~m, s, ida); \
 \
	    result += \
	        (DIV_ONE_UN16 (ALPHA_16 (m) * da) << A_SHIFT) + \
	        (blend_ ## name (RED_16 (d), da, RED_16 (s), RED_16 (m)) << R_SHIFT) + \
	        (blend_ ## name (GREEN_16 (d), da, GREEN_16 (s), GREEN_16 (m)) << G_SHIFT) + \
	        (blend_ ## name (BLUE_16 (d), da, BLUE_16 (s), BLUE_16 (m))); \
 \
	    *(dest + i) = result; \
	} \
    }
 
/*
* Screen
* B(Dca, ad, Sca, as) = Dca.sa + Sca.da - Dca.Sca
*/
/* Screen channel blend: Sca.Da + Dca.Sa - Sca.Dca */
static inline uint64_t
blend_screen (uint64_t dca, uint64_t da, uint64_t sca, uint64_t sa)
{
    uint64_t sum = sca * da + dca * sa;

    return DIV_ONE_UN16 (sum - sca * dca);
}

PDF_SEPARABLE_BLEND_MODE (screen)
 
/*
* Overlay
* B(Dca, Da, Sca, Sa) =
* if 2.Dca < Da
* 2.Sca.Dca
* otherwise
* Sa.Da - 2.(Da - Dca).(Sa - Sca)
*/
/* Overlay channel blend: multiply for dark destinations
 * (2.Dca < Da), screen-like otherwise. */
static inline uint64_t
blend_overlay (uint64_t dca, uint64_t da, uint64_t sca, uint64_t sa)
{
    if (dca * 2 < da)
	return DIV_ONE_UN16 (2 * sca * dca);

    return DIV_ONE_UN16 (sa * da - 2 * (da - dca) * (sa - sca));
}

PDF_SEPARABLE_BLEND_MODE (overlay)
 
/*
* Darken
* B(Dca, Da, Sca, Sa) = min (Sca.Da, Dca.Sa)
*/
/* Darken channel blend: min (Sca.Da, Dca.Sa) */
static inline uint64_t
blend_darken (uint64_t dca, uint64_t da, uint64_t sca, uint64_t sa)
{
    uint64_t sd = sca * da;
    uint64_t ds = dca * sa;

    return DIV_ONE_UN16 (sd > ds ? ds : sd);
}

PDF_SEPARABLE_BLEND_MODE (darken)
 
/*
* Lighten
* B(Dca, Da, Sca, Sa) = max (Sca.Da, Dca.Sa)
*/
/* Lighten channel blend: max (Sca.Da, Dca.Sa) */
static inline uint64_t
blend_lighten (uint64_t dca, uint64_t da, uint64_t sca, uint64_t sa)
{
    uint64_t sd = sca * da;
    uint64_t ds = dca * sa;

    return DIV_ONE_UN16 (sd < ds ? ds : sd);
}

PDF_SEPARABLE_BLEND_MODE (lighten)
 
/*
* Color dodge
* B(Dca, Da, Sca, Sa) =
* if Dca == 0
* 0
* if Sca == Sa
* Sa.Da
* otherwise
* Sa.Da. min (1, Dca / Da / (1 - Sca/Sa))
*/
/* Color-dodge channel blend:
 * 0 when the dest channel is 0, Sa.Da when the source channel is
 * saturated, otherwise Sa.Da.min (1, (Dca/Da) / (1 - Sca/Sa)). */
static inline uint64_t
blend_color_dodge (uint64_t dca, uint64_t da, uint64_t sca, uint64_t sa)
{
    if (sca < sa)
    {
	uint64_t tmp = dca * sa / (sa - sca);

	return DIV_ONE_UN16 (sa * MIN (tmp, da));
    }

    /* Sca == Sa: full dodge unless the dest channel is empty. */
    return dca == 0 ? 0 : DIV_ONE_UN16 (sa * da);
}

PDF_SEPARABLE_BLEND_MODE (color_dodge)
 
/*
* Color burn
* B(Dca, Da, Sca, Sa) =
* if Dca == Da
* Sa.Da
* if Sca == 0
* 0
* otherwise
* Sa.Da.(1 - min (1, (1 - Dca/Da).Sa / Sca))
*/
/* Color-burn channel blend:
 * Sa.Da when the dest channel is full, 0 when the source channel is 0,
 * otherwise Sa.Da.(1 - min (1, (1 - Dca/Da).Sa / Sca)). */
static inline uint64_t
blend_color_burn (uint64_t dca, uint64_t da, uint64_t sca, uint64_t sa)
{
    if (sca != 0)
    {
	uint64_t tmp = (da - dca) * sa / sca;

	return DIV_ONE_UN16 (sa * (MAX (tmp, da) - tmp));
    }

    return dca < da ? 0 : DIV_ONE_UN16 (sa * da);
}

PDF_SEPARABLE_BLEND_MODE (color_burn)
 
/*
* Hard light
* B(Dca, Da, Sca, Sa) =
* if 2.Sca < Sa
* 2.Sca.Dca
* otherwise
* Sa.Da - 2.(Da - Dca).(Sa - Sca)
*/
/* Hard-light channel blend: overlay with source and dest swapped. */
static inline uint64_t
blend_hard_light (uint64_t dca, uint64_t da, uint64_t sca, uint64_t sa)
{
    uint64_t rca;

    if (sca * 2 < sa)
	rca = 2 * sca * dca;
    else
	rca = sa * da - 2 * (da - dca) * (sa - sca);

    return DIV_ONE_UN16 (rca);
}

PDF_SEPARABLE_BLEND_MODE (hard_light)
 
/*
* Soft light
* B(Dca, Da, Sca, Sa) =
* if (2.Sca <= Sa)
* Dca.(Sa - (1 - Dca/Da).(2.Sca - Sa))
* otherwise if Dca.4 <= Da
* Dca.(Sa + (2.Sca - Sa).((16.Dca/Da - 12).Dca/Da + 3)
* otherwise
* (Dca.Sa + (SQRT (Dca/Da).Da - Dca).(2.Sca - Sa))
*/
/* Soft-light channel blend, computed in floating point: the piecewise
 * cubic / square-root definition from the PDF spec is not practical in
 * 16-bit integer arithmetic.  Inputs are premultiplied channel values in
 * [0, MASK]; the result is scaled back to that range and rounded. */
static inline uint64_t
blend_soft_light (uint64_t dca_org,
		  uint64_t da_org,
		  uint64_t sca_org,
		  uint64_t sa_org)
{
    /* normalize everything to [0.0, 1.0] */
    double dca = dca_org * (1.0 / MASK);
    double da = da_org * (1.0 / MASK);
    double sca = sca_org * (1.0 / MASK);
    double sa = sa_org * (1.0 / MASK);
    double rca;

    if (2 * sca < sa)
    {
	/* darkening branch (2.Sca < Sa); guard the division by da */
	if (da == 0)
	    rca = dca * sa;
	else
	    rca = dca * sa - dca * (da - dca) * (sa - 2 * sca) / da;
    }
    else if (da == 0)
    {
	rca = 0;
    }
    else if (4 * dca <= da)
    {
	/* lightening branch, dark destination: cubic ramp */
	rca = dca * sa +
	    (2 * sca - sa) * dca * ((16 * dca / da - 12) * dca / da + 3);
    }
    else
    {
	/* lightening branch, bright destination: square-root ramp */
	rca = dca * sa + (sqrt (dca * da) - dca) * (2 * sca - sa);
    }
    /* scale back and round to nearest */
    return rca * MASK + 0.5;
}

PDF_SEPARABLE_BLEND_MODE (soft_light)
 
/*
* Difference
* B(Dca, Da, Sca, Sa) = abs (Dca.Sa - Sca.Da)
*/
/* Difference channel blend: abs (Dca.Sa - Sca.Da) */
static inline uint64_t
blend_difference (uint64_t dca, uint64_t da, uint64_t sca, uint64_t sa)
{
    uint64_t ds = dca * sa;
    uint64_t sd = sca * da;

    return DIV_ONE_UN16 (ds > sd ? ds - sd : sd - ds);
}

PDF_SEPARABLE_BLEND_MODE (difference)
 
/*
* Exclusion
* B(Dca, Da, Sca, Sa) = (Sca.Da + Dca.Sa - 2.Sca.Dca)
*/
 
/* This can be made faster by writing it directly and not using
* PDF_SEPARABLE_BLEND_MODE, but that's a performance optimization */
 
/* Exclusion channel blend: Sca.Da + Dca.Sa - 2.Sca.Dca */
static inline uint64_t
blend_exclusion (uint64_t dca, uint64_t da, uint64_t sca, uint64_t sa)
{
    uint64_t both = dca * sca;

    return DIV_ONE_UN16 (sca * da + dca * sa - both - both);
}

PDF_SEPARABLE_BLEND_MODE (exclusion)
 
#undef PDF_SEPARABLE_BLEND_MODE
 
/*
 * PDF nonseparable blend modes are implemented using the following functions
* to operate in Hsl space, with Cmax, Cmid, Cmin referring to the max, mid
* and min value of the red, green and blue components.
*
* LUM (C) = 0.3 × Cred + 0.59 × Cgreen + 0.11 × Cblue
*
* clip_color (C):
* l = LUM (C)
* min = Cmin
* max = Cmax
 * if min < 0.0
 * C = l + ( ( ( C – l ) × l ) ⁄ ( l – min ) )
 * if max > 1.0
 * C = l + ( ( ( C – l ) × ( 1 – l ) ) ⁄ ( max – l ) )
* return C
*
* set_lum (C, l):
* d = l – LUM (C)
* C += d
* return clip_color (C)
*
* SAT (C) = CH_MAX (C) - CH_MIN (C)
*
* set_sat (C, s):
* if Cmax > Cmin
* Cmid = ( ( ( Cmid – Cmin ) × s ) ⁄ ( Cmax – Cmin ) )
* Cmax = s
* else
* Cmid = Cmax = 0.0
* Cmin = 0.0
* return C
*/
 
/* For premultiplied colors, we need to know what happens when C is
* multiplied by a real number. LUM and SAT are linear:
*
* LUM (r × C) = r × LUM (C) SAT (r * C) = r * SAT (C)
*
* If we extend clip_color with an extra argument a and change
*
 * if max >= 1.0
*
* into
*
 * if max >= a
*
* then clip_color is also linear:
*
* r * clip_color (C, a) = clip_color (r_c, ra);
*
* for positive r.
*
* Similarly, we can extend set_lum with an extra argument that is just passed
* on to clip_color:
*
* r * set_lum ( C, l, a)
*
* = r × clip_color ( C + l - LUM (C), a)
*
* = clip_color ( r * C + r × l - r * LUM (C), r * a)
*
* = set_lum ( r * C, r * l, r * a)
*
* Finally, set_sat:
*
* r * set_sat (C, s) = set_sat (x * C, r * s)
*
 * The above holds for all non-zero x, because the x'es in the fraction for
* C_mid cancel out. Specifically, it holds for x = r:
*
* r * set_sat (C, s) = set_sat (r_c, rs)
*
*/
 
/* So, for the non-separable PDF blend modes, we have (using s, d for
* non-premultiplied colors, and S, D for premultiplied:
*
* Color:
*
* a_s * a_d * B(s, d)
* = a_s * a_d * set_lum (S/a_s, LUM (D/a_d), 1)
* = set_lum (S * a_d, a_s * LUM (D), a_s * a_d)
*
*
* Luminosity:
*
* a_s * a_d * B(s, d)
* = a_s * a_d * set_lum (D/a_d, LUM(S/a_s), 1)
* = set_lum (a_s * D, a_d * LUM(S), a_s * a_d)
*
*
* Saturation:
*
* a_s * a_d * B(s, d)
* = a_s * a_d * set_lum (set_sat (D/a_d, SAT (S/a_s)), LUM (D/a_d), 1)
* = set_lum (a_s * a_d * set_sat (D/a_d, SAT (S/a_s)),
* a_s * LUM (D), a_s * a_d)
 * = set_lum (set_sat (a_s * D, a_d * SAT (S)), a_s * LUM (D), a_s * a_d)
*
* Hue:
*
* a_s * a_d * B(s, d)
* = a_s * a_d * set_lum (set_sat (S/a_s, SAT (D/a_d)), LUM (D/a_d), 1)
* = a_s * a_d * set_lum (set_sat (a_d * S, a_s * SAT (D)),
* a_s * LUM (D), a_s * a_d)
*
*/
 
/* Smallest / largest of the three channels of c[] = {r, g, b}. */
#define CH_MIN(c) (c[0] < c[1] ? (c[0] < c[2] ? c[0] : c[2]) : (c[1] < c[2] ? c[1] : c[2]))
#define CH_MAX(c) (c[0] > c[1] ? (c[0] > c[2] ? c[0] : c[2]) : (c[1] > c[2] ? c[1] : c[2]))
/* Integer luminosity: 0.30 r + 0.59 g + 0.11 b, via percent weights. */
#define LUM(c) ((c[0] * 30 + c[1] * 59 + c[2] * 11) / 100)
/* Saturation: spread between the largest and smallest channel. */
#define SAT(c) (CH_MAX (c) - CH_MIN (c))
 
/* Generates the unified-alpha (_u) combiner for one non-separable (HSL)
 * PDF blend mode.  The caller must have defined
 * blend_<name> (c, dc, da, sc, sa), which fills c[0..2] with the
 * premultiplied blended r/g/b values (scaled by sa.da).  The generated
 * combiner adds the usual (1 - as).d + (1 - ad).s uncovered parts and
 * the as.ad alpha term, like PDF_SEPARABLE_BLEND_MODE does. */
#define PDF_NON_SEPARABLE_BLEND_MODE(name)				\
    static void								\
    combine_ ## name ## _u (pixman_implementation_t *imp,		\
			    pixman_op_t op,				\
                            uint64_t *dest,				\
			    const uint64_t *src,			\
			    const uint64_t *mask,			\
			    int width)					\
    {									\
	int i;								\
	for (i = 0; i < width; ++i)					\
	{								\
	    uint64_t s = combine_mask (src, mask, i);			\
	    uint64_t d = *(dest + i);					\
	    uint16_t sa = ALPHA_16 (s);					\
	    uint16_t isa = ~sa;						\
	    uint16_t da = ALPHA_16 (d);					\
	    uint16_t ida = ~da;						\
	    uint64_t result;						\
	    uint64_t sc[3], dc[3], c[3];				\
            								\
	    result = d;							\
	    UN16x4_MUL_UN16_ADD_UN16x4_MUL_UN16 (result, isa, s, ida);	\
	    dc[0] = RED_16 (d);						\
	    sc[0] = RED_16 (s);						\
	    dc[1] = GREEN_16 (d);					\
	    sc[1] = GREEN_16 (s);					\
	    dc[2] = BLUE_16 (d);					\
	    sc[2] = BLUE_16 (s);					\
	    blend_ ## name (c, dc, da, sc, sa);				\
	    								\
	    *(dest + i) = result +					\
		(DIV_ONE_UN16 (sa * da) << A_SHIFT) +			\
		(DIV_ONE_UN16 (c[0]) << R_SHIFT) +			\
		(DIV_ONE_UN16 (c[1]) << G_SHIFT) +			\
		(DIV_ONE_UN16 (c[2]));					\
	}								\
    }
 
/* set_lum from the PDF spec, extended for premultiplied colors: shift
 * src[] so its luminosity equals 'lum', then clip each channel into
 * [0, sa] (instead of [0, 1]; see the linearity argument in the comment
 * block above).  Values are converted to doubles in [0, 1] for the
 * computation and rounded back on the way out.  dest may alias src. */
static void
set_lum (uint64_t dest[3], uint64_t src[3], uint64_t sa, uint64_t lum)
{
    double a, l, min, max;
    double tmp[3];

    a = sa * (1.0 / MASK);

    l = lum * (1.0 / MASK);
    tmp[0] = src[0] * (1.0 / MASK);
    tmp[1] = src[1] * (1.0 / MASK);
    tmp[2] = src[2] * (1.0 / MASK);

    /* shift all channels so LUM (tmp) becomes the requested lum */
    l = l - LUM (tmp);
    tmp[0] += l;
    tmp[1] += l;
    tmp[2] += l;

    /* clip_color */
    l = LUM (tmp);
    min = CH_MIN (tmp);
    max = CH_MAX (tmp);

    if (min < 0)
    {
	if (l - min == 0.0)
	{
	    /* degenerate: all channels equal and negative */
	    tmp[0] = 0;
	    tmp[1] = 0;
	    tmp[2] = 0;
	}
	else
	{
	    /* scale toward l so the minimum lands on 0 */
	    tmp[0] = l + (tmp[0] - l) * l / (l - min);
	    tmp[1] = l + (tmp[1] - l) * l / (l - min);
	    tmp[2] = l + (tmp[2] - l) * l / (l - min);
	}
    }
    if (max > a)
    {
	if (max - l == 0.0)
	{
	    /* degenerate: all channels equal and above the alpha limit */
	    tmp[0] = a;
	    tmp[1] = a;
	    tmp[2] = a;
	}
	else
	{
	    /* scale toward l so the maximum lands on a */
	    tmp[0] = l + (tmp[0] - l) * (a - l) / (max - l);
	    tmp[1] = l + (tmp[1] - l) * (a - l) / (max - l);
	    tmp[2] = l + (tmp[2] - l) * (a - l) / (max - l);
	}
    }

    /* back to integer channel values, rounding to nearest */
    dest[0] = tmp[0] * MASK + 0.5;
    dest[1] = tmp[1] * MASK + 0.5;
    dest[2] = tmp[2] * MASK + 0.5;
}
 
/* set_sat from the PDF spec: rescale dest[] so that its channel spread
 * (saturation) becomes 'sat', keeping the channel ORDER given by src[]
 * (the callers pass dest == src).  The middle channel is interpolated
 * into [0, sat]; the min channel goes to 0 and the max to sat.  If the
 * channels have no spread, everything collapses to 0. */
static void
set_sat (uint64_t dest[3], uint64_t src[3], uint64_t sat)
{
    int hi, mid, lo;
    uint64_t top, bottom;

    /* Rank src's channels; hi/mid/lo index the max, middle and min.
     * Ties resolve exactly as the strict '>' comparisons dictate. */
    if (src[0] > src[1])
    {
	if (src[0] > src[2])
	{
	    hi = 0;
	    if (src[1] > src[2])
	    {
		mid = 1;
		lo = 2;
	    }
	    else
	    {
		mid = 2;
		lo = 1;
	    }
	}
	else
	{
	    hi = 2;
	    mid = 0;
	    lo = 1;
	}
    }
    else
    {
	if (src[0] > src[2])
	{
	    hi = 1;
	    mid = 0;
	    lo = 2;
	}
	else
	{
	    lo = 0;
	    if (src[1] > src[2])
	    {
		hi = 1;
		mid = 2;
	    }
	    else
	    {
		hi = 2;
		mid = 1;
	    }
	}
    }

    top = dest[hi];
    bottom = dest[lo];

    if (top <= bottom)
    {
	/* no spread between channels: saturation collapses to zero */
	dest[0] = dest[1] = dest[2] = 0;
	return;
    }

    dest[mid] = (dest[mid] - bottom) * sat / (top - bottom);
    dest[hi] = sat;
    dest[lo] = 0;
}
 
/*
* Hue:
* B(Cb, Cs) = set_lum (set_sat (Cs, SAT (Cb)), LUM (Cb))
*/
/* Hue blend: keep dest's saturation and luminosity, take src's hue.
 * B(Cb, Cs) = set_lum (set_sat (Cs, SAT (Cb)), LUM (Cb)) */
static inline void
blend_hsl_hue (uint64_t c[3],
               uint64_t dc[3],
               uint64_t da,
               uint64_t sc[3],
               uint64_t sa)
{
    int ch;

    for (ch = 0; ch < 3; ch++)
	c[ch] = sc[ch] * da;

    set_sat (c, c, SAT (dc) * sa);
    set_lum (c, c, sa * da, LUM (dc) * sa);
}

PDF_NON_SEPARABLE_BLEND_MODE (hsl_hue)
 
/*
* Saturation:
* B(Cb, Cs) = set_lum (set_sat (Cb, SAT (Cs)), LUM (Cb))
*/
/* Saturation blend: keep dest's hue and luminosity, take src's saturation.
 * B(Cb, Cs) = set_lum (set_sat (Cb, SAT (Cs)), LUM (Cb)) */
static inline void
blend_hsl_saturation (uint64_t c[3],
                      uint64_t dc[3],
                      uint64_t da,
                      uint64_t sc[3],
                      uint64_t sa)
{
    int ch;

    for (ch = 0; ch < 3; ch++)
	c[ch] = dc[ch] * sa;

    set_sat (c, c, SAT (sc) * da);
    set_lum (c, c, sa * da, LUM (dc) * sa);
}

PDF_NON_SEPARABLE_BLEND_MODE (hsl_saturation)
 
/*
* Color:
* B(Cb, Cs) = set_lum (Cs, LUM (Cb))
*/
/* Color blend: take src's hue and saturation, keep dest's luminosity.
 * B(Cb, Cs) = set_lum (Cs, LUM (Cb)) */
static inline void
blend_hsl_color (uint64_t c[3],
                 uint64_t dc[3],
                 uint64_t da,
                 uint64_t sc[3],
                 uint64_t sa)
{
    int ch;

    for (ch = 0; ch < 3; ch++)
	c[ch] = sc[ch] * da;

    set_lum (c, c, sa * da, LUM (dc) * sa);
}

PDF_NON_SEPARABLE_BLEND_MODE (hsl_color)
 
/*
* Luminosity:
* B(Cb, Cs) = set_lum (Cb, LUM (Cs))
*/
/* Luminosity blend: keep dest's hue and saturation, take src's luminosity.
 * B(Cb, Cs) = set_lum (Cb, LUM (Cs)) */
static inline void
blend_hsl_luminosity (uint64_t c[3],
                      uint64_t dc[3],
                      uint64_t da,
                      uint64_t sc[3],
                      uint64_t sa)
{
    int ch;

    for (ch = 0; ch < 3; ch++)
	c[ch] = dc[ch] * sa;

    set_lum (c, c, sa * da, LUM (sc) * da);
}

PDF_NON_SEPARABLE_BLEND_MODE (hsl_luminosity)
 
#undef SAT
#undef LUM
#undef CH_MAX
#undef CH_MIN
#undef PDF_NON_SEPARABLE_BLEND_MODE
 
/*
*
* All of the disjoint composing functions
*
* The four entries in the first column indicate what source contributions
* come from each of the four areas of the picture -- areas covered by neither
* A nor B, areas covered only by A, areas covered only by B and finally
* areas covered by both A and B.
*
* Disjoint Conjoint
* Fa Fb Fa Fb
* (0,0,0,0) 0 0 0 0
* (0,A,0,A) 1 0 1 0
* (0,0,B,B) 0 1 0 1
* (0,A,B,A) 1 min((1-a)/b,1) 1 max(1-a/b,0)
* (0,A,B,B) min((1-b)/a,1) 1 max(1-b/a,0) 1
* (0,0,0,A) max(1-(1-b)/a,0) 0 min(1,b/a) 0
* (0,0,0,B) 0 max(1-(1-a)/b,0) 0 min(a/b,1)
* (0,A,0,0) min(1,(1-b)/a) 0 max(1-b/a,0) 0
* (0,0,B,0) 0 min(1,(1-a)/b) 0 max(1-a/b,0)
* (0,0,B,A) max(1-(1-b)/a,0) min(1,(1-a)/b) min(1,b/a) max(1-a/b,0)
* (0,A,0,B) min(1,(1-b)/a) max(1-(1-a)/b,0) max(1-b/a,0) min(1,a/b)
* (0,A,B,0) min(1,(1-b)/a) min(1,(1-a)/b) max(1-b/a,0) max(1-a/b,0)
*/
 
/* Bit flags selecting which of the four coverage areas contribute to a
 * disjoint/conjoint operator (see the Fa/Fb table above): *_OUT is the
 * part covered by one image only, *_IN the part covered by both. */
#define COMBINE_A_OUT 1
#define COMBINE_A_IN 2
#define COMBINE_B_OUT 4
#define COMBINE_B_IN 8

/* Operator selectors built from the flags; passed as 'combine' to the
 * combine_{dis,con}joint_general_* helpers. */
#define COMBINE_CLEAR 0
#define COMBINE_A (COMBINE_A_OUT | COMBINE_A_IN)
#define COMBINE_B (COMBINE_B_OUT | COMBINE_B_IN)
#define COMBINE_A_OVER (COMBINE_A_OUT | COMBINE_B_OUT | COMBINE_A_IN)
#define COMBINE_B_OVER (COMBINE_A_OUT | COMBINE_B_OUT | COMBINE_B_IN)
#define COMBINE_A_ATOP (COMBINE_B_OUT | COMBINE_A_IN)
#define COMBINE_B_ATOP (COMBINE_A_OUT | COMBINE_B_IN)
#define COMBINE_XOR (COMBINE_A_OUT | COMBINE_B_OUT)
 
/* portion covered by a but not b */
/* Disjoint "out" factor: fraction of a not covered by b,
 * min (1, (1 - b) / a). */
static uint16_t
combine_disjoint_out_part (uint16_t a, uint16_t b)
{
    uint16_t ib = ~b;            /* 1 - b */

    if (ib >= a)                 /* (1 - b) / a >= 1 */
	return MASK;

    return DIV_UN16 (ib, a);     /* (1 - b) / a */
}
 
/* portion covered by both a and b */
/* Disjoint "in" factor: fraction of a covered by b,
 * max (1 - (1 - b) / a, 0) = 1 - min (1, (1 - b) / a). */
static uint16_t
combine_disjoint_in_part (uint16_t a, uint16_t b)
{
    uint16_t ib = ~b;            /* 1 - b */

    if (ib >= a)                 /* (1 - b) / a >= 1 */
	return 0;

    return ~DIV_UN16 (ib, a);    /* 1 - (1 - b) / a */
}
 
/* portion covered by a but not b */
/* Conjoint "out" factor: max (1 - b/a, 0) = 1 - min (b/a, 1). */
static uint16_t
combine_conjoint_out_part (uint16_t a, uint16_t b)
{
    if (b >= a)                  /* b/a >= 1 */
	return 0;

    return ~DIV_UN16 (b, a);     /* 1 - b/a */
}
 
/* portion covered by both a and b */
/* Conjoint "in" factor: min (1, b/a). */
static uint16_t
combine_conjoint_in_part (uint16_t a, uint16_t b)
{
    return b >= a ? MASK : DIV_UN16 (b, a);
}
 
/* Extract the 16-bit channel of v that sits at bit offset i. */
#define GET_COMP(v, i) ((uint32_t) (uint16_t) ((v) >> i))

/* Saturating add of one channel of x and y at offset i, returned in
 * place; t is scratch.  The (t | (0 - (t >> G_SHIFT))) term forces the
 * channel to all-ones when the sum carried past 16 bits. */
#define ADD(x, y, i, t)                                 \
    ((t) = GET_COMP (x, i) + GET_COMP (y, i),           \
     (uint64_t) ((uint16_t) ((t) | (0 - ((t) >> G_SHIFT)))) << (i))

/* One channel of ax.x + ay.y at offset i, with saturation as in ADD;
 * t, u, v are scratch for the MUL_UN16 rounding. */
#define GENERIC(x, y, i, ax, ay, t, u, v)               \
    ((t) = (MUL_UN16 (GET_COMP (y, i), ay, (u)) +       \
            MUL_UN16 (GET_COMP (x, i), ax, (v))),       \
     (uint64_t) ((uint16_t) ((t) |                      \
                             (0 - ((t) >> G_SHIFT)))) << (i))
 
/* Generic disjoint combiner: derive the source factor Fa and dest
 * factor Fb from the operator table (disjoint column) and form
 * Fa.s + Fb.d per channel with saturating arithmetic.  'combine' packs
 * a COMBINE_A_* selector and a COMBINE_B_* selector. */
static void
combine_disjoint_general_u (uint64_t *      dest,
                            const uint64_t *src,
                            const uint64_t *mask,
                            int             width,
                            uint16_t        combine)
{
    int i;

    for (i = 0; i < width; ++i)
    {
	uint64_t s = combine_mask (src, mask, i);
	uint64_t d = *(dest + i);
	uint64_t m, n, o, p;
	uint32_t Fa, Fb, t, u, v;
	uint16_t sa = s >> A_SHIFT;
	uint16_t da = d >> A_SHIFT;

	/* source factor from the A-selector */
	switch (combine & COMBINE_A)
	{
	default:
	    Fa = 0;
	    break;

	case COMBINE_A_OUT:
	    Fa = combine_disjoint_out_part (sa, da);
	    break;

	case COMBINE_A_IN:
	    Fa = combine_disjoint_in_part (sa, da);
	    break;

	case COMBINE_A:
	    Fa = MASK;
	    break;
	}

	/* destination factor from the B-selector (arguments swapped) */
	switch (combine & COMBINE_B)
	{
	default:
	    Fb = 0;
	    break;

	case COMBINE_B_OUT:
	    Fb = combine_disjoint_out_part (da, sa);
	    break;

	case COMBINE_B_IN:
	    Fb = combine_disjoint_in_part (da, sa);
	    break;

	case COMBINE_B:
	    Fb = MASK;
	    break;
	}
	/* Fa.s + Fb.d for each of the four channels */
	m = GENERIC (s, d, 0, Fa, Fb, t, u, v);
	n = GENERIC (s, d, G_SHIFT, Fa, Fb, t, u, v);
	o = GENERIC (s, d, R_SHIFT, Fa, Fb, t, u, v);
	p = GENERIC (s, d, A_SHIFT, Fa, Fb, t, u, v);
	s = m | n | o | p;
	*(dest + i) = s;
    }
}
 
/* Disjoint OVER, open-coded for speed: d = s + Fb.d with
 * Fb = min (1, (1 - sa) / da). */
static void
combine_disjoint_over_u (pixman_implementation_t *imp,
                         pixman_op_t              op,
                         uint64_t *               dest,
                         const uint64_t *         src,
                         const uint64_t *         mask,
                         int                      width)
{
    int i;

    for (i = 0; i < width; ++i)
    {
	uint64_t s = combine_mask (src, mask, i);
	uint32_t a = s >> A_SHIFT;

	/* a fully transparent source leaves dest untouched */
	if (s == 0)
	    continue;

	uint64_t d = dest[i];

	a = combine_disjoint_out_part (d >> A_SHIFT, a);
	UN16x4_MUL_UN16_ADD_UN16x4 (d, a, s);

	dest[i] = d;
    }
}
 
/* Thin wrappers: each disjoint operator delegates to
 * combine_disjoint_general_u with the matching COMBINE_* selector. */

/* disjoint IN: Fa = max (1 - (1-da)/sa, 0), Fb = 0 */
static void
combine_disjoint_in_u (pixman_implementation_t *imp,
                       pixman_op_t              op,
                       uint64_t *               dest,
                       const uint64_t *         src,
                       const uint64_t *         mask,
                       int                      width)
{
    combine_disjoint_general_u (dest, src, mask, width, COMBINE_A_IN);
}

/* disjoint IN reverse: Fa = 0, Fb = max (1 - (1-sa)/da, 0) */
static void
combine_disjoint_in_reverse_u (pixman_implementation_t *imp,
                               pixman_op_t              op,
                               uint64_t *               dest,
                               const uint64_t *         src,
                               const uint64_t *         mask,
                               int                      width)
{
    combine_disjoint_general_u (dest, src, mask, width, COMBINE_B_IN);
}

/* disjoint OUT: Fa = min (1, (1-da)/sa), Fb = 0 */
static void
combine_disjoint_out_u (pixman_implementation_t *imp,
                        pixman_op_t              op,
                        uint64_t *               dest,
                        const uint64_t *         src,
                        const uint64_t *         mask,
                        int                      width)
{
    combine_disjoint_general_u (dest, src, mask, width, COMBINE_A_OUT);
}

/* disjoint OUT reverse: Fa = 0, Fb = min (1, (1-sa)/da) */
static void
combine_disjoint_out_reverse_u (pixman_implementation_t *imp,
                                pixman_op_t              op,
                                uint64_t *               dest,
                                const uint64_t *         src,
                                const uint64_t *         mask,
                                int                      width)
{
    combine_disjoint_general_u (dest, src, mask, width, COMBINE_B_OUT);
}

/* disjoint ATOP: source IN part plus dest OUT part */
static void
combine_disjoint_atop_u (pixman_implementation_t *imp,
                         pixman_op_t              op,
                         uint64_t *               dest,
                         const uint64_t *         src,
                         const uint64_t *         mask,
                         int                      width)
{
    combine_disjoint_general_u (dest, src, mask, width, COMBINE_A_ATOP);
}

/* disjoint ATOP reverse: source OUT part plus dest IN part */
static void
combine_disjoint_atop_reverse_u (pixman_implementation_t *imp,
                                 pixman_op_t              op,
                                 uint64_t *               dest,
                                 const uint64_t *         src,
                                 const uint64_t *         mask,
                                 int                      width)
{
    combine_disjoint_general_u (dest, src, mask, width, COMBINE_B_ATOP);
}

/* disjoint XOR: both OUT parts, no IN part */
static void
combine_disjoint_xor_u (pixman_implementation_t *imp,
                        pixman_op_t              op,
                        uint64_t *               dest,
                        const uint64_t *         src,
                        const uint64_t *         mask,
                        int                      width)
{
    combine_disjoint_general_u (dest, src, mask, width, COMBINE_XOR);
}
 
/* Generic conjoint combiner: same structure as
 * combine_disjoint_general_u, but Fa/Fb come from the conjoint column
 * of the operator table (combine_conjoint_{out,in}_part). */
static void
combine_conjoint_general_u (uint64_t *      dest,
                            const uint64_t *src,
                            const uint64_t *mask,
                            int             width,
                            uint16_t        combine)
{
    int i;

    for (i = 0; i < width; ++i)
    {
	uint64_t s = combine_mask (src, mask, i);
	uint64_t d = *(dest + i);
	uint64_t m, n, o, p;
	uint32_t Fa, Fb, t, u, v;
	uint16_t sa = s >> A_SHIFT;
	uint16_t da = d >> A_SHIFT;

	/* source factor from the A-selector */
	switch (combine & COMBINE_A)
	{
	default:
	    Fa = 0;
	    break;

	case COMBINE_A_OUT:
	    Fa = combine_conjoint_out_part (sa, da);
	    break;

	case COMBINE_A_IN:
	    Fa = combine_conjoint_in_part (sa, da);
	    break;

	case COMBINE_A:
	    Fa = MASK;
	    break;
	}

	/* destination factor from the B-selector (arguments swapped) */
	switch (combine & COMBINE_B)
	{
	default:
	    Fb = 0;
	    break;

	case COMBINE_B_OUT:
	    Fb = combine_conjoint_out_part (da, sa);
	    break;

	case COMBINE_B_IN:
	    Fb = combine_conjoint_in_part (da, sa);
	    break;

	case COMBINE_B:
	    Fb = MASK;
	    break;
	}

	/* Fa.s + Fb.d for each of the four channels */
	m = GENERIC (s, d, 0, Fa, Fb, t, u, v);
	n = GENERIC (s, d, G_SHIFT, Fa, Fb, t, u, v);
	o = GENERIC (s, d, R_SHIFT, Fa, Fb, t, u, v);
	p = GENERIC (s, d, A_SHIFT, Fa, Fb, t, u, v);

	s = m | n | o | p;

	*(dest + i) = s;
    }
}
 
/* Thin wrappers: each conjoint operator delegates to
 * combine_conjoint_general_u with the matching COMBINE_* selector. */

/* conjoint OVER */
static void
combine_conjoint_over_u (pixman_implementation_t *imp,
                         pixman_op_t              op,
                         uint64_t *               dest,
                         const uint64_t *         src,
                         const uint64_t *         mask,
                         int                      width)
{
    combine_conjoint_general_u (dest, src, mask, width, COMBINE_A_OVER);
}

/* conjoint OVER reverse */
static void
combine_conjoint_over_reverse_u (pixman_implementation_t *imp,
                                 pixman_op_t              op,
                                 uint64_t *               dest,
                                 const uint64_t *         src,
                                 const uint64_t *         mask,
                                 int                      width)
{
    combine_conjoint_general_u (dest, src, mask, width, COMBINE_B_OVER);
}

/* conjoint IN */
static void
combine_conjoint_in_u (pixman_implementation_t *imp,
                       pixman_op_t              op,
                       uint64_t *               dest,
                       const uint64_t *         src,
                       const uint64_t *         mask,
                       int                      width)
{
    combine_conjoint_general_u (dest, src, mask, width, COMBINE_A_IN);
}

/* conjoint IN reverse */
static void
combine_conjoint_in_reverse_u (pixman_implementation_t *imp,
                               pixman_op_t              op,
                               uint64_t *               dest,
                               const uint64_t *         src,
                               const uint64_t *         mask,
                               int                      width)
{
    combine_conjoint_general_u (dest, src, mask, width, COMBINE_B_IN);
}

/* conjoint OUT */
static void
combine_conjoint_out_u (pixman_implementation_t *imp,
                        pixman_op_t              op,
                        uint64_t *               dest,
                        const uint64_t *         src,
                        const uint64_t *         mask,
                        int                      width)
{
    combine_conjoint_general_u (dest, src, mask, width, COMBINE_A_OUT);
}

/* conjoint OUT reverse */
static void
combine_conjoint_out_reverse_u (pixman_implementation_t *imp,
                                pixman_op_t              op,
                                uint64_t *               dest,
                                const uint64_t *         src,
                                const uint64_t *         mask,
                                int                      width)
{
    combine_conjoint_general_u (dest, src, mask, width, COMBINE_B_OUT);
}

/* conjoint ATOP */
static void
combine_conjoint_atop_u (pixman_implementation_t *imp,
                         pixman_op_t              op,
                         uint64_t *               dest,
                         const uint64_t *         src,
                         const uint64_t *         mask,
                         int                      width)
{
    combine_conjoint_general_u (dest, src, mask, width, COMBINE_A_ATOP);
}

/* conjoint ATOP reverse */
static void
combine_conjoint_atop_reverse_u (pixman_implementation_t *imp,
                                 pixman_op_t              op,
                                 uint64_t *               dest,
                                 const uint64_t *         src,
                                 const uint64_t *         mask,
                                 int                      width)
{
    combine_conjoint_general_u (dest, src, mask, width, COMBINE_B_ATOP);
}

/* conjoint XOR */
static void
combine_conjoint_xor_u (pixman_implementation_t *imp,
                        pixman_op_t              op,
                        uint64_t *               dest,
                        const uint64_t *         src,
                        const uint64_t *         mask,
                        int                      width)
{
    combine_conjoint_general_u (dest, src, mask, width, COMBINE_XOR);
}
 
/************************************************************************/
/*********************** Per Channel functions **************************/
/************************************************************************/
 
/* CLEAR with component alpha: zero every destination pixel; the source
 * and mask are ignored entirely. */
static void
combine_clear_ca (pixman_implementation_t *imp,
                  pixman_op_t              op,
                  uint64_t *               dest,
                  const uint64_t *         src,
                  const uint64_t *         mask,
                  int                      width)
{
    int i;

    for (i = 0; i < width; ++i)
	dest[i] = 0;
}
 
/* SRC with component alpha: dest = m.s (per-channel mask applied). */
static void
combine_src_ca (pixman_implementation_t *imp,
                pixman_op_t              op,
                uint64_t *               dest,
                const uint64_t *         src,
                const uint64_t *         mask,
                int                      width)
{
    int i;

    for (i = 0; i < width; ++i)
    {
	uint64_t color = src[i];
	uint64_t cover = mask[i];

	combine_mask_value_ca (&color, &cover);

	dest[i] = color;
    }
}
 
/* OVER with component alpha: dest = m.s + (1 - m.sa).d per channel. */
static void
combine_over_ca (pixman_implementation_t *imp,
                 pixman_op_t              op,
                 uint64_t *               dest,
                 const uint64_t *         src,
                 const uint64_t *         mask,
                 int                      width)
{
    int i;

    for (i = 0; i < width; ++i)
    {
	uint64_t color = src[i];
	uint64_t cover = mask[i];
	uint64_t inv;

	combine_mask_ca (&color, &cover);

	inv = ~cover;
	if (inv)
	{
	    /* partially covered: blend dest underneath */
	    uint64_t d = dest[i];

	    UN16x4_MUL_UN16x4_ADD_UN16x4 (d, inv, color);
	    color = d;
	}

	dest[i] = color;
    }
}
 
/* OVER reverse with component alpha: dest = d + (1 - da).m.s. */
static void
combine_over_reverse_ca (pixman_implementation_t *imp,
                         pixman_op_t              op,
                         uint64_t *               dest,
                         const uint64_t *         src,
                         const uint64_t *         mask,
                         int                      width)
{
    int i;

    for (i = 0; i < width; ++i)
    {
	uint64_t d = dest[i];
	uint64_t ia = ~d >> A_SHIFT;   /* 1 - dest alpha */

	/* opaque destination swallows the source completely */
	if (ia == 0)
	    continue;

	uint64_t s = src[i];
	uint64_t m = mask[i];

	UN16x4_MUL_UN16x4 (s, m);
	UN16x4_MUL_UN16_ADD_UN16x4 (s, ia, d);

	dest[i] = s;
    }
}
 
/* IN with component alpha: dest = da.m.s. */
static void
combine_in_ca (pixman_implementation_t *imp,
               pixman_op_t              op,
               uint64_t *               dest,
               const uint64_t *         src,
               const uint64_t *         mask,
               int                      width)
{
    int i;

    for (i = 0; i < width; ++i)
    {
	uint64_t d = dest[i];
	uint32_t da = d >> A_SHIFT;
	uint64_t result = 0;

	if (da)
	{
	    uint64_t m = mask[i];

	    result = src[i];
	    combine_mask_value_ca (&result, &m);

	    /* skip the multiply when dest is fully opaque */
	    if (da != MASK)
		UN16x4_MUL_UN16 (result, da);
	}

	dest[i] = result;
    }
}
 
/* IN reverse with component alpha: dest = (m.sa).d per channel. */
static void
combine_in_reverse_ca (pixman_implementation_t *imp,
                       pixman_op_t              op,
                       uint64_t *               dest,
                       const uint64_t *         src,
                       const uint64_t *         mask,
                       int                      width)
{
    int i;

    for (i = 0; i < width; ++i)
    {
	uint64_t s = src[i];
	uint64_t m = mask[i];

	combine_mask_alpha_ca (&s, &m);

	/* full coverage on every channel: dest is unchanged */
	if (m == ~(uint64_t) 0)
	    continue;

	uint64_t d = 0;

	if (m)
	{
	    d = dest[i];
	    UN16x4_MUL_UN16x4 (d, m);
	}

	dest[i] = d;
    }
}
 
/* OUT with component alpha: dest = (1 - da).m.s. */
static void
combine_out_ca (pixman_implementation_t *imp,
                pixman_op_t              op,
                uint64_t *               dest,
                const uint64_t *         src,
                const uint64_t *         mask,
                int                      width)
{
    int i;

    for (i = 0; i < width; ++i)
    {
	uint64_t d = dest[i];
	uint32_t ia = ~d >> A_SHIFT;   /* 1 - dest alpha */
	uint64_t result = 0;

	if (ia)
	{
	    uint64_t m = mask[i];

	    result = src[i];
	    combine_mask_value_ca (&result, &m);

	    /* skip the multiply when dest is fully transparent */
	    if (ia != MASK)
		UN16x4_MUL_UN16 (result, ia);
	}

	dest[i] = result;
    }
}
 
/* OUT reverse with component alpha: dest = (1 - m.sa).d per channel. */
static void
combine_out_reverse_ca (pixman_implementation_t *imp,
                        pixman_op_t              op,
                        uint64_t *               dest,
                        const uint64_t *         src,
                        const uint64_t *         mask,
                        int                      width)
{
    int i;

    for (i = 0; i < width; ++i)
    {
	uint64_t s = src[i];
	uint64_t m = mask[i];

	combine_mask_alpha_ca (&s, &m);

	uint64_t inv = ~m;

	/* zero coverage on every channel: dest is unchanged */
	if (inv == ~(uint64_t) 0)
	    continue;

	uint64_t d = 0;

	if (inv)
	{
	    d = dest[i];
	    UN16x4_MUL_UN16x4 (d, inv);
	}

	dest[i] = d;
    }
}
 
/* ATOP with component alpha: dest = da.m.s + (1 - m.sa).d. */
static void
combine_atop_ca (pixman_implementation_t *imp,
                 pixman_op_t              op,
                 uint64_t *               dest,
                 const uint64_t *         src,
                 const uint64_t *         mask,
                 int                      width)
{
    int i;

    for (i = 0; i < width; ++i)
    {
	uint64_t d = dest[i];
	uint64_t s = src[i];
	uint64_t m = mask[i];
	uint32_t da = d >> A_SHIFT;
	uint64_t inv;

	combine_mask_ca (&s, &m);

	inv = ~m;   /* per-channel 1 - m.sa */

	UN16x4_MUL_UN16x4_ADD_UN16x4_MUL_UN16 (d, inv, s, da);

	dest[i] = d;
    }
}
 
/* ATOP reverse with component alpha: dest = (1 - da).m.s + (m.sa).d. */
static void
combine_atop_reverse_ca (pixman_implementation_t *imp,
                         pixman_op_t              op,
                         uint64_t *               dest,
                         const uint64_t *         src,
                         const uint64_t *         mask,
                         int                      width)
{
    int i;

    for (i = 0; i < width; ++i)
    {
	uint64_t d = dest[i];
	uint64_t s = src[i];
	uint64_t m = mask[i];
	uint32_t ia = ~d >> A_SHIFT;   /* 1 - dest alpha */

	combine_mask_ca (&s, &m);

	UN16x4_MUL_UN16x4_ADD_UN16x4_MUL_UN16 (d, m, s, ia);

	dest[i] = d;
    }
}
 
/* XOR with component alpha: dest = (1 - da).m.s + (1 - m.sa).d. */
static void
combine_xor_ca (pixman_implementation_t *imp,
                pixman_op_t              op,
                uint64_t *               dest,
                const uint64_t *         src,
                const uint64_t *         mask,
                int                      width)
{
    int i;

    for (i = 0; i < width; ++i)
    {
	uint64_t d = dest[i];
	uint64_t s = src[i];
	uint64_t m = mask[i];
	uint32_t ia = ~d >> A_SHIFT;   /* 1 - dest alpha */
	uint64_t inv;

	combine_mask_ca (&s, &m);

	inv = ~m;   /* per-channel 1 - m.sa */

	UN16x4_MUL_UN16x4_ADD_UN16x4_MUL_UN16 (d, inv, s, ia);

	dest[i] = d;
    }
}
 
/* ADD with component alpha: dest = d + m.s, saturating per channel. */
static void
combine_add_ca (pixman_implementation_t *imp,
                pixman_op_t              op,
                uint64_t *               dest,
                const uint64_t *         src,
                const uint64_t *         mask,
                int                      width)
{
    int i;

    for (i = 0; i < width; ++i)
    {
	uint64_t color = src[i];
	uint64_t cover = mask[i];
	uint64_t d = dest[i];

	combine_mask_value_ca (&color, &cover);

	UN16x4_ADD_UN16x4 (d, color);

	dest[i] = d;
    }
}
 
/* SATURATE with component alpha: per channel, add the source to the
 * destination when it fits in the remaining coverage (1 - da);
 * otherwise scale the source term by (1 - da) / s-channel-alpha so the
 * sum saturates exactly at full coverage. */
static void
combine_saturate_ca (pixman_implementation_t *imp,
                     pixman_op_t              op,
                     uint64_t *               dest,
                     const uint64_t *         src,
                     const uint64_t *         mask,
                     int                      width)
{
    int i;

    for (i = 0; i < width; ++i)
    {
	uint64_t s, d;
	uint32_t sa, sr, sg, sb, da;
	uint32_t t, u, v;
	uint64_t m, n, o, p;

	d = *(dest + i);
	s = *(src + i);
	m = *(mask + i);

	combine_mask_ca (&s, &m);

	/* per-channel source alphas taken from the processed mask */
	sa = (m >> A_SHIFT);
	sr = (m >> R_SHIFT) & MASK;
	sg = (m >> G_SHIFT) & MASK;
	sb = m & MASK;
	/* remaining coverage in the destination: 1 - da */
	da = ~d >> A_SHIFT;

	if (sb <= da)
	    m = ADD (s, d, 0, t);
	else
	    m = GENERIC (s, d, 0, (da << G_SHIFT) / sb, MASK, t, u, v);

	if (sg <= da)
	    n = ADD (s, d, G_SHIFT, t);
	else
	    n = GENERIC (s, d, G_SHIFT, (da << G_SHIFT) / sg, MASK, t, u, v);

	if (sr <= da)
	    o = ADD (s, d, R_SHIFT, t);
	else
	    o = GENERIC (s, d, R_SHIFT, (da << G_SHIFT) / sr, MASK, t, u, v);

	if (sa <= da)
	    p = ADD (s, d, A_SHIFT, t);
	else
	    p = GENERIC (s, d, A_SHIFT, (da << G_SHIFT) / sa, MASK, t, u, v);

	*(dest + i) = m | n | o | p;
    }
}
 
/* Generic disjoint combiner, component alpha: like
 * combine_disjoint_general_u, but Fa and Fb are computed independently
 * for each of the four channels using the per-channel source alphas
 * from the processed mask. */
static void
combine_disjoint_general_ca (uint64_t *      dest,
                             const uint64_t *src,
                             const uint64_t *mask,
                             int             width,
                             uint16_t        combine)
{
    int i;

    for (i = 0; i < width; ++i)
    {
	uint64_t s, d;
	uint64_t m, n, o, p;
	uint64_t Fa, Fb;
	uint32_t t, u, v;
	uint64_t sa;
	uint16_t da;

	s = *(src + i);
	m = *(mask + i);
	d = *(dest + i);
	da = d >> A_SHIFT;

	combine_mask_ca (&s, &m);

	/* per-channel source alpha vector */
	sa = m;

	/* per-channel source factors from the A-selector */
	switch (combine & COMBINE_A)
	{
	default:
	    Fa = 0;
	    break;

	case COMBINE_A_OUT:
	    m = (uint64_t)combine_disjoint_out_part ((uint16_t) (sa >> 0), da);
	    n = (uint64_t)combine_disjoint_out_part ((uint16_t) (sa >> G_SHIFT), da) << G_SHIFT;
	    o = (uint64_t)combine_disjoint_out_part ((uint16_t) (sa >> R_SHIFT), da) << R_SHIFT;
	    p = (uint64_t)combine_disjoint_out_part ((uint16_t) (sa >> A_SHIFT), da) << A_SHIFT;
	    Fa = m | n | o | p;
	    break;

	case COMBINE_A_IN:
	    m = (uint64_t)combine_disjoint_in_part ((uint16_t) (sa >> 0), da);
	    n = (uint64_t)combine_disjoint_in_part ((uint16_t) (sa >> G_SHIFT), da) << G_SHIFT;
	    o = (uint64_t)combine_disjoint_in_part ((uint16_t) (sa >> R_SHIFT), da) << R_SHIFT;
	    p = (uint64_t)combine_disjoint_in_part ((uint16_t) (sa >> A_SHIFT), da) << A_SHIFT;
	    Fa = m | n | o | p;
	    break;

	case COMBINE_A:
	    Fa = ~0;
	    break;
	}

	/* per-channel destination factors from the B-selector */
	switch (combine & COMBINE_B)
	{
	default:
	    Fb = 0;
	    break;

	case COMBINE_B_OUT:
	    m = (uint64_t)combine_disjoint_out_part (da, (uint16_t) (sa >> 0));
	    n = (uint64_t)combine_disjoint_out_part (da, (uint16_t) (sa >> G_SHIFT)) << G_SHIFT;
	    o = (uint64_t)combine_disjoint_out_part (da, (uint16_t) (sa >> R_SHIFT)) << R_SHIFT;
	    p = (uint64_t)combine_disjoint_out_part (da, (uint16_t) (sa >> A_SHIFT)) << A_SHIFT;
	    Fb = m | n | o | p;
	    break;

	case COMBINE_B_IN:
	    m = (uint64_t)combine_disjoint_in_part (da, (uint16_t) (sa >> 0));
	    n = (uint64_t)combine_disjoint_in_part (da, (uint16_t) (sa >> G_SHIFT)) << G_SHIFT;
	    o = (uint64_t)combine_disjoint_in_part (da, (uint16_t) (sa >> R_SHIFT)) << R_SHIFT;
	    p = (uint64_t)combine_disjoint_in_part (da, (uint16_t) (sa >> A_SHIFT)) << A_SHIFT;
	    Fb = m | n | o | p;
	    break;

	case COMBINE_B:
	    Fb = ~0;
	    break;
	}
	/* Fa.s + Fb.d, channel by channel */
	m = GENERIC (s, d, 0, GET_COMP (Fa, 0), GET_COMP (Fb, 0), t, u, v);
	n = GENERIC (s, d, G_SHIFT, GET_COMP (Fa, G_SHIFT), GET_COMP (Fb, G_SHIFT), t, u, v);
	o = GENERIC (s, d, R_SHIFT, GET_COMP (Fa, R_SHIFT), GET_COMP (Fb, R_SHIFT), t, u, v);
	p = GENERIC (s, d, A_SHIFT, GET_COMP (Fa, A_SHIFT), GET_COMP (Fb, A_SHIFT), t, u, v);

	s = m | n | o | p;

	*(dest + i) = s;
    }
}
 
/* Thin wrappers: each component-alpha disjoint operator delegates to
 * combine_disjoint_general_ca with the matching COMBINE_* selector. */

/* disjoint OVER, component alpha */
static void
combine_disjoint_over_ca (pixman_implementation_t *imp,
                          pixman_op_t              op,
                          uint64_t *               dest,
                          const uint64_t *         src,
                          const uint64_t *         mask,
                          int                      width)
{
    combine_disjoint_general_ca (dest, src, mask, width, COMBINE_A_OVER);
}

/* disjoint IN, component alpha */
static void
combine_disjoint_in_ca (pixman_implementation_t *imp,
                        pixman_op_t              op,
                        uint64_t *               dest,
                        const uint64_t *         src,
                        const uint64_t *         mask,
                        int                      width)
{
    combine_disjoint_general_ca (dest, src, mask, width, COMBINE_A_IN);
}

/* disjoint IN reverse, component alpha */
static void
combine_disjoint_in_reverse_ca (pixman_implementation_t *imp,
                                pixman_op_t              op,
                                uint64_t *               dest,
                                const uint64_t *         src,
                                const uint64_t *         mask,
                                int                      width)
{
    combine_disjoint_general_ca (dest, src, mask, width, COMBINE_B_IN);
}

/* disjoint OUT, component alpha */
static void
combine_disjoint_out_ca (pixman_implementation_t *imp,
                         pixman_op_t              op,
                         uint64_t *               dest,
                         const uint64_t *         src,
                         const uint64_t *         mask,
                         int                      width)
{
    combine_disjoint_general_ca (dest, src, mask, width, COMBINE_A_OUT);
}

/* disjoint OUT reverse, component alpha */
static void
combine_disjoint_out_reverse_ca (pixman_implementation_t *imp,
                                 pixman_op_t              op,
                                 uint64_t *               dest,
                                 const uint64_t *         src,
                                 const uint64_t *         mask,
                                 int                      width)
{
    combine_disjoint_general_ca (dest, src, mask, width, COMBINE_B_OUT);
}

/* disjoint ATOP, component alpha */
static void
combine_disjoint_atop_ca (pixman_implementation_t *imp,
                          pixman_op_t              op,
                          uint64_t *               dest,
                          const uint64_t *         src,
                          const uint64_t *         mask,
                          int                      width)
{
    combine_disjoint_general_ca (dest, src, mask, width, COMBINE_A_ATOP);
}

/* disjoint ATOP reverse, component alpha */
static void
combine_disjoint_atop_reverse_ca (pixman_implementation_t *imp,
                                  pixman_op_t              op,
                                  uint64_t *               dest,
                                  const uint64_t *         src,
                                  const uint64_t *         mask,
                                  int                      width)
{
    combine_disjoint_general_ca (dest, src, mask, width, COMBINE_B_ATOP);
}

/* disjoint XOR, component alpha */
static void
combine_disjoint_xor_ca (pixman_implementation_t *imp,
                         pixman_op_t              op,
                         uint64_t *               dest,
                         const uint64_t *         src,
                         const uint64_t *         mask,
                         int                      width)
{
    combine_disjoint_general_ca (dest, src, mask, width, COMBINE_XOR);
}
 
/* Generic component-alpha "conjoint" combiner, 16 bits per channel.
 *
 * dest/src/mask point to arrays of `width` 64-bit A16R16G16B16 pixels.
 * `combine` packs two operator selectors: the low bits (COMBINE_A mask)
 * choose the source factor Fa, the high bits (COMBINE_B mask) choose the
 * destination factor Fb.  For each pixel the routine computes a per-channel
 * Fa and Fb (each channel of the effective source alpha `sa` is paired with
 * the scalar destination alpha `da`), then evaluates
 *     result_c = src_c * Fa_c + dest_c * Fb_c
 * channel by channel via the GENERIC macro.
 */
static void
combine_conjoint_general_ca (uint64_t *      dest,
                             const uint64_t *src,
                             const uint64_t *mask,
                             int             width,
                             uint16_t        combine)
{
    int i;

    for (i = 0; i < width; ++i)
    {
	uint64_t s, d;
	uint64_t m, n, o, p;
	uint64_t Fa, Fb;
	uint32_t t, u, v;
	uint64_t sa;
	uint16_t da;

	s = *(src + i);
	m = *(mask + i);
	d = *(dest + i);
	da = d >> A_SHIFT;          /* destination alpha, scalar */

	/* Pre-multiply the source by the component mask; afterwards `m`
	 * holds the per-channel effective source alpha. */
	combine_mask_ca (&s, &m);

	sa = m;

	/* Source factor Fa, computed independently for B, G, R and A
	 * channels from (sa_channel, da). */
	switch (combine & COMBINE_A)
	{
	default:
	    Fa = 0;
	    break;

	case COMBINE_A_OUT:
	    m = (uint64_t)combine_conjoint_out_part ((uint16_t) (sa >> 0), da);
	    n = (uint64_t)combine_conjoint_out_part ((uint16_t) (sa >> G_SHIFT), da) << G_SHIFT;
	    o = (uint64_t)combine_conjoint_out_part ((uint16_t) (sa >> R_SHIFT), da) << R_SHIFT;
	    p = (uint64_t)combine_conjoint_out_part ((uint16_t) (sa >> A_SHIFT), da) << A_SHIFT;
	    Fa = m | n | o | p;
	    break;

	case COMBINE_A_IN:
	    m = (uint64_t)combine_conjoint_in_part ((uint16_t) (sa >> 0), da);
	    n = (uint64_t)combine_conjoint_in_part ((uint16_t) (sa >> G_SHIFT), da) << G_SHIFT;
	    o = (uint64_t)combine_conjoint_in_part ((uint16_t) (sa >> R_SHIFT), da) << R_SHIFT;
	    p = (uint64_t)combine_conjoint_in_part ((uint16_t) (sa >> A_SHIFT), da) << A_SHIFT;
	    Fa = m | n | o | p;
	    break;

	case COMBINE_A:
	    Fa = ~0;                /* factor 1.0 in every channel */
	    break;
	}

	/* Destination factor Fb: same computation with the roles of
	 * source and destination alpha swapped. */
	switch (combine & COMBINE_B)
	{
	default:
	    Fb = 0;
	    break;

	case COMBINE_B_OUT:
	    m = (uint64_t)combine_conjoint_out_part (da, (uint16_t) (sa >> 0));
	    n = (uint64_t)combine_conjoint_out_part (da, (uint16_t) (sa >> G_SHIFT)) << G_SHIFT;
	    o = (uint64_t)combine_conjoint_out_part (da, (uint16_t) (sa >> R_SHIFT)) << R_SHIFT;
	    p = (uint64_t)combine_conjoint_out_part (da, (uint16_t) (sa >> A_SHIFT)) << A_SHIFT;
	    Fb = m | n | o | p;
	    break;

	case COMBINE_B_IN:
	    m = (uint64_t)combine_conjoint_in_part (da, (uint16_t) (sa >> 0));
	    n = (uint64_t)combine_conjoint_in_part (da, (uint16_t) (sa >> G_SHIFT)) << G_SHIFT;
	    o = (uint64_t)combine_conjoint_in_part (da, (uint16_t) (sa >> R_SHIFT)) << R_SHIFT;
	    p = (uint64_t)combine_conjoint_in_part (da, (uint16_t) (sa >> A_SHIFT)) << A_SHIFT;
	    Fb = m | n | o | p;
	    break;

	case COMBINE_B:
	    Fb = ~0;
	    break;
	}

	/* Blend each 16-bit channel: s*Fa + d*Fb, rounded and saturated. */
	m = GENERIC (s, d, 0, GET_COMP (Fa, 0), GET_COMP (Fb, 0), t, u, v);
	n = GENERIC (s, d, G_SHIFT, GET_COMP (Fa, G_SHIFT), GET_COMP (Fb, G_SHIFT), t, u, v);
	o = GENERIC (s, d, R_SHIFT, GET_COMP (Fa, R_SHIFT), GET_COMP (Fb, R_SHIFT), t, u, v);
	p = GENERIC (s, d, A_SHIFT, GET_COMP (Fa, A_SHIFT), GET_COMP (Fb, A_SHIFT), t, u, v);

	s = m | n | o | p;

	*(dest + i) = s;
    }
}
 
/* Component-alpha (CA) conjoint combiners, 16 bits per channel.  Like the
 * disjoint set above, each wrapper just picks a (Fa, Fb) factor pair and
 * delegates to combine_conjoint_general_ca().  imp/op are unused and only
 * match the combiner function-pointer signature.
 */
static void
combine_conjoint_over_ca (pixman_implementation_t *imp,
                          pixman_op_t              op,
                          uint64_t *               dest,
                          const uint64_t *         src,
                          const uint64_t *         mask,
                          int                      width)
{
    combine_conjoint_general_ca (dest, src, mask, width, COMBINE_A_OVER);
}

/* Conjoint OVER reverse: destination over source. */
static void
combine_conjoint_over_reverse_ca (pixman_implementation_t *imp,
                                  pixman_op_t              op,
                                  uint64_t *               dest,
                                  const uint64_t *         src,
                                  const uint64_t *         mask,
                                  int                      width)
{
    combine_conjoint_general_ca (dest, src, mask, width, COMBINE_B_OVER);
}

/* Conjoint IN: source factor only. */
static void
combine_conjoint_in_ca (pixman_implementation_t *imp,
                        pixman_op_t              op,
                        uint64_t *               dest,
                        const uint64_t *         src,
                        const uint64_t *         mask,
                        int                      width)
{
    combine_conjoint_general_ca (dest, src, mask, width, COMBINE_A_IN);
}

/* Conjoint IN reverse: destination factor only. */
static void
combine_conjoint_in_reverse_ca (pixman_implementation_t *imp,
                                pixman_op_t              op,
                                uint64_t *               dest,
                                const uint64_t *         src,
                                const uint64_t *         mask,
                                int                      width)
{
    combine_conjoint_general_ca (dest, src, mask, width, COMBINE_B_IN);
}

/* Conjoint OUT. */
static void
combine_conjoint_out_ca (pixman_implementation_t *imp,
                         pixman_op_t              op,
                         uint64_t *               dest,
                         const uint64_t *         src,
                         const uint64_t *         mask,
                         int                      width)
{
    combine_conjoint_general_ca (dest, src, mask, width, COMBINE_A_OUT);
}

/* Conjoint OUT reverse. */
static void
combine_conjoint_out_reverse_ca (pixman_implementation_t *imp,
                                 pixman_op_t              op,
                                 uint64_t *               dest,
                                 const uint64_t *         src,
                                 const uint64_t *         mask,
                                 int                      width)
{
    combine_conjoint_general_ca (dest, src, mask, width, COMBINE_B_OUT);
}

/* Conjoint ATOP. */
static void
combine_conjoint_atop_ca (pixman_implementation_t *imp,
                          pixman_op_t              op,
                          uint64_t *               dest,
                          const uint64_t *         src,
                          const uint64_t *         mask,
                          int                      width)
{
    combine_conjoint_general_ca (dest, src, mask, width, COMBINE_A_ATOP);
}

/* Conjoint ATOP reverse. */
static void
combine_conjoint_atop_reverse_ca (pixman_implementation_t *imp,
                                  pixman_op_t              op,
                                  uint64_t *               dest,
                                  const uint64_t *         src,
                                  const uint64_t *         mask,
                                  int                      width)
{
    combine_conjoint_general_ca (dest, src, mask, width, COMBINE_B_ATOP);
}

/* Conjoint XOR. */
static void
combine_conjoint_xor_ca (pixman_implementation_t *imp,
                         pixman_op_t              op,
                         uint64_t *               dest,
                         const uint64_t *         src,
                         const uint64_t *         mask,
                         int                      width)
{
    combine_conjoint_general_ca (dest, src, mask, width, COMBINE_XOR);
}
 
/* Populate the 16-bit-per-channel combiner dispatch tables of an
 * implementation: combine_64[] holds the unified-alpha combiners indexed
 * by pixman_op_t, combine_64_ca[] the component-alpha variants.  Slots
 * whose operator degenerates to "keep destination" are filled with
 * combine_dst; DISJOINT_OVER_REVERSE is mathematically identical to
 * SATURATE, so it shares that combiner.
 */
void
_pixman_setup_combiner_functions_64 (pixman_implementation_t *imp)
{
    /* Unified alpha */
    imp->combine_64[PIXMAN_OP_CLEAR] = combine_clear;
    imp->combine_64[PIXMAN_OP_SRC] = combine_src_u;
    imp->combine_64[PIXMAN_OP_DST] = combine_dst;
    imp->combine_64[PIXMAN_OP_OVER] = combine_over_u;
    imp->combine_64[PIXMAN_OP_OVER_REVERSE] = combine_over_reverse_u;
    imp->combine_64[PIXMAN_OP_IN] = combine_in_u;
    imp->combine_64[PIXMAN_OP_IN_REVERSE] = combine_in_reverse_u;
    imp->combine_64[PIXMAN_OP_OUT] = combine_out_u;
    imp->combine_64[PIXMAN_OP_OUT_REVERSE] = combine_out_reverse_u;
    imp->combine_64[PIXMAN_OP_ATOP] = combine_atop_u;
    imp->combine_64[PIXMAN_OP_ATOP_REVERSE] = combine_atop_reverse_u;
    imp->combine_64[PIXMAN_OP_XOR] = combine_xor_u;
    imp->combine_64[PIXMAN_OP_ADD] = combine_add_u;
    imp->combine_64[PIXMAN_OP_SATURATE] = combine_saturate_u;

    /* Disjoint, unified */
    imp->combine_64[PIXMAN_OP_DISJOINT_CLEAR] = combine_clear;
    imp->combine_64[PIXMAN_OP_DISJOINT_SRC] = combine_src_u;
    imp->combine_64[PIXMAN_OP_DISJOINT_DST] = combine_dst;
    imp->combine_64[PIXMAN_OP_DISJOINT_OVER] = combine_disjoint_over_u;
    /* DISJOINT_OVER_REVERSE is equivalent to SATURATE. */
    imp->combine_64[PIXMAN_OP_DISJOINT_OVER_REVERSE] = combine_saturate_u;
    imp->combine_64[PIXMAN_OP_DISJOINT_IN] = combine_disjoint_in_u;
    imp->combine_64[PIXMAN_OP_DISJOINT_IN_REVERSE] = combine_disjoint_in_reverse_u;
    imp->combine_64[PIXMAN_OP_DISJOINT_OUT] = combine_disjoint_out_u;
    imp->combine_64[PIXMAN_OP_DISJOINT_OUT_REVERSE] = combine_disjoint_out_reverse_u;
    imp->combine_64[PIXMAN_OP_DISJOINT_ATOP] = combine_disjoint_atop_u;
    imp->combine_64[PIXMAN_OP_DISJOINT_ATOP_REVERSE] = combine_disjoint_atop_reverse_u;
    imp->combine_64[PIXMAN_OP_DISJOINT_XOR] = combine_disjoint_xor_u;

    /* Conjoint, unified */
    imp->combine_64[PIXMAN_OP_CONJOINT_CLEAR] = combine_clear;
    imp->combine_64[PIXMAN_OP_CONJOINT_SRC] = combine_src_u;
    imp->combine_64[PIXMAN_OP_CONJOINT_DST] = combine_dst;
    imp->combine_64[PIXMAN_OP_CONJOINT_OVER] = combine_conjoint_over_u;
    imp->combine_64[PIXMAN_OP_CONJOINT_OVER_REVERSE] = combine_conjoint_over_reverse_u;
    imp->combine_64[PIXMAN_OP_CONJOINT_IN] = combine_conjoint_in_u;
    imp->combine_64[PIXMAN_OP_CONJOINT_IN_REVERSE] = combine_conjoint_in_reverse_u;
    imp->combine_64[PIXMAN_OP_CONJOINT_OUT] = combine_conjoint_out_u;
    imp->combine_64[PIXMAN_OP_CONJOINT_OUT_REVERSE] = combine_conjoint_out_reverse_u;
    imp->combine_64[PIXMAN_OP_CONJOINT_ATOP] = combine_conjoint_atop_u;
    imp->combine_64[PIXMAN_OP_CONJOINT_ATOP_REVERSE] = combine_conjoint_atop_reverse_u;
    imp->combine_64[PIXMAN_OP_CONJOINT_XOR] = combine_conjoint_xor_u;

    /* PDF blend modes, unified */
    imp->combine_64[PIXMAN_OP_MULTIPLY] = combine_multiply_u;
    imp->combine_64[PIXMAN_OP_SCREEN] = combine_screen_u;
    imp->combine_64[PIXMAN_OP_OVERLAY] = combine_overlay_u;
    imp->combine_64[PIXMAN_OP_DARKEN] = combine_darken_u;
    imp->combine_64[PIXMAN_OP_LIGHTEN] = combine_lighten_u;
    imp->combine_64[PIXMAN_OP_COLOR_DODGE] = combine_color_dodge_u;
    imp->combine_64[PIXMAN_OP_COLOR_BURN] = combine_color_burn_u;
    imp->combine_64[PIXMAN_OP_HARD_LIGHT] = combine_hard_light_u;
    imp->combine_64[PIXMAN_OP_SOFT_LIGHT] = combine_soft_light_u;
    imp->combine_64[PIXMAN_OP_DIFFERENCE] = combine_difference_u;
    imp->combine_64[PIXMAN_OP_EXCLUSION] = combine_exclusion_u;
    imp->combine_64[PIXMAN_OP_HSL_HUE] = combine_hsl_hue_u;
    imp->combine_64[PIXMAN_OP_HSL_SATURATION] = combine_hsl_saturation_u;
    imp->combine_64[PIXMAN_OP_HSL_COLOR] = combine_hsl_color_u;
    imp->combine_64[PIXMAN_OP_HSL_LUMINOSITY] = combine_hsl_luminosity_u;

    /* Component alpha combiners */
    imp->combine_64_ca[PIXMAN_OP_CLEAR] = combine_clear_ca;
    imp->combine_64_ca[PIXMAN_OP_SRC] = combine_src_ca;
    /* dest */
    imp->combine_64_ca[PIXMAN_OP_OVER] = combine_over_ca;
    imp->combine_64_ca[PIXMAN_OP_OVER_REVERSE] = combine_over_reverse_ca;
    imp->combine_64_ca[PIXMAN_OP_IN] = combine_in_ca;
    imp->combine_64_ca[PIXMAN_OP_IN_REVERSE] = combine_in_reverse_ca;
    imp->combine_64_ca[PIXMAN_OP_OUT] = combine_out_ca;
    imp->combine_64_ca[PIXMAN_OP_OUT_REVERSE] = combine_out_reverse_ca;
    imp->combine_64_ca[PIXMAN_OP_ATOP] = combine_atop_ca;
    imp->combine_64_ca[PIXMAN_OP_ATOP_REVERSE] = combine_atop_reverse_ca;
    imp->combine_64_ca[PIXMAN_OP_XOR] = combine_xor_ca;
    imp->combine_64_ca[PIXMAN_OP_ADD] = combine_add_ca;
    imp->combine_64_ca[PIXMAN_OP_SATURATE] = combine_saturate_ca;

    /* Disjoint CA */
    imp->combine_64_ca[PIXMAN_OP_DISJOINT_CLEAR] = combine_clear_ca;
    imp->combine_64_ca[PIXMAN_OP_DISJOINT_SRC] = combine_src_ca;
    imp->combine_64_ca[PIXMAN_OP_DISJOINT_DST] = combine_dst;
    imp->combine_64_ca[PIXMAN_OP_DISJOINT_OVER] = combine_disjoint_over_ca;
    imp->combine_64_ca[PIXMAN_OP_DISJOINT_OVER_REVERSE] = combine_saturate_ca;
    imp->combine_64_ca[PIXMAN_OP_DISJOINT_IN] = combine_disjoint_in_ca;
    imp->combine_64_ca[PIXMAN_OP_DISJOINT_IN_REVERSE] = combine_disjoint_in_reverse_ca;
    imp->combine_64_ca[PIXMAN_OP_DISJOINT_OUT] = combine_disjoint_out_ca;
    imp->combine_64_ca[PIXMAN_OP_DISJOINT_OUT_REVERSE] = combine_disjoint_out_reverse_ca;
    imp->combine_64_ca[PIXMAN_OP_DISJOINT_ATOP] = combine_disjoint_atop_ca;
    imp->combine_64_ca[PIXMAN_OP_DISJOINT_ATOP_REVERSE] = combine_disjoint_atop_reverse_ca;
    imp->combine_64_ca[PIXMAN_OP_DISJOINT_XOR] = combine_disjoint_xor_ca;

    /* Conjoint CA */
    imp->combine_64_ca[PIXMAN_OP_CONJOINT_CLEAR] = combine_clear_ca;
    imp->combine_64_ca[PIXMAN_OP_CONJOINT_SRC] = combine_src_ca;
    imp->combine_64_ca[PIXMAN_OP_CONJOINT_DST] = combine_dst;
    imp->combine_64_ca[PIXMAN_OP_CONJOINT_OVER] = combine_conjoint_over_ca;
    imp->combine_64_ca[PIXMAN_OP_CONJOINT_OVER_REVERSE] = combine_conjoint_over_reverse_ca;
    imp->combine_64_ca[PIXMAN_OP_CONJOINT_IN] = combine_conjoint_in_ca;
    imp->combine_64_ca[PIXMAN_OP_CONJOINT_IN_REVERSE] = combine_conjoint_in_reverse_ca;
    imp->combine_64_ca[PIXMAN_OP_CONJOINT_OUT] = combine_conjoint_out_ca;
    imp->combine_64_ca[PIXMAN_OP_CONJOINT_OUT_REVERSE] = combine_conjoint_out_reverse_ca;
    imp->combine_64_ca[PIXMAN_OP_CONJOINT_ATOP] = combine_conjoint_atop_ca;
    imp->combine_64_ca[PIXMAN_OP_CONJOINT_ATOP_REVERSE] = combine_conjoint_atop_reverse_ca;
    imp->combine_64_ca[PIXMAN_OP_CONJOINT_XOR] = combine_conjoint_xor_ca;

    /* PDF blend modes, component alpha */
    imp->combine_64_ca[PIXMAN_OP_MULTIPLY] = combine_multiply_ca;
    imp->combine_64_ca[PIXMAN_OP_SCREEN] = combine_screen_ca;
    imp->combine_64_ca[PIXMAN_OP_OVERLAY] = combine_overlay_ca;
    imp->combine_64_ca[PIXMAN_OP_DARKEN] = combine_darken_ca;
    imp->combine_64_ca[PIXMAN_OP_LIGHTEN] = combine_lighten_ca;
    imp->combine_64_ca[PIXMAN_OP_COLOR_DODGE] = combine_color_dodge_ca;
    imp->combine_64_ca[PIXMAN_OP_COLOR_BURN] = combine_color_burn_ca;
    imp->combine_64_ca[PIXMAN_OP_HARD_LIGHT] = combine_hard_light_ca;
    imp->combine_64_ca[PIXMAN_OP_SOFT_LIGHT] = combine_soft_light_ca;
    imp->combine_64_ca[PIXMAN_OP_DIFFERENCE] = combine_difference_ca;
    imp->combine_64_ca[PIXMAN_OP_EXCLUSION] = combine_exclusion_ca;

    /* It is not clear that these make sense, so make them noops for now */
    imp->combine_64_ca[PIXMAN_OP_HSL_HUE] = combine_dst;
    imp->combine_64_ca[PIXMAN_OP_HSL_SATURATION] = combine_dst;
    imp->combine_64_ca[PIXMAN_OP_HSL_COLOR] = combine_dst;
    imp->combine_64_ca[PIXMAN_OP_HSL_LUMINOSITY] = combine_dst;
}
 
/programs/develop/libraries/pixman/pixman-combine64.h
0,0 → 1,230
/* WARNING: This file is generated by combine.pl from combine.inc.
   Please edit one of those files rather than this one. */

/* Include guard added: the generated header had none, so a double
 * inclusion would trigger macro-redefinition diagnostics. */
#ifndef PIXMAN_COMBINE64_H
#define PIXMAN_COMBINE64_H

#line 1 "pixman-combine.c.template"

/* 16-bit-per-channel pixel layout: A16R16G16B16 packed in a uint64_t. */
#define COMPONENT_SIZE 16
#define MASK 0xffffULL
#define ONE_HALF 0x8000ULL

#define A_SHIFT 16 * 3
#define R_SHIFT 16 * 2
#define G_SHIFT 16
#define A_MASK 0xffff000000000000ULL
#define R_MASK 0xffff00000000ULL
#define G_MASK 0xffff0000ULL

#define RB_MASK 0xffff0000ffffULL
#define AG_MASK 0xffff0000ffff0000ULL
#define RB_ONE_HALF 0x800000008000ULL
#define RB_MASK_PLUS_ONE 0x10000000010000ULL

/* Channel extractors. */
#define ALPHA_16(x) ((x) >> A_SHIFT)
#define RED_16(x) (((x) >> R_SHIFT) & MASK)
#define GREEN_16(x) (((x) >> G_SHIFT) & MASK)
#define BLUE_16(x) ((x) & MASK)

/*
 * Helper macros.
 */

/* (a * b) / 0xffff with correct rounding; t is a caller-provided
 * scratch variable (at least 32 bits wide). */
#define MUL_UN16(a, b, t)						\
    ((t) = (a) * (b) + ONE_HALF, ((((t) >> G_SHIFT ) + (t) ) >> G_SHIFT ))

/* (a * 0xffff) / b; b must be non-zero. */
#define DIV_UN16(a, b)							\
    (((uint32_t) (a) * MASK) / (b))

/* min (x + y, 0xffff) using a branch-free saturating add. */
#define ADD_UN16(x, y, t)				     \
    ((t) = (x) + (y),					     \
     (uint64_t) (uint16_t) ((t) | (0 - ((t) >> G_SHIFT))))

/* x / 0xffff with correct rounding. */
#define DIV_ONE_UN16(x)							\
    (((x) + ONE_HALF + (((x) + ONE_HALF) >> G_SHIFT)) >> G_SHIFT)

/*
 * The methods below use some tricks to be able to do two color
 * components at the same time.
 */

/*
 * x_rb = (x_rb * a) / 255
 */
#define UN16_rb_MUL_UN16(x, a, t)				\
    do								\
    {								\
	t = ((x) & RB_MASK) * (a);				\
	t += RB_ONE_HALF;					\
	x = (t + ((t >> G_SHIFT) & RB_MASK)) >> G_SHIFT;	\
	x &= RB_MASK;						\
    } while (0)

/*
 * x_rb = min (x_rb + y_rb, 255)
 */
#define UN16_rb_ADD_UN16_rb(x, y, t)				\
    do								\
    {								\
	t = ((x) + (y));					\
	t |= RB_MASK_PLUS_ONE - ((t >> G_SHIFT) & RB_MASK);	\
	x = (t & RB_MASK);					\
    } while (0)

/*
 * x_rb = (x_rb * a_rb) / 255
 */
#define UN16_rb_MUL_UN16_rb(x, a, t)				\
    do								\
    {								\
	t = (x & MASK) * (a & MASK);				\
	t |= (x & R_MASK) * ((a >> R_SHIFT) & MASK);		\
	t += RB_ONE_HALF;					\
	t = (t + ((t >> G_SHIFT) & RB_MASK)) >> G_SHIFT;	\
	x = t & RB_MASK;					\
    } while (0)

/*
 * x_c = (x_c * a) / 255
 */
#define UN16x4_MUL_UN16(x, a)			\
    do						\
    {						\
	uint64_t r1__, r2__, t__;		\
						\
	r1__ = (x);				\
	UN16_rb_MUL_UN16 (r1__, (a), t__);	\
						\
	r2__ = (x) >> G_SHIFT;			\
	UN16_rb_MUL_UN16 (r2__, (a), t__);	\
						\
	(x) = r1__ | (r2__ << G_SHIFT);		\
    } while (0)

/*
 * x_c = (x_c * a) / 255 + y_c
 */
#define UN16x4_MUL_UN16_ADD_UN16x4(x, a, y)		\
    do							\
    {							\
	uint64_t r1__, r2__, r3__, t__;			\
							\
	r1__ = (x);					\
	r2__ = (y) & RB_MASK;				\
	UN16_rb_MUL_UN16 (r1__, (a), t__);		\
	UN16_rb_ADD_UN16_rb (r1__, r2__, t__);		\
							\
	r2__ = (x) >> G_SHIFT;				\
	r3__ = ((y) >> G_SHIFT) & RB_MASK;		\
	UN16_rb_MUL_UN16 (r2__, (a), t__);		\
	UN16_rb_ADD_UN16_rb (r2__, r3__, t__);		\
							\
	(x) = r1__ | (r2__ << G_SHIFT);			\
    } while (0)

/*
 * x_c = (x_c * a + y_c * b) / 255
 */
#define UN16x4_MUL_UN16_ADD_UN16x4_MUL_UN16(x, a, y, b)	\
    do							\
    {							\
	uint64_t r1__, r2__, r3__, t__;			\
							\
	r1__ = (x);					\
	r2__ = (y);					\
	UN16_rb_MUL_UN16 (r1__, (a), t__);		\
	UN16_rb_MUL_UN16 (r2__, (b), t__);		\
	UN16_rb_ADD_UN16_rb (r1__, r2__, t__);		\
							\
	r2__ = ((x) >> G_SHIFT);			\
	r3__ = ((y) >> G_SHIFT);			\
	UN16_rb_MUL_UN16 (r2__, (a), t__);		\
	UN16_rb_MUL_UN16 (r3__, (b), t__);		\
	UN16_rb_ADD_UN16_rb (r2__, r3__, t__);		\
							\
	(x) = r1__ | (r2__ << G_SHIFT);			\
    } while (0)

/*
 * x_c = (x_c * a_c) / 255
 */
#define UN16x4_MUL_UN16x4(x, a)				\
    do							\
    {							\
	uint64_t r1__, r2__, r3__, t__;			\
							\
	r1__ = (x);					\
	r2__ = (a);					\
	UN16_rb_MUL_UN16_rb (r1__, r2__, t__);		\
							\
	r2__ = (x) >> G_SHIFT;				\
	r3__ = (a) >> G_SHIFT;				\
	UN16_rb_MUL_UN16_rb (r2__, r3__, t__);		\
							\
	(x) = r1__ | (r2__ << G_SHIFT);			\
    } while (0)

/*
 * x_c = (x_c * a_c) / 255 + y_c
 */
#define UN16x4_MUL_UN16x4_ADD_UN16x4(x, a, y)		\
    do							\
    {							\
	uint64_t r1__, r2__, r3__, t__;			\
							\
	r1__ = (x);					\
	r2__ = (a);					\
	UN16_rb_MUL_UN16_rb (r1__, r2__, t__);		\
	r2__ = (y) & RB_MASK;				\
	UN16_rb_ADD_UN16_rb (r1__, r2__, t__);		\
							\
	r2__ = ((x) >> G_SHIFT);			\
	r3__ = ((a) >> G_SHIFT);			\
	UN16_rb_MUL_UN16_rb (r2__, r3__, t__);		\
	r3__ = ((y) >> G_SHIFT) & RB_MASK;		\
	UN16_rb_ADD_UN16_rb (r2__, r3__, t__);		\
							\
	(x) = r1__ | (r2__ << G_SHIFT);			\
    } while (0)

/*
 * x_c = (x_c * a_c + y_c * b) / 255
 */
#define UN16x4_MUL_UN16x4_ADD_UN16x4_MUL_UN16(x, a, y, b)	\
    do								\
    {								\
	uint64_t r1__, r2__, r3__, t__;				\
								\
	r1__ = (x);						\
	r2__ = (a);						\
	UN16_rb_MUL_UN16_rb (r1__, r2__, t__);			\
	r2__ = (y);						\
	UN16_rb_MUL_UN16 (r2__, (b), t__);			\
	UN16_rb_ADD_UN16_rb (r1__, r2__, t__);			\
								\
	r2__ = (x) >> G_SHIFT;					\
	r3__ = (a) >> G_SHIFT;					\
	UN16_rb_MUL_UN16_rb (r2__, r3__, t__);			\
	r3__ = (y) >> G_SHIFT;					\
	UN16_rb_MUL_UN16 (r3__, (b), t__);			\
	UN16_rb_ADD_UN16_rb (r2__, r3__, t__);			\
								\
	x = r1__ | (r2__ << G_SHIFT);				\
    } while (0)

/*
  x_c = min(x_c + y_c, 255)
*/
#define UN16x4_ADD_UN16x4(x, y)				\
    do							\
    {							\
	uint64_t r1__, r2__, r3__, t__;			\
							\
	r1__ = (x) & RB_MASK;				\
	r2__ = (y) & RB_MASK;				\
	UN16_rb_ADD_UN16_rb (r1__, r2__, t__);		\
							\
	r2__ = ((x) >> G_SHIFT) & RB_MASK;		\
	r3__ = ((y) >> G_SHIFT) & RB_MASK;		\
	UN16_rb_ADD_UN16_rb (r2__, r3__, t__);		\
							\
	x = r1__ | (r2__ << G_SHIFT);			\
    } while (0)

#endif /* PIXMAN_COMBINE64_H */
/programs/develop/libraries/pixman/pixman-compiler.h
0,0 → 1,204
/* Pixman uses some non-standard compiler features. This file ensures
 * they exist
 *
 * The features are:
 *
 *    FUNC	     must be defined to expand to the current function
 *    PIXMAN_EXPORT  should be defined to whatever is required to
 *                   export functions from a shared library
 *    limits	     limits for various types must be defined
 *    inline         must be defined
 *    force_inline   must be defined
 */

/* FUNC: name of the enclosing function, for diagnostics. */
#if defined (__GNUC__)
#  define FUNC     ((const char*) (__PRETTY_FUNCTION__))
#elif defined (__sun) || (defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L)
#  define FUNC     ((const char*) (__func__))
#else
#  define FUNC     ((const char*) ("???"))
#endif

/* Fallback integer limits for toolchains without a complete <stdint.h>. */
#ifndef INT16_MIN
#  define INT16_MIN              (-32767-1)
#endif

#ifndef INT16_MAX
#  define INT16_MAX              (32767)
#endif

#ifndef INT32_MIN
#  define INT32_MIN              (-2147483647-1)
#endif

#ifndef INT32_MAX
#  define INT32_MAX              (2147483647)
#endif

#ifndef UINT32_MIN
#  define UINT32_MIN             (0)
#endif

#ifndef UINT32_MAX
#  define UINT32_MAX             (4294967295U)
#endif

#ifndef M_PI
#  define M_PI 3.14159265358979323846
#endif

/* inline / force_inline / noinline per compiler. */
#ifdef _MSC_VER
/* 'inline' is available only in C++ in MSVC */
#  define inline __inline
#  define force_inline __forceinline
#  define noinline __declspec(noinline)
#elif defined __GNUC__ || (defined(__SUNPRO_C) && (__SUNPRO_C >= 0x590))
#  define inline __inline__
#  define force_inline __inline__ __attribute__ ((__always_inline__))
#  define noinline __attribute__((noinline))
#else
#  ifndef force_inline
#    define force_inline inline
#  endif
#  ifndef noinline
#    define noinline
#  endif
#endif

/* GCC visibility */
#if defined(__GNUC__) && __GNUC__ >= 4 && !defined(_WIN32)
#  define PIXMAN_EXPORT __attribute__ ((visibility("default")))
/* Sun Studio 8 visibility */
#elif defined(__SUNPRO_C) && (__SUNPRO_C >= 0x550)
#  define PIXMAN_EXPORT __global
#else
#  define PIXMAN_EXPORT
#endif

/* TLS: PIXMAN_DEFINE_THREAD_LOCAL/PIXMAN_GET_THREAD_LOCAL abstract
 * thread-local storage.  With PIXMAN_NO_TLS the "thread-local" object is
 * a plain static, i.e. pixman is then NOT safe for concurrent use. */
#if defined(PIXMAN_NO_TLS)

#  define PIXMAN_DEFINE_THREAD_LOCAL(type, name)			\
    static type name
#  define PIXMAN_GET_THREAD_LOCAL(name)					\
    (&name)

#elif defined(TOOLCHAIN_SUPPORTS__THREAD)

/* Compiler-native TLS (__thread). */
#  define PIXMAN_DEFINE_THREAD_LOCAL(type, name)			\
    static __thread type name
#  define PIXMAN_GET_THREAD_LOCAL(name)					\
    (&name)

#elif defined(__MINGW32__)

/* MinGW: emulate TLS with the Win32 TlsAlloc/TlsGetValue API.  The TLS
 * index is allocated lazily under a mutex on first access. */
#  define _NO_W32_PSEUDO_MODIFIERS
#  include <windows.h>

#  define PIXMAN_DEFINE_THREAD_LOCAL(type, name)			\
    static volatile int tls_ ## name ## _initialized = 0;		\
    static void *tls_ ## name ## _mutex = NULL;				\
    static unsigned tls_ ## name ## _index;				\
									\
    static type *							\
    tls_ ## name ## _alloc (void)					\
    {									\
        type *value = calloc (1, sizeof (type));			\
        if (value)							\
            TlsSetValue (tls_ ## name ## _index, value);		\
        return value;							\
    }									\
									\
    static force_inline type *						\
    tls_ ## name ## _get (void)						\
    {									\
	type *value;							\
	if (!tls_ ## name ## _initialized)				\
	{								\
	    if (!tls_ ## name ## _mutex)				\
	    {								\
		void *mutex = CreateMutexA (NULL, 0, NULL);		\
		if (InterlockedCompareExchangePointer (			\
			&tls_ ## name ## _mutex, mutex, NULL) != NULL)	\
		{							\
		    CloseHandle (mutex);				\
		}							\
	    }								\
	    WaitForSingleObject (tls_ ## name ## _mutex, 0xFFFFFFFF);	\
	    if (!tls_ ## name ## _initialized)				\
	    {								\
		tls_ ## name ## _index = TlsAlloc ();			\
		tls_ ## name ## _initialized = 1;			\
	    }								\
	    ReleaseMutex (tls_ ## name ## _mutex);			\
	}								\
	if (tls_ ## name ## _index == 0xFFFFFFFF)			\
	    return NULL;						\
	value = TlsGetValue (tls_ ## name ## _index);			\
	if (!value)							\
	    value = tls_ ## name ## _alloc ();				\
	return value;							\
    }

#  define PIXMAN_GET_THREAD_LOCAL(name)					\
    tls_ ## name ## _get ()

#elif defined(_MSC_VER)

#  define PIXMAN_DEFINE_THREAD_LOCAL(type, name)			\
    static __declspec(thread) type name
#  define PIXMAN_GET_THREAD_LOCAL(name)					\
    (&name)

#elif defined(HAVE_PTHREAD_SETSPECIFIC)

/* POSIX: pthread key created once; per-thread value allocated lazily
 * and freed automatically by the key destructor at thread exit. */
#include <pthread.h>

#  define PIXMAN_DEFINE_THREAD_LOCAL(type, name)			\
    static pthread_once_t tls_ ## name ## _once_control = PTHREAD_ONCE_INIT; \
    static pthread_key_t tls_ ## name ## _key;				\
									\
    static void								\
    tls_ ## name ## _destroy_value (void *value)			\
    {									\
	free (value);							\
    }									\
									\
    static void								\
    tls_ ## name ## _make_key (void)					\
    {									\
	pthread_key_create (&tls_ ## name ## _key,			\
			    tls_ ## name ## _destroy_value);		\
    }									\
									\
    static type *							\
    tls_ ## name ## _alloc (void)					\
    {									\
	type *value = calloc (1, sizeof (type));			\
	if (value)							\
	    pthread_setspecific (tls_ ## name ## _key, value);		\
	return value;							\
    }									\
									\
    static force_inline type *						\
    tls_ ## name ## _get (void)						\
    {									\
	type *value = NULL;						\
	if (pthread_once (&tls_ ## name ## _once_control,		\
			  tls_ ## name ## _make_key) == 0)		\
	{								\
	    value = pthread_getspecific (tls_ ## name ## _key);		\
	    if (!value)							\
		value = tls_ ## name ## _alloc ();			\
	}								\
	return value;							\
    }									\
    extern int no_such_variable

#  define PIXMAN_GET_THREAD_LOCAL(name)					\
    tls_ ## name ## _get ()

#else

#    error "Unknown thread local support for this system. Pixman will not work with multiple threads. Define PIXMAN_NO_TLS to acknowledge and accept this limitation and compile pixman without thread-safety support."

#endif
/programs/develop/libraries/pixman/pixman-conical-gradient.c
0,0 → 1,198
/*
* Copyright © 2000 SuSE, Inc.
* Copyright © 2007 Red Hat, Inc.
* Copyright © 2000 Keith Packard, member of The XFree86 Project, Inc.
* 2005 Lars Knoll & Zack Rusin, Trolltech
*
* Permission to use, copy, modify, distribute, and sell this software and its
* documentation for any purpose is hereby granted without fee, provided that
* the above copyright notice appear in all copies and that both that
* copyright notice and this permission notice appear in supporting
* documentation, and that the name of Keith Packard not be used in
* advertising or publicity pertaining to distribution of the software without
* specific, written prior permission. Keith Packard makes no
* representations about the suitability of this software for any purpose. It
* is provided "as is" without express or implied warranty.
*
* THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS
* SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS, IN NO EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY
* SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN
* AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING
* OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS
* SOFTWARE.
*/
 
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif
 
#include <stdlib.h>
#include <math.h>
#include "pixman-private.h"
 
/* Convert a point (relative to the gradient center) plus the gradient's
 * base angle into a gradient parameter in [0, 1].  The angle sum is
 * wrapped into [0, 2*pi) and then scaled; the final "1 -" flips the
 * direction so the rotation runs counter-clockwise. */
static force_inline double
coordinates_to_parameter (double x, double y, double angle)
{
    double theta = atan2 (y, x) + angle;

    /* Wrap theta into [0, 2*pi). */
    while (theta < 0)
	theta += 2 * M_PI;

    while (theta >= 2 * M_PI)
	theta -= 2 * M_PI;

    /* Scale to [0, 1] and make the rotation CCW. */
    return 1 - theta * (1 / (2 * M_PI));
}
 
/* Fetch one scanline of a conical (angular) gradient as 32-bit pixels.
 *
 * (x, y) is the left end of the scanline, `width` its length; results are
 * written to `buffer`.  If `mask` is non-NULL, pixels whose mask entry is
 * zero are skipped (their buffer slot is left untouched).
 *
 * When the image transform is affine, the per-pixel step is a constant
 * (cx, cy) increment; otherwise each pixel is divided through by the
 * homogeneous coordinate rz.
 */
static void
conical_gradient_get_scanline_32 (pixman_image_t *image,
                                  int             x,
                                  int             y,
                                  int             width,
                                  uint32_t *      buffer,
                                  const uint32_t *mask)
{
    source_image_t *source = (source_image_t *)image;
    gradient_t *gradient = (gradient_t *)source;
    conical_gradient_t *conical = (conical_gradient_t *)image;
    uint32_t *end = buffer + width;
    pixman_gradient_walker_t walker;
    pixman_bool_t affine = TRUE;
    /* Identity transform defaults: step one pixel in x per iteration. */
    double cx = 1.;
    double cy = 0.;
    double cz = 0.;
    double rx = x + 0.5;
    double ry = y + 0.5;
    double rz = 1.;

    _pixman_gradient_walker_init (&walker, gradient, source->common.repeat);

    if (source->common.transform)
    {
	pixman_vector_t v;

	/* reference point is the center of the pixel */
	v.vector[0] = pixman_int_to_fixed (x) + pixman_fixed_1 / 2;
	v.vector[1] = pixman_int_to_fixed (y) + pixman_fixed_1 / 2;
	v.vector[2] = pixman_fixed_1;

	if (!pixman_transform_point_3d (source->common.transform, &v))
	    return;

	/* Per-pixel increments come from the first matrix column
	 * (16.16 fixed point converted to double). */
	cx = source->common.transform->matrix[0][0] / 65536.;
	cy = source->common.transform->matrix[1][0] / 65536.;
	cz = source->common.transform->matrix[2][0] / 65536.;

	rx = v.vector[0] / 65536.;
	ry = v.vector[1] / 65536.;
	rz = v.vector[2] / 65536.;

	/* Affine iff the transformed w stays constant at 1 along the row. */
	affine =
	    source->common.transform->matrix[2][0] == 0 &&
	    v.vector[2] == pixman_fixed_1;
    }

    if (affine)
    {
	/* Fold the gradient center into the running point once. */
	rx -= conical->center.x / 65536.;
	ry -= conical->center.y / 65536.;

	while (buffer < end)
	{
	    if (!mask || *mask++)
	    {
		double t = coordinates_to_parameter (rx, ry, conical->angle);

		*buffer = _pixman_gradient_walker_pixel (
		    &walker, (pixman_fixed_48_16_t)pixman_double_to_fixed (t));
	    }

	    ++buffer;

	    rx += cx;
	    ry += cy;
	}
    }
    else
    {
	while (buffer < end)
	{
	    double x, y;

	    if (!mask || *mask++)
	    {
		double t;

		/* Projective case: divide by w; degenerate w maps to
		 * the center point. */
		if (rz != 0)
		{
		    x = rx / rz;
		    y = ry / rz;
		}
		else
		{
		    x = y = 0.;
		}

		x -= conical->center.x / 65536.;
		y -= conical->center.y / 65536.;

		t = coordinates_to_parameter (x, y, conical->angle);

		*buffer = _pixman_gradient_walker_pixel (
		    &walker, (pixman_fixed_48_16_t)pixman_double_to_fixed (t));
	    }

	    ++buffer;

	    rx += cx;
	    ry += cy;
	    rz += cz;
	}
    }
}
 
/* Property-change hook: (re)install the scanline fetchers for a conical
 * gradient image.  The 64-bit path goes through the generic widening
 * wrapper around the 32-bit fetcher. */
static void
conical_gradient_property_changed (pixman_image_t *image)
{
    image->common.get_scanline_32 = conical_gradient_get_scanline_32;
    image->common.get_scanline_64 = _pixman_image_get_scanline_generic_64;
}
 
/* Create a conical-gradient source image.
 *
 * center  - apex of the gradient, in fixed point
 * angle   - start angle in fixed-point degrees (normalized mod 360)
 * stops   - array of gradient stops, copied by _pixman_init_gradient
 * n_stops - number of entries in `stops`
 *
 * Returns a new image (caller owns the reference) or NULL on allocation
 * failure.
 */
PIXMAN_EXPORT pixman_image_t *
pixman_image_create_conical_gradient (pixman_point_fixed_t *        center,
                                      pixman_fixed_t                angle,
                                      const pixman_gradient_stop_t *stops,
                                      int                           n_stops)
{
    pixman_image_t *image;
    conical_gradient_t *conical;

    image = _pixman_image_allocate ();
    if (image == NULL)
	return NULL;

    conical = &image->conical;

    if (!_pixman_init_gradient (&conical->common, stops, n_stops))
    {
	free (image);
	return NULL;
    }

    /* Normalize to [0, 360) degrees, then store the angle in radians. */
    angle = MOD (angle, pixman_int_to_fixed (360));

    image->type = CONICAL;
    conical->center = *center;
    conical->angle = (pixman_fixed_to_double (angle) / 180.0) * M_PI;
    image->common.property_changed = conical_gradient_property_changed;

    return image;
}
 
/programs/develop/libraries/pixman/pixman-cpu.c
0,0 → 1,598
/*
* Copyright © 2000 SuSE, Inc.
* Copyright © 2007 Red Hat, Inc.
*
* Permission to use, copy, modify, distribute, and sell this software and its
* documentation for any purpose is hereby granted without fee, provided that
* the above copyright notice appear in all copies and that both that
* copyright notice and this permission notice appear in supporting
* documentation, and that the name of SuSE not be used in advertising or
* publicity pertaining to distribution of the software without specific,
* written prior permission. SuSE makes no representations about the
* suitability of this software for any purpose. It is provided "as is"
* without express or implied warranty.
*
* SuSE DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING ALL
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL SuSE
* BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
* OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif
 
#include <string.h>
 
#if defined(USE_ARM_SIMD) && defined(_MSC_VER)
/* Needed for EXCEPTION_ILLEGAL_INSTRUCTION */
#include <windows.h>
#endif
 
#include "pixman-private.h"
 
#ifdef USE_VMX
 
/* The CPU detection code needs to be in a file not compiled with
* "-maltivec -mabi=altivec", as gcc would try to save vector register
* across function calls causing SIGILL on cpus without Altivec/vmx.
*/
static pixman_bool_t initialized = FALSE;
static volatile pixman_bool_t have_vmx = TRUE;
 
#ifdef __APPLE__
#include <sys/sysctl.h>
 
static pixman_bool_t
pixman_have_vmx (void)
{
if (!initialized)
{
size_t length = sizeof(have_vmx);
int error =
sysctlbyname ("hw.optional.altivec", &have_vmx, &length, NULL, 0);
 
if (error)
have_vmx = FALSE;
 
initialized = TRUE;
}
return have_vmx;
}
 
#elif defined (__OpenBSD__)
#include <sys/param.h>
#include <sys/sysctl.h>
#include <machine/cpu.h>
 
static pixman_bool_t
pixman_have_vmx (void)
{
if (!initialized)
{
int mib[2] = { CTL_MACHDEP, CPU_ALTIVEC };
size_t length = sizeof(have_vmx);
int error =
sysctl (mib, 2, &have_vmx, &length, NULL, 0);
 
if (error != 0)
have_vmx = FALSE;
 
initialized = TRUE;
}
return have_vmx;
}
 
#elif defined (__linux__)
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
#include <stdio.h>
#include <linux/auxvec.h>
#include <asm/cputable.h>
 
static pixman_bool_t
pixman_have_vmx (void)
{
if (!initialized)
{
char fname[64];
unsigned long buf[64];
ssize_t count = 0;
pid_t pid;
int fd, i;
 
pid = getpid ();
snprintf (fname, sizeof(fname) - 1, "/proc/%d/auxv", pid);
 
fd = open (fname, O_RDONLY);
if (fd >= 0)
{
for (i = 0; i <= (count / sizeof(unsigned long)); i += 2)
{
/* Read more if buf is empty... */
if (i == (count / sizeof(unsigned long)))
{
count = read (fd, buf, sizeof(buf));
if (count <= 0)
break;
i = 0;
}
 
if (buf[i] == AT_HWCAP)
{
have_vmx = !!(buf[i + 1] & PPC_FEATURE_HAS_ALTIVEC);
initialized = TRUE;
break;
}
else if (buf[i] == AT_NULL)
{
break;
}
}
close (fd);
}
}
if (!initialized)
{
/* Something went wrong. Assume 'no' rather than playing
fragile tricks with catching SIGILL. */
have_vmx = FALSE;
initialized = TRUE;
}
 
return have_vmx;
}
 
#else /* !__APPLE__ && !__OpenBSD__ && !__linux__ */
#include <signal.h>
#include <setjmp.h>
 
static jmp_buf jump_env;
 
static void
vmx_test (int sig,
siginfo_t *si,
void * unused)
{
longjmp (jump_env, 1);
}
 
static pixman_bool_t
pixman_have_vmx (void)
{
struct sigaction sa, osa;
int jmp_result;
 
if (!initialized)
{
sa.sa_flags = SA_SIGINFO;
sigemptyset (&sa.sa_mask);
sa.sa_sigaction = vmx_test;
sigaction (SIGILL, &sa, &osa);
jmp_result = setjmp (jump_env);
if (jmp_result == 0)
{
asm volatile ( "vor 0, 0, 0" );
}
sigaction (SIGILL, &osa, NULL);
have_vmx = (jmp_result == 0);
initialized = TRUE;
}
return have_vmx;
}
 
#endif /* __APPLE__ */
#endif /* USE_VMX */
 
#if defined(USE_ARM_SIMD) || defined(USE_ARM_NEON)
 
#if defined(_MSC_VER)
 
#if defined(USE_ARM_SIMD)
extern int pixman_msvc_try_arm_simd_op ();
 
pixman_bool_t
pixman_have_arm_simd (void)
{
static pixman_bool_t initialized = FALSE;
static pixman_bool_t have_arm_simd = FALSE;
 
if (!initialized)
{
__try {
pixman_msvc_try_arm_simd_op ();
have_arm_simd = TRUE;
} __except (GetExceptionCode () == EXCEPTION_ILLEGAL_INSTRUCTION) {
have_arm_simd = FALSE;
}
initialized = TRUE;
}
 
return have_arm_simd;
}
 
#endif /* USE_ARM_SIMD */
 
#if defined(USE_ARM_NEON)
extern int pixman_msvc_try_arm_neon_op ();
 
pixman_bool_t
pixman_have_arm_neon (void)
{
static pixman_bool_t initialized = FALSE;
static pixman_bool_t have_arm_neon = FALSE;
 
if (!initialized)
{
__try
{
pixman_msvc_try_arm_neon_op ();
have_arm_neon = TRUE;
}
__except (GetExceptionCode () == EXCEPTION_ILLEGAL_INSTRUCTION)
{
have_arm_neon = FALSE;
}
initialized = TRUE;
}
 
return have_arm_neon;
}
 
#endif /* USE_ARM_NEON */
 
#else /* linux ELF */
 
#include <stdlib.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <fcntl.h>
#include <string.h>
#include <elf.h>
 
static pixman_bool_t arm_has_v7 = FALSE;
static pixman_bool_t arm_has_v6 = FALSE;
static pixman_bool_t arm_has_vfp = FALSE;
static pixman_bool_t arm_has_neon = FALSE;
static pixman_bool_t arm_has_iwmmxt = FALSE;
static pixman_bool_t arm_tests_initialized = FALSE;
 
/* Populate the arm_has_* feature flags by parsing the ELF auxiliary
 * vector the Linux kernel exposes in /proc/self/auxv.
 *
 * AT_HWCAP carries the hardware-capability bitmask; the VFP, iWMMXt and
 * NEON bit values are hardcoded so we do not depend on any particular
 * version of the hwcap headers.  AT_PLATFORM names the architecture
 * level ("v6l" / "v7l"); v7 implies v6.
 *
 * arm_tests_initialized is always set, even if the file cannot be
 * opened, so the probe runs at most once.
 *
 * Fix: declared with a proper (void) prototype instead of the old-style
 * empty parameter list. */
static void
pixman_arm_read_auxv (void)
{
    int fd;
    Elf32_auxv_t aux;

    fd = open ("/proc/self/auxv", O_RDONLY);
    if (fd >= 0)
    {
	/* Read one auxv entry at a time until a short read (end of vector). */
	while (read (fd, &aux, sizeof(Elf32_auxv_t)) == sizeof(Elf32_auxv_t))
	{
	    if (aux.a_type == AT_HWCAP)
	    {
		uint32_t hwcap = aux.a_un.a_val;
		/* hardcode these values to avoid depending on specific
		 * versions of the hwcap header, e.g. HWCAP_NEON
		 */
		arm_has_vfp = (hwcap & 64) != 0;
		arm_has_iwmmxt = (hwcap & 512) != 0;
		/* this flag is only present on kernel 2.6.29 */
		arm_has_neon = (hwcap & 4096) != 0;
	    }
	    else if (aux.a_type == AT_PLATFORM)
	    {
		const char *plat = (const char*) aux.a_un.a_val;
		if (strncmp (plat, "v7l", 3) == 0)
		{
		    arm_has_v7 = TRUE;
		    arm_has_v6 = TRUE;
		}
		else if (strncmp (plat, "v6l", 3) == 0)
		{
		    arm_has_v6 = TRUE;
		}
	    }
	}
	close (fd);
    }

    arm_tests_initialized = TRUE;
}
 
#if defined(USE_ARM_SIMD)
/* ARMv6 SIMD is usable iff the kernel reported a v6 (or v7) platform
 * string; lazily triggers the auxv parse on first call. */
pixman_bool_t
pixman_have_arm_simd (void)
{
    if (!arm_tests_initialized)
	pixman_arm_read_auxv ();

    return arm_has_v6;
}
 
#endif /* USE_ARM_SIMD */
 
#if defined(USE_ARM_NEON)
/* NEON availability comes from the AT_HWCAP NEON bit (kernel >= 2.6.29);
 * lazily triggers the auxv parse on first call. */
pixman_bool_t
pixman_have_arm_neon (void)
{
    if (!arm_tests_initialized)
	pixman_arm_read_auxv ();

    return arm_has_neon;
}
 
#endif /* USE_ARM_NEON */
 
#endif /* linux */
 
#endif /* USE_ARM_SIMD || USE_ARM_NEON */
 
#if defined(USE_MMX) || defined(USE_SSE2)
/* The CPU detection code needs to be in a file not compiled with
* "-mmmx -msse", as gcc would generate CMOV instructions otherwise
* that would lead to SIGILL instructions on old CPUs that don't have
* it.
*/
#if !defined(__amd64__) && !defined(__x86_64__) && !defined(_M_AMD64)
 
#ifdef HAVE_GETISAX
#include <sys/auxv.h>
#endif
 
/* Bitmask of x86 CPU features detected at run time.
 * NOTE(review): SSE is 0x6, i.e. it also covers the MMX_EXTENSIONS bit —
 * presumably intentional, since SSE-capable CPUs implement the AMD MMX
 * extensions; confirm before rearranging these values. */
typedef enum
{
    NO_FEATURES = 0,
    MMX = 0x1,
    MMX_EXTENSIONS = 0x2,
    SSE = 0x6,
    SSE2 = 0x8,
    CMOV = 0x10
} cpu_features_t;
 
 
/* Query the x86 feature flags (compiled only for 32-bit builds; 64-bit
 * builds hardcode MMX/SSE2 below).
 *
 * On Solaris this uses getisax(); otherwise it first verifies that the
 * CPUID instruction exists by toggling EFLAGS bit 21 (the ID flag),
 * then reads the vendor string (CPUID leaf 0) and the standard feature
 * bits (leaf 1, EDX), and finally — for pre-SSE AMD/Geode parts — probes
 * the extended leaf 0x80000001 for the AMD MMX extensions.
 *
 * %ebx is saved/restored by hand around each CPUID because it may be
 * the PIC register and must not appear in the clobber list. */
static unsigned int
detect_cpu_features (void)
{
    unsigned int features = 0;
    unsigned int result = 0;

#ifdef HAVE_GETISAX
    if (getisax (&result, 1))
    {
	if (result & AV_386_CMOV)
	    features |= CMOV;
	if (result & AV_386_MMX)
	    features |= MMX;
	if (result & AV_386_AMD_MMX)
	    features |= MMX_EXTENSIONS;
	if (result & AV_386_SSE)
	    features |= SSE;
	if (result & AV_386_SSE2)
	    features |= SSE2;
    }
#else
    char vendor[13];
#ifdef _MSC_VER
    int vendor0 = 0, vendor1, vendor2;
#endif
    vendor[0] = 0;
    vendor[12] = 0;

#ifdef __GNUC__
    /* see p. 118 of amd64 instruction set manual Vol3 */
    /* We need to be careful about the handling of %ebx and
     * %esp here. We can't declare either one as clobbered
     * since they are special registers (%ebx is the "PIC
     * register" holding an offset to global data, %esp the
     * stack pointer), so we need to make sure they have their
     * original values when we access the output operands.
     */
    /* EFLAGS.ID toggle test: if the bit sticks, CPUID is supported and
     * the jz falls through to the CPUID sequence; otherwise %edx stays
     * 0 and result is reported as "no features". */
    __asm__ (
        "pushf\n"
        "pop %%eax\n"
        "mov %%eax, %%ecx\n"
        "xor $0x00200000, %%eax\n"
        "push %%eax\n"
        "popf\n"
        "pushf\n"
        "pop %%eax\n"
        "mov $0x0, %%edx\n"
        "xor %%ecx, %%eax\n"
        "jz 1f\n"

        "mov $0x00000000, %%eax\n"
        "push %%ebx\n"
        "cpuid\n"
        "mov %%ebx, %%eax\n"
        "pop %%ebx\n"
        "mov %%eax, %1\n"
        "mov %%edx, %2\n"
        "mov %%ecx, %3\n"
        "mov $0x00000001, %%eax\n"
        "push %%ebx\n"
        "cpuid\n"
        "pop %%ebx\n"
        "1:\n"
        "mov %%edx, %0\n"
        : "=r" (result),
        "=m" (vendor[0]),
        "=m" (vendor[4]),
        "=m" (vendor[8])
        :
        : "%eax", "%ecx", "%edx"
        );

#elif defined (_MSC_VER)

    /* Same sequence as the GCC branch, in MSVC inline-assembly syntax.
     * The vendor string is staged through three ints and reassembled
     * with memmove below. */
    _asm {
	pushfd
	pop eax
	mov ecx, eax
	xor eax, 00200000h
	push eax
	popfd
	pushfd
	pop eax
	mov edx, 0
	xor eax, ecx
	jz nocpuid

	mov eax, 0
	push ebx
	cpuid
	mov eax, ebx
	pop ebx
	mov vendor0, eax
	mov vendor1, edx
	mov vendor2, ecx
	mov eax, 1
	push ebx
	cpuid
	pop ebx
    nocpuid:
	mov result, edx
    }
    memmove (vendor + 0, &vendor0, 4);
    memmove (vendor + 4, &vendor1, 4);
    memmove (vendor + 8, &vendor2, 4);

#else
#   error unsupported compiler
#endif

    features = 0;
    if (result)
    {
	/* result now contains the standard feature bits */
	if (result & (1 << 15))
	    features |= CMOV;
	if (result & (1 << 23))
	    features |= MMX;
	if (result & (1 << 25))
	    features |= SSE;
	if (result & (1 << 26))
	    features |= SSE2;
	if ((features & MMX) && !(features & SSE) &&
	    (strcmp (vendor, "AuthenticAMD") == 0 ||
	     strcmp (vendor, "Geode by NSC") == 0))
	{
	    /* check for AMD MMX extensions */
	    /* NOTE(review): the "cmp 1 / jge" uses a *signed* compare —
	     * a valid max extended leaf (>= 0x80000000) is negative as a
	     * signed int, so the extended CPUID runs; small positive
	     * garbage skips it.  Looks intentional — confirm before
	     * touching. */
#ifdef __GNUC__
	    __asm__ (
	        " push %%ebx\n"
	        " mov $0x80000000, %%eax\n"
	        " cpuid\n"
	        " xor %%edx, %%edx\n"
	        " cmp $0x1, %%eax\n"
	        " jge 2f\n"
	        " mov $0x80000001, %%eax\n"
	        " cpuid\n"
	        "2:\n"
	        " pop %%ebx\n"
	        " mov %%edx, %0\n"
	        : "=r" (result)
	        :
	        : "%eax", "%ecx", "%edx"
	        );
#elif defined _MSC_VER
	    _asm {
		push ebx
		mov eax, 80000000h
		cpuid
		xor edx, edx
		cmp eax, 1
		jge notamd
		mov eax, 80000001h
		cpuid
	    notamd:
		pop ebx
		mov result, edx
	    }
#endif
	    /* bit 22 of extended EDX = AMD MMX extensions */
	    if (result & (1 << 22))
		features |= MMX_EXTENSIONS;
	}
    }
#endif /* HAVE_GETISAX */

    return features;
}
 
/* Cached check that the CPU has both MMX and the AMD MMX extensions
 * (pixman's MMX paths rely on both).  detect_cpu_features() runs once. */
static pixman_bool_t
pixman_have_mmx (void)
{
    static pixman_bool_t initialized = FALSE;
    static pixman_bool_t mmx_present;

    if (initialized)
	return mmx_present;

    mmx_present =
	(detect_cpu_features () & (MMX | MMX_EXTENSIONS)) == (MMX | MMX_EXTENSIONS);
    initialized = TRUE;

    return mmx_present;
}
 
#ifdef USE_SSE2
/* Cached check that the CPU has the full MMX/MMX-ext/SSE/SSE2 set
 * required by pixman's SSE2 paths. */
static pixman_bool_t
pixman_have_sse2 (void)
{
    static pixman_bool_t initialized = FALSE;
    static pixman_bool_t sse2_present;

    if (!initialized)
    {
	const unsigned int wanted = MMX | MMX_EXTENSIONS | SSE | SSE2;

	sse2_present = (detect_cpu_features () & wanted) == wanted;
	initialized = TRUE;
    }

    return sse2_present;
}
 
#endif
 
#else /* __amd64__ */
#ifdef USE_MMX
#define pixman_have_mmx() TRUE
#endif
#ifdef USE_SSE2
#define pixman_have_sse2() TRUE
#endif
#endif /* __amd64__ */
#endif
 
/* Select the best compiled-in implementation that the running CPU
 * supports.  The order of the checks encodes priority:
 * SSE2 > MMX > ARM NEON > ARM SIMD > VMX, with the generic C fast
 * paths as the final fallback. */
pixman_implementation_t *
_pixman_choose_implementation (void)
{
#ifdef USE_SSE2
    if (pixman_have_sse2 ())
	return _pixman_implementation_create_sse2 ();
#endif
#ifdef USE_MMX
    if (pixman_have_mmx ())
	return _pixman_implementation_create_mmx ();
#endif

#ifdef USE_ARM_NEON
    if (pixman_have_arm_neon ())
	return _pixman_implementation_create_arm_neon ();
#endif
#ifdef USE_ARM_SIMD
    if (pixman_have_arm_simd ())
	return _pixman_implementation_create_arm_simd ();
#endif
#ifdef USE_VMX
    if (pixman_have_vmx ())
	return _pixman_implementation_create_vmx ();
#endif

    return _pixman_implementation_create_fast_path ();
}
 
/programs/develop/libraries/pixman/pixman-edge-accessors.c
0,0 → 1,4
 
#define PIXMAN_FB_ACCESSORS
 
#include "pixman-edge.c"
/programs/develop/libraries/pixman/pixman-edge-imp.h
0,0 → 1,182
/*
* Copyright © 2004 Keith Packard
*
* Permission to use, copy, modify, distribute, and sell this software and its
* documentation for any purpose is hereby granted without fee, provided that
* the above copyright notice appear in all copies and that both that
* copyright notice and this permission notice appear in supporting
* documentation, and that the name of Keith Packard not be used in
* advertising or publicity pertaining to distribution of the software without
* specific, written prior permission. Keith Packard makes no
* representations about the suitability of this software for any purpose. It
* is provided "as is" without express or implied warranty.
*
* KEITH PACKARD DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
* INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
* EVENT SHALL KEITH PACKARD BE LIABLE FOR ANY SPECIAL, INDIRECT OR
* CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
* DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
 
#ifndef rasterize_span
#endif
 
/* Rasterize the area between the left edge 'l' and right edge 'r',
 * from top 't' to bottom 'b' (fixed-point), into an alpha image.
 *
 * This file is a template: pixman-edge.c #includes it with N_BITS (bits
 * per alpha sample) and RASTERIZE_EDGES (the generated function name)
 * defined, producing the 1-bit and 4-bit rasterizers from one body.
 * The N_BITS == 1 variant sets whole bits via masked word writes; the
 * antialiased variants accumulate per-pixel coverage via ADD_ALPHA. */
static void
RASTERIZE_EDGES (pixman_image_t *image,
		 pixman_edge_t *l,
		 pixman_edge_t *r,
		 pixman_fixed_t t,
		 pixman_fixed_t b)
{
    pixman_fixed_t y = t;
    uint32_t *line;
    uint32_t *buf = (image)->bits.bits;
    int stride = (image)->bits.rowstride;
    int width = (image)->bits.width;

    line = buf + pixman_fixed_to_int (y) * stride;

    /* One iteration per sub-pixel sample row, from t down to b. */
    for (;;)
    {
	pixman_fixed_t lx;
	pixman_fixed_t rx;
	int lxi;
	int rxi;

	lx = l->x;
	rx = r->x;
#if N_BITS == 1
	/* For the non-antialiased case, round the coordinates up, in effect
	 * sampling just slightly to the left of the pixel. This is so that
	 * when the sample point lies exactly on the line, we round towards
	 * north-west.
	 *
	 * (The AA case does a similar adjustment in RENDER_SAMPLES_X)
	 */
	lx += X_FRAC_FIRST(1) - pixman_fixed_e;
	rx += X_FRAC_FIRST(1) - pixman_fixed_e;
#endif
	/* clip X */
	if (lx < 0)
	    lx = 0;
	if (pixman_fixed_to_int (rx) >= width)
#if N_BITS == 1
	    rx = pixman_int_to_fixed (width);
#else
	    /* Use the last pixel of the scanline, covered 100%.
	     * We can't use the first pixel following the scanline,
	     * because accessing it could result in a buffer overrun.
	     */
	    rx = pixman_int_to_fixed (width) - 1;
#endif

	/* Skip empty (or backwards) sections */
	if (rx > lx)
	{

	    /* Find pixel bounds for span */
	    lxi = pixman_fixed_to_int (lx);
	    rxi = pixman_fixed_to_int (rx);

#if N_BITS == 1
	    {
		/* 1-bit destination: set bits [lxi, rxi) of the scanline.
		 * The span is split into a partial leading word
		 * (startmask), nmiddle full words, and a partial trailing
		 * word (endmask). */

#define LEFT_MASK(x)							\
		(((x) & 0x1f) ?						\
		 SCREEN_SHIFT_RIGHT (0xffffffff, (x) & 0x1f) : 0)
#define RIGHT_MASK(x)							\
		(((32 - (x)) & 0x1f) ?					\
		 SCREEN_SHIFT_LEFT (0xffffffff, (32 - (x)) & 0x1f) : 0)
#define MASK_BITS(x,w,l,n,r) {						\
		n = (w);						\
		r = RIGHT_MASK ((x) + n);				\
		l = LEFT_MASK (x);					\
		if (l) {						\
		    n -= 32 - ((x) & 0x1f);				\
		    if (n < 0) {					\
			n = 0;						\
			l &= r;						\
			r = 0;						\
		    }							\
		}							\
		n >>= 5;						\
	    }
		uint32_t *a = line;
		uint32_t startmask;
		uint32_t endmask;
		int nmiddle;
		int width = rxi - lxi;
		int x = lxi;
		a += x >> 5;
		x &= 0x1f;
		MASK_BITS (x, width, startmask, nmiddle, endmask);

		if (startmask) {
		    WRITE(image, a, READ(image, a) | startmask);
		    a++;
		}
		while (nmiddle--)
		    WRITE(image, a++, 0xffffffff);
		if (endmask)
		    WRITE(image, a, READ(image, a) | endmask);
	    }
#else
	    {
		/* Antialiased destination: add fractional coverage to the
		 * partially covered end pixels and full per-sample-row
		 * coverage to the interior pixels. */
		DEFINE_ALPHA(line,lxi);
		int lxs;
		int rxs;

		/* Sample coverage for edge pixels */
		lxs = RENDER_SAMPLES_X (lx, N_BITS);
		rxs = RENDER_SAMPLES_X (rx, N_BITS);

		/* Add coverage across row */
		if (lxi == rxi)
		{
		    ADD_ALPHA (rxs - lxs);
		}
		else
		{
		    int xi;

		    ADD_ALPHA (N_X_FRAC(N_BITS) - lxs);
		    STEP_ALPHA;
		    for (xi = lxi + 1; xi < rxi; xi++)
		    {
			ADD_ALPHA (N_X_FRAC(N_BITS));
			STEP_ALPHA;
		    }
		    ADD_ALPHA (rxs);
		}
	    }
#endif
	}

	if (y == b)
	    break;

	/* Advance both edges one sample row; cross into the next pixel
	 * row (line += stride) only on the last sample of a pixel. */
#if N_BITS > 1
	if (pixman_fixed_frac (y) != Y_FRAC_LAST(N_BITS))
	{
	    RENDER_EDGE_STEP_SMALL (l);
	    RENDER_EDGE_STEP_SMALL (r);
	    y += STEP_Y_SMALL(N_BITS);
	}
	else
#endif
	{
	    RENDER_EDGE_STEP_BIG (l);
	    RENDER_EDGE_STEP_BIG (r);
	    y += STEP_Y_BIG(N_BITS);
	    line += stride;
	}
    }
}
 
#undef rasterize_span
/programs/develop/libraries/pixman/pixman-edge.c
0,0 → 1,384
/*
* Copyright © 2004 Keith Packard
*
* Permission to use, copy, modify, distribute, and sell this software and its
* documentation for any purpose is hereby granted without fee, provided that
* the above copyright notice appear in all copies and that both that
* copyright notice and this permission notice appear in supporting
* documentation, and that the name of Keith Packard not be used in
* advertising or publicity pertaining to distribution of the software without
* specific, written prior permission. Keith Packard makes no
* representations about the suitability of this software for any purpose. It
* is provided "as is" without express or implied warranty.
*
* KEITH PACKARD DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
* INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
* EVENT SHALL KEITH PACKARD BE LIABLE FOR ANY SPECIAL, INDIRECT OR
* CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
* DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
 
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif
 
#include <string.h>
 
#include "pixman-private.h"
#include "pixman-accessor.h"
 
/*
 * Step across a small sample grid gap.
 * Bresenham-style edge stepping: advance x by the per-step increment,
 * accumulate the error term, and take one extra signdx step when the
 * error overflows.  "Small" steps move between sample rows within one
 * pixel row.
 */
#define RENDER_EDGE_STEP_SMALL(edge)					\
    {									\
	edge->x += edge->stepx_small;					\
	edge->e += edge->dx_small;					\
	if (edge->e > 0)						\
	{								\
	    edge->e -= edge->dy;					\
	    edge->x += edge->signdx;					\
	}								\
    }

/*
 * Step across a large sample grid gap.
 * Same error accumulation as above, but with the "big" increments used
 * when crossing from the last sample row of one pixel row into the next.
 */
#define RENDER_EDGE_STEP_BIG(edge)					\
    {									\
	edge->x += edge->stepx_big;					\
	edge->e += edge->dx_big;					\
	if (edge->e > 0)						\
	{								\
	    edge->e -= edge->dy;					\
	    edge->x += edge->signdx;					\
	}								\
    }
 
/* This file is compiled twice: once directly and once via
 * pixman-edge-accessors.c with PIXMAN_FB_ACCESSORS defined, producing
 * the read/write-callback-aware variant under a different name. */
#ifdef PIXMAN_FB_ACCESSORS
#define PIXMAN_RASTERIZE_EDGES pixman_rasterize_edges_accessors
#else
#define PIXMAN_RASTERIZE_EDGES pixman_rasterize_edges_no_accessors
#endif

/*
 * 4 bit alpha
 */

#define N_BITS 4
#define RASTERIZE_EDGES rasterize_edges_4

/* Bit position of the 4-bit sample at offset o within a byte; the
 * order flips with host endianness. */
#ifndef WORDS_BIGENDIAN
#define SHIFT_4(o)      ((o) << 2)
#else
#define SHIFT_4(o)      ((1 - (o)) << 2)
#endif

/* Extract / replace the 4-bit sample at offset o of byte x. */
#define GET_4(x, o)     (((x) >> SHIFT_4 (o)) & 0xf)
#define PUT_4(x, o, v)							\
    (((x) & ~(0xf << SHIFT_4 (o))) | (((v) & 0xf) << SHIFT_4 (o)))

/* Cursor over packed 4-bit samples: __ap is the byte pointer, __ao the
 * nibble offset (0 or 1) within that byte. */
#define DEFINE_ALPHA(line, x)						\
    uint8_t   *__ap = (uint8_t *) line + ((x) >> 1);			\
    int __ao = (x) & 1

#define STEP_ALPHA      ((__ap += __ao), (__ao ^= 1))

/* Saturating add of coverage a to the current 4-bit sample: the
 * (0 - (__a >> 4)) term forces the nibble to 0xf on overflow. */
#define ADD_ALPHA(a)							\
    {									\
        uint8_t __o = READ (image, __ap);				\
        uint8_t __a = (a) + GET_4 (__o, __ao);				\
	WRITE (image, __ap, PUT_4 (__o, __ao, __a | (0 - ((__a) >> 4)))); \
    }

#include "pixman-edge-imp.h"

#undef ADD_ALPHA
#undef STEP_ALPHA
#undef DEFINE_ALPHA
#undef RASTERIZE_EDGES
#undef N_BITS


/*
 * 1 bit alpha
 */

#define N_BITS 1
#define RASTERIZE_EDGES rasterize_edges_1

#include "pixman-edge-imp.h"

#undef RASTERIZE_EDGES
#undef N_BITS
 
/*
* 8 bit alpha
*/
 
/* Saturate an int into the 0..255 range of an 8-bit alpha sample. */
static force_inline uint8_t
clip255 (int x)
{
    return (x > 255) ? 255 : x;
}
 
/* Add 'val' with saturation to 'length' consecutive 8-bit alpha
 * samples starting at 'buf'.  Uses READ/WRITE so it works in both the
 * direct and the accessor compilation of this file (expects an 'image'
 * variable in scope). */
#define ADD_SATURATE_8(buf, val, length)				\
    do									\
    {									\
	int i__ = (length);						\
	uint8_t *buf__ = (buf);						\
	int val__ = (val);						\
									\
	while (i__--)							\
	{								\
	    WRITE (image, (buf__), clip255 (READ (image, (buf__)) + (val__))); \
	    (buf__)++;							\
	}								\
    } while (0)
 
/*
 * We want to detect the case where we add the same value to a long
 * span of pixels. The triangles on the end are filled in while we
 * count how many sub-pixel scanlines contribute to the middle section.
 *
 *                 +--------------------------+
 *  fill_height =|   \                      /
 *                     +------------------+
 *                      |================|
 *                   fill_start       fill_end
 *
 * 8-bit alpha rasterizer: per-sample coverage is added immediately for
 * the edge pixels, while the constant-coverage middle section is
 * remembered in fill_start/fill_end/fill_size and flushed in one pass
 * per pixel row (a memset when all N_Y_FRAC(8) sample rows contributed,
 * i.e. full coverage, otherwise a saturating add).
 */
static void
rasterize_edges_8 (pixman_image_t *image,
                   pixman_edge_t * l,
                   pixman_edge_t * r,
                   pixman_fixed_t  t,
                   pixman_fixed_t  b)
{
    pixman_fixed_t y = t;
    uint32_t  *line;
    int fill_start = -1, fill_end = -1;  /* pending middle span; -1 = none */
    int fill_size = 0;                   /* sample rows accumulated on it */
    uint32_t *buf = (image)->bits.bits;
    int stride = (image)->bits.rowstride;
    int width = (image)->bits.width;

    line = buf + pixman_fixed_to_int (y) * stride;

    for (;;)
    {
	uint8_t *ap = (uint8_t *) line;
	pixman_fixed_t lx, rx;
	int lxi, rxi;

	/* clip X */
	lx = l->x;
	if (lx < 0)
	    lx = 0;

	rx = r->x;

	if (pixman_fixed_to_int (rx) >= width)
	{
	    /* Use the last pixel of the scanline, covered 100%.
	     * We can't use the first pixel following the scanline,
	     * because accessing it could result in a buffer overrun.
	     */
	    rx = pixman_int_to_fixed (width) - 1;
	}

	/* Skip empty (or backwards) sections */
	if (rx > lx)
	{
	    int lxs, rxs;

	    /* Find pixel bounds for span. */
	    lxi = pixman_fixed_to_int (lx);
	    rxi = pixman_fixed_to_int (rx);

	    /* Sample coverage for edge pixels */
	    lxs = RENDER_SAMPLES_X (lx, 8);
	    rxs = RENDER_SAMPLES_X (rx, 8);

	    /* Add coverage across row */
	    if (lxi == rxi)
	    {
		/* Span within a single pixel. */
		WRITE (image, ap + lxi,
		       clip255 (READ (image, ap + lxi) + rxs - lxs));
	    }
	    else
	    {
		WRITE (image, ap + lxi,
		       clip255 (READ (image, ap + lxi) + N_X_FRAC (8) - lxs));

		/* Move forward so that lxi/rxi is the pixel span */
		lxi++;

		/* Don't bother trying to optimize the fill unless
		 * the span is longer than 4 pixels. */
		if (rxi - lxi > 4)
		{
		    if (fill_start < 0)
		    {
			/* Start batching a new middle span. */
			fill_start = lxi;
			fill_end = rxi;
			fill_size++;
		    }
		    else
		    {
			if (lxi >= fill_end || rxi < fill_start)
			{
			    /* We're beyond what we saved, just fill it */
			    ADD_SATURATE_8 (ap + fill_start,
					    fill_size * N_X_FRAC (8),
					    fill_end - fill_start);
			    fill_start = lxi;
			    fill_end = rxi;
			    fill_size = 1;
			}
			else
			{
			    /* Overlapping span: flush the parts that no
			     * longer overlap and shrink/extend the batch. */
			    /* Update fill_start */
			    if (lxi > fill_start)
			    {
				ADD_SATURATE_8 (ap + fill_start,
						fill_size * N_X_FRAC (8),
						lxi - fill_start);
				fill_start = lxi;
			    }
			    else if (lxi < fill_start)
			    {
				ADD_SATURATE_8 (ap + lxi, N_X_FRAC (8),
						fill_start - lxi);
			    }

			    /* Update fill_end */
			    if (rxi < fill_end)
			    {
				ADD_SATURATE_8 (ap + rxi,
						fill_size * N_X_FRAC (8),
						fill_end - rxi);
				fill_end = rxi;
			    }
			    else if (fill_end < rxi)
			    {
				ADD_SATURATE_8 (ap + fill_end,
						N_X_FRAC (8),
						rxi - fill_end);
			    }
			    fill_size++;
			}
		    }
		}
		else
		{
		    ADD_SATURATE_8 (ap + lxi, N_X_FRAC (8), rxi - lxi);
		}

		WRITE (image, ap + rxi, clip255 (READ (image, ap + rxi) + rxs));
	    }
	}

	if (y == b)
	{
	    /* We're done, make sure we clean up any remaining fill. */
	    if (fill_start != fill_end)
	    {
		if (fill_size == N_Y_FRAC (8))
		{
		    /* Every sample row contributed: solid coverage. */
		    MEMSET_WRAPPED (image, ap + fill_start,
				    0xff, fill_end - fill_start);
		}
		else
		{
		    ADD_SATURATE_8 (ap + fill_start, fill_size * N_X_FRAC (8),
				    fill_end - fill_start);
		}
	    }
	    break;
	}

	if (pixman_fixed_frac (y) != Y_FRAC_LAST (8))
	{
	    RENDER_EDGE_STEP_SMALL (l);
	    RENDER_EDGE_STEP_SMALL (r);
	    y += STEP_Y_SMALL (8);
	}
	else
	{
	    RENDER_EDGE_STEP_BIG (l);
	    RENDER_EDGE_STEP_BIG (r);
	    y += STEP_Y_BIG (8);
	    /* Crossing into a new pixel row: flush the batched span. */
	    if (fill_start != fill_end)
	    {
		if (fill_size == N_Y_FRAC (8))
		{
		    MEMSET_WRAPPED (image, ap + fill_start,
				    0xff, fill_end - fill_start);
		}
		else
		{
		    ADD_SATURATE_8 (ap + fill_start, fill_size * N_X_FRAC (8),
				    fill_end - fill_start);
		}
		fill_start = fill_end = -1;
		fill_size = 0;
	    }
	    line += stride;
	}
    }
}
 
#ifndef PIXMAN_FB_ACCESSORS
static
#endif
void
PIXMAN_RASTERIZE_EDGES (pixman_image_t *image,
			pixman_edge_t * l,
			pixman_edge_t * r,
			pixman_fixed_t  t,
			pixman_fixed_t  b)
{
    /* Dispatch on the destination's bits per pixel.  Formats other than
     * a1/a4/a8 are silently ignored, exactly as before. */
    int bpp = PIXMAN_FORMAT_BPP (image->bits.format);

    if (bpp == 1)
	rasterize_edges_1 (image, l, r, t, b);
    else if (bpp == 4)
	rasterize_edges_4 (image, l, r, t, b);
    else if (bpp == 8)
	rasterize_edges_8 (image, l, r, t, b);
}
 
#ifndef PIXMAN_FB_ACCESSORS

/* Public entry point: rasterize the area between edges l and r from
 * top t to bottom b into an alpha image.  Routes to the accessor
 * variant when the image has read/write callbacks installed. */
PIXMAN_EXPORT void
pixman_rasterize_edges (pixman_image_t *image,
                        pixman_edge_t * l,
                        pixman_edge_t * r,
                        pixman_fixed_t  t,
                        pixman_fixed_t  b)
{
    return_if_fail (image->type == BITS);

    if (!image->bits.read_func && !image->bits.write_func)
	pixman_rasterize_edges_no_accessors (image, l, r, t, b);
    else
	pixman_rasterize_edges_accessors (image, l, r, t, b);
}

#endif
/programs/develop/libraries/pixman/pixman-fast-path.c
0,0 → 1,1852
/* -*- Mode: c; c-basic-offset: 4; tab-width: 8; indent-tabs-mode: t; -*- */
/*
* Copyright © 2000 SuSE, Inc.
* Copyright © 2007 Red Hat, Inc.
*
* Permission to use, copy, modify, distribute, and sell this software and its
* documentation for any purpose is hereby granted without fee, provided that
* the above copyright notice appear in all copies and that both that
* copyright notice and this permission notice appear in supporting
* documentation, and that the name of SuSE not be used in advertising or
* publicity pertaining to distribution of the software without specific,
* written prior permission. SuSE makes no representations about the
* suitability of this software for any purpose. It is provided "as is"
* without express or implied warranty.
*
* SuSE DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING ALL
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL SuSE
* BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
* OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*
* Author: Keith Packard, SuSE, Inc.
*/
 
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif
#include <string.h>
#include <stdlib.h>
#include "pixman-private.h"
#include "pixman-combine32.h"
#include "pixman-fast-path.h"
 
/* Read a 24-bit pixel at byte address 'a' without a misaligned 32-bit
 * load: depending on address parity the value is assembled from a
 * byte + aligned halfword, or an aligned halfword + byte. */
static force_inline uint32_t
fetch_24 (uint8_t *a)
{
    uint32_t pixel;

    if (((unsigned long)a) & 1)
    {
#ifdef WORDS_BIGENDIAN
	pixel = ((uint32_t) *a << 16) | *(uint16_t *)(a + 1);
#else
	pixel = *a | ((uint32_t) *(uint16_t *)(a + 1) << 8);
#endif
    }
    else
    {
#ifdef WORDS_BIGENDIAN
	pixel = ((uint32_t) *(uint16_t *)a << 8) | *(a + 2);
#else
	pixel = *(uint16_t *)a | ((uint32_t) *(a + 2) << 16);
#endif
    }

    return pixel;
}
 
/* Write the low 24 bits of 'v' at byte address 'a', mirroring the
 * parity-splitting scheme of fetch_24 so no misaligned 32-bit store
 * is performed. */
static force_inline void
store_24 (uint8_t *a,
          uint32_t v)
{
    if (((unsigned long)a) & 1)
    {
#ifdef WORDS_BIGENDIAN
	a[0] = (uint8_t) (v >> 16);
	*(uint16_t *)(a + 1) = (uint16_t) v;
#else
	a[0] = (uint8_t) v;
	*(uint16_t *)(a + 1) = (uint16_t) (v >> 8);
#endif
    }
    else
    {
#ifdef WORDS_BIGENDIAN
	*(uint16_t *)a = (uint16_t) (v >> 8);
	a[2] = (uint8_t) v;
#else
	*(uint16_t *)a = (uint16_t) v;
	a[2] = (uint8_t) (v >> 16);
#endif
    }
}
 
/* Porter-Duff OVER for premultiplied a8r8g8b8:
 *   result = src + dest * (1 - alpha(src)).
 * UN8x4_MUL_UN8_ADD_UN8x4 (pixman-combine32.h) performs the per-channel
 * multiply-and-add in place on 'dest'. */
static force_inline uint32_t
over (uint32_t src,
      uint32_t dest)
{
    uint32_t a = ~src >> 24;    /* 255 - source alpha */

    UN8x4_MUL_UN8_ADD_UN8x4 (dest, a, src);

    return dest;
}
 
static uint32_t
in (uint32_t x,
uint8_t y)
{
uint16_t a = y;
 
UN8x4_MUL_UN8 (x, a);
 
return x;
}
 
/*
* Naming convention:
*
* op_src_mask_dest
*/
/*
 * Naming convention:
 *
 *    op_src_mask_dest
 */
/* OVER with an x8r8g8b8 source (alpha forced to opaque via
 * | 0xff000000), an a8 mask, and an a8r8g8b8 destination.
 * m == 0 skips the pixel, m == 0xff is a plain copy, otherwise the
 * source is scaled by the mask (IN) then composited (OVER). */
static void
fast_composite_over_x888_8_8888 (pixman_implementation_t *imp,
                                 pixman_op_t              op,
                                 pixman_image_t *         src_image,
                                 pixman_image_t *         mask_image,
                                 pixman_image_t *         dst_image,
                                 int32_t                  src_x,
                                 int32_t                  src_y,
                                 int32_t                  mask_x,
                                 int32_t                  mask_y,
                                 int32_t                  dest_x,
                                 int32_t                  dest_y,
                                 int32_t                  width,
                                 int32_t                  height)
{
    uint32_t    *src, *src_line;
    uint32_t    *dst, *dst_line;
    uint8_t     *mask, *mask_line;
    int src_stride, mask_stride, dst_stride;
    uint8_t m;
    uint32_t s, d;
    int32_t w;

    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
    PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);
    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);

    while (height--)
    {
	src = src_line;
	src_line += src_stride;
	dst = dst_line;
	dst_line += dst_stride;
	mask = mask_line;
	mask_line += mask_stride;

	w = width;
	while (w--)
	{
	    m = *mask++;
	    if (m)
	    {
		s = *src | 0xff000000;

		if (m == 0xff)
		{
		    *dst = s;
		}
		else
		{
		    d = in (s, m);
		    *dst = over (d, *dst);
		}
	    }
	    src++;
	    dst++;
	}
    }
}
 
/* IN of a solid source through an a8 mask into an a8 destination:
 * dst = dst * (mask * src_alpha).  When the solid's alpha is 0xff the
 * mask multiplication is skipped; mask 0 clears, mask 0xff leaves the
 * destination unchanged. */
static void
fast_composite_in_n_8_8 (pixman_implementation_t *imp,
                         pixman_op_t              op,
                         pixman_image_t *         src_image,
                         pixman_image_t *         mask_image,
                         pixman_image_t *         dest_image,
                         int32_t                  src_x,
                         int32_t                  src_y,
                         int32_t                  mask_x,
                         int32_t                  mask_y,
                         int32_t                  dest_x,
                         int32_t                  dest_y,
                         int32_t                  width,
                         int32_t                  height)
{
    uint32_t src, srca;
    uint8_t     *dst_line, *dst;
    uint8_t     *mask_line, *mask, m;
    int dst_stride, mask_stride;
    int32_t w;
    uint16_t t;   /* scratch for the MUL_UN8 rounding */

    src = _pixman_image_get_solid (src_image, dest_image->bits.format);

    srca = src >> 24;

    PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 1);
    PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);

    if (srca == 0xff)
    {
	/* Opaque solid: the mask alone scales the destination. */
	while (height--)
	{
	    dst = dst_line;
	    dst_line += dst_stride;
	    mask = mask_line;
	    mask_line += mask_stride;
	    w = width;

	    while (w--)
	    {
		m = *mask++;

		if (m == 0)
		    *dst = 0;
		else if (m != 0xff)
		    *dst = MUL_UN8 (m, *dst, t);

		dst++;
	    }
	}
    }
    else
    {
	/* General case: pre-multiply the mask by the solid's alpha. */
	while (height--)
	{
	    dst = dst_line;
	    dst_line += dst_stride;
	    mask = mask_line;
	    mask_line += mask_stride;
	    w = width;

	    while (w--)
	    {
		m = *mask++;
		m = MUL_UN8 (m, srca, t);

		if (m == 0)
		    *dst = 0;
		else if (m != 0xff)
		    *dst = MUL_UN8 (m, *dst, t);

		dst++;
	    }
	}
    }
}
 
/* IN of an a8 source into an a8 destination: dst = dst * src, with
 * shortcuts for src == 0 (clear) and src == 0xff (no-op). */
static void
fast_composite_in_8_8 (pixman_implementation_t *imp,
                       pixman_op_t              op,
                       pixman_image_t *         src_image,
                       pixman_image_t *         mask_image,
                       pixman_image_t *         dest_image,
                       int32_t                  src_x,
                       int32_t                  src_y,
                       int32_t                  mask_x,
                       int32_t                  mask_y,
                       int32_t                  dest_x,
                       int32_t                  dest_y,
                       int32_t                  width,
                       int32_t                  height)
{
    uint8_t *dst_line, *dst;
    uint8_t *src_line, *src;
    int dst_stride, src_stride;
    int32_t w;
    uint8_t s;
    uint16_t t;   /* scratch for MUL_UN8's rounding */

    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint8_t, src_stride, src_line, 1);
    PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 1);

    for (; height > 0; height--)
    {
	src = src_line;
	dst = dst_line;
	src_line += src_stride;
	dst_line += dst_stride;

	for (w = width; w > 0; w--)
	{
	    s = *src++;

	    if (s != 0xff)
		*dst = (s == 0) ? 0 : MUL_UN8 (s, *dst, t);

	    dst++;
	}
    }
}
 
/* OVER of a solid color through an a8 mask into a8r8g8b8/x8r8g8b8.
 * Fully transparent solids bail out early; mask 0xff with an opaque
 * solid is a plain store, otherwise IN-then-OVER per pixel. */
static void
fast_composite_over_n_8_8888 (pixman_implementation_t *imp,
                              pixman_op_t              op,
                              pixman_image_t *         src_image,
                              pixman_image_t *         mask_image,
                              pixman_image_t *         dst_image,
                              int32_t                  src_x,
                              int32_t                  src_y,
                              int32_t                  mask_x,
                              int32_t                  mask_y,
                              int32_t                  dest_x,
                              int32_t                  dest_y,
                              int32_t                  width,
                              int32_t                  height)
{
    uint32_t src, srca;
    uint32_t    *dst_line, *dst, d;
    uint8_t     *mask_line, *mask, m;
    int dst_stride, mask_stride;
    int32_t w;

    src = _pixman_image_get_solid (src_image, dst_image->bits.format);

    srca = src >> 24;
    if (src == 0)
	return;

    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
    PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);

    while (height--)
    {
	dst = dst_line;
	dst_line += dst_stride;
	mask = mask_line;
	mask_line += mask_stride;
	w = width;

	while (w--)
	{
	    m = *mask++;
	    if (m == 0xff)
	    {
		if (srca == 0xff)
		    *dst = src;
		else
		    *dst = over (src, *dst);
	    }
	    else if (m)
	    {
		d = in (src, m);
		*dst = over (d, *dst);
	    }
	    dst++;
	}
    }
}
 
/* ADD of a solid color through a component-alpha (a8r8g8b8) mask into
 * 8888: dst = dst + src * mask, per channel, saturating.
 * (srca is computed but only the src == 0 early-out is used here.) */
static void
fast_composite_add_n_8888_8888_ca (pixman_implementation_t *imp,
				   pixman_op_t              op,
				   pixman_image_t *         src_image,
				   pixman_image_t *         mask_image,
				   pixman_image_t *         dst_image,
				   int32_t                  src_x,
				   int32_t                  src_y,
				   int32_t                  mask_x,
				   int32_t                  mask_y,
				   int32_t                  dest_x,
				   int32_t                  dest_y,
				   int32_t                  width,
				   int32_t                  height)
{
    uint32_t src, srca, s;
    uint32_t    *dst_line, *dst, d;
    uint32_t    *mask_line, *mask, ma;
    int dst_stride, mask_stride;
    int32_t w;

    src = _pixman_image_get_solid (src_image, dst_image->bits.format);

    srca = src >> 24;
    if (src == 0)
	return;

    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
    PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint32_t, mask_stride, mask_line, 1);

    while (height--)
    {
	dst = dst_line;
	dst_line += dst_stride;
	mask = mask_line;
	mask_line += mask_stride;
	w = width;

	while (w--)
	{
	    ma = *mask++;

	    if (ma)
	    {
		d = *dst;
		s = src;

		/* s = s * ma + d, per channel, saturating */
		UN8x4_MUL_UN8x4_ADD_UN8x4 (s, ma, d);

		*dst = s;
	    }

	    dst++;
	}
    }
}
 
/* OVER of a solid color through a component-alpha (a8r8g8b8) mask into
 * 8888: dst = src * mask + dst * (1 - mask * src_alpha), per channel.
 * A fully-set mask word degenerates to plain solid OVER (or a store
 * when the solid is opaque). */
static void
fast_composite_over_n_8888_8888_ca (pixman_implementation_t *imp,
                                    pixman_op_t              op,
                                    pixman_image_t *         src_image,
                                    pixman_image_t *         mask_image,
                                    pixman_image_t *         dst_image,
                                    int32_t                  src_x,
                                    int32_t                  src_y,
                                    int32_t                  mask_x,
                                    int32_t                  mask_y,
                                    int32_t                  dest_x,
                                    int32_t                  dest_y,
                                    int32_t                  width,
                                    int32_t                  height)
{
    uint32_t src, srca, s;
    uint32_t    *dst_line, *dst, d;
    uint32_t    *mask_line, *mask, ma;
    int dst_stride, mask_stride;
    int32_t w;

    src = _pixman_image_get_solid (src_image, dst_image->bits.format);

    srca = src >> 24;
    if (src == 0)
	return;

    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
    PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint32_t, mask_stride, mask_line, 1);

    while (height--)
    {
	dst = dst_line;
	dst_line += dst_stride;
	mask = mask_line;
	mask_line += mask_stride;
	w = width;

	while (w--)
	{
	    ma = *mask++;
	    if (ma == 0xffffffff)
	    {
		if (srca == 0xff)
		    *dst = src;
		else
		    *dst = over (src, *dst);
	    }
	    else if (ma)
	    {
		d = *dst;
		s = src;

		/* s = src IN mask; ma becomes the per-channel
		 * complement of (mask * src_alpha); d = d * ma + s */
		UN8x4_MUL_UN8x4 (s, ma);
		UN8x4_MUL_UN8 (ma, srca);
		ma = ~ma;
		UN8x4_MUL_UN8x4_ADD_UN8x4 (d, ma, s);

		*dst = d;
	    }

	    dst++;
	}
    }
}
 
/* OVER of a solid color through an a8 mask into packed 24-bit 0888.
 * Destination pixels are read/written with the fetch_24/store_24
 * helpers (3 bytes per pixel, hence dst += 3). */
static void
fast_composite_over_n_8_0888 (pixman_implementation_t *imp,
                              pixman_op_t              op,
                              pixman_image_t *         src_image,
                              pixman_image_t *         mask_image,
                              pixman_image_t *         dst_image,
                              int32_t                  src_x,
                              int32_t                  src_y,
                              int32_t                  mask_x,
                              int32_t                  mask_y,
                              int32_t                  dest_x,
                              int32_t                  dest_y,
                              int32_t                  width,
                              int32_t                  height)
{
    uint32_t src, srca;
    uint8_t     *dst_line, *dst;
    uint32_t d;
    uint8_t     *mask_line, *mask, m;
    int dst_stride, mask_stride;
    int32_t w;

    src = _pixman_image_get_solid (src_image, dst_image->bits.format);

    srca = src >> 24;
    if (src == 0)
	return;

    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 3);
    PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);

    while (height--)
    {
	dst = dst_line;
	dst_line += dst_stride;
	mask = mask_line;
	mask_line += mask_stride;
	w = width;

	while (w--)
	{
	    m = *mask++;
	    if (m == 0xff)
	    {
		if (srca == 0xff)
		{
		    d = src;
		}
		else
		{
		    d = fetch_24 (dst);
		    d = over (src, d);
		}
		store_24 (dst, d);
	    }
	    else if (m)
	    {
		d = over (in (src, m), fetch_24 (dst));
		store_24 (dst, d);
	    }
	    dst += 3;
	}
    }
}
 
/* OVER of a solid color through an a8 mask into r5g6b5.  The 16-bit
 * destination pixel is expanded to 0888 for the blend and packed back
 * with CONVERT_8888_TO_0565. */
static void
fast_composite_over_n_8_0565 (pixman_implementation_t *imp,
                              pixman_op_t              op,
                              pixman_image_t *         src_image,
                              pixman_image_t *         mask_image,
                              pixman_image_t *         dst_image,
                              int32_t                  src_x,
                              int32_t                  src_y,
                              int32_t                  mask_x,
                              int32_t                  mask_y,
                              int32_t                  dest_x,
                              int32_t                  dest_y,
                              int32_t                  width,
                              int32_t                  height)
{
    uint32_t src, srca;
    uint16_t    *dst_line, *dst;
    uint32_t d;
    uint8_t     *mask_line, *mask, m;
    int dst_stride, mask_stride;
    int32_t w;

    src = _pixman_image_get_solid (src_image, dst_image->bits.format);

    srca = src >> 24;
    if (src == 0)
	return;

    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1);
    PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);

    while (height--)
    {
	dst = dst_line;
	dst_line += dst_stride;
	mask = mask_line;
	mask_line += mask_stride;
	w = width;

	while (w--)
	{
	    m = *mask++;
	    if (m == 0xff)
	    {
		if (srca == 0xff)
		{
		    d = src;
		}
		else
		{
		    d = *dst;
		    d = over (src, CONVERT_0565_TO_0888 (d));
		}
		*dst = CONVERT_8888_TO_0565 (d);
	    }
	    else if (m)
	    {
		d = *dst;
		d = over (in (src, m), CONVERT_0565_TO_0888 (d));
		*dst = CONVERT_8888_TO_0565 (d);
	    }
	    dst++;
	}
    }
}
 
/* OVER of a solid color through a component-alpha (a8r8g8b8) mask into
 * r5g6b5.  The destination is expanded to 0888 for the per-channel
 * blend; a fully-set mask word with an opaque solid stores the
 * precomputed 16-bit value src16 directly. */
static void
fast_composite_over_n_8888_0565_ca (pixman_implementation_t *imp,
                                    pixman_op_t              op,
                                    pixman_image_t *         src_image,
                                    pixman_image_t *         mask_image,
                                    pixman_image_t *         dst_image,
                                    int32_t                  src_x,
                                    int32_t                  src_y,
                                    int32_t                  mask_x,
                                    int32_t                  mask_y,
                                    int32_t                  dest_x,
                                    int32_t                  dest_y,
                                    int32_t                  width,
                                    int32_t                  height)
{
    uint32_t src, srca, s;
    uint16_t src16;
    uint16_t    *dst_line, *dst;
    uint32_t d;
    uint32_t    *mask_line, *mask, ma;
    int dst_stride, mask_stride;
    int32_t w;

    src = _pixman_image_get_solid (src_image, dst_image->bits.format);

    srca = src >> 24;
    if (src == 0)
	return;

    src16 = CONVERT_8888_TO_0565 (src);

    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1);
    PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint32_t, mask_stride, mask_line, 1);

    while (height--)
    {
	dst = dst_line;
	dst_line += dst_stride;
	mask = mask_line;
	mask_line += mask_stride;
	w = width;

	while (w--)
	{
	    ma = *mask++;
	    if (ma == 0xffffffff)
	    {
		if (srca == 0xff)
		{
		    *dst = src16;
		}
		else
		{
		    d = *dst;
		    d = over (src, CONVERT_0565_TO_0888 (d));
		    *dst = CONVERT_8888_TO_0565 (d);
		}
	    }
	    else if (ma)
	    {
		d = *dst;
		d = CONVERT_0565_TO_0888 (d);

		s = src;

		/* s = src IN mask; ma = ~(mask * srca); d = d * ma + s */
		UN8x4_MUL_UN8x4 (s, ma);
		UN8x4_MUL_UN8 (ma, srca);
		ma = ~ma;
		UN8x4_MUL_UN8x4_ADD_UN8x4 (d, ma, s);

		*dst = CONVERT_8888_TO_0565 (d);
	    }
	    dst++;
	}
    }
}
 
/* OVER of an a8r8g8b8 source onto an 8888 destination.  Opaque source
 * pixels are copied, fully transparent ones skipped, the rest blended
 * with over(). */
static void
fast_composite_over_8888_8888 (pixman_implementation_t *imp,
                               pixman_op_t              op,
                               pixman_image_t *         src_image,
                               pixman_image_t *         mask_image,
                               pixman_image_t *         dst_image,
                               int32_t                  src_x,
                               int32_t                  src_y,
                               int32_t                  mask_x,
                               int32_t                  mask_y,
                               int32_t                  dest_x,
                               int32_t                  dest_y,
                               int32_t                  width,
                               int32_t                  height)
{
    uint32_t    *dst_line, *dst;
    uint32_t    *src_line, *src, s;
    int dst_stride, src_stride;
    uint8_t a;
    int32_t w;

    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);

    for (; height > 0; height--)
    {
	src = src_line;
	dst = dst_line;
	src_line += src_stride;
	dst_line += dst_stride;

	for (w = width; w > 0; w--)
	{
	    s = *src++;
	    a = s >> 24;

	    if (a == 0xff)
	    {
		*dst = s;               /* opaque: plain copy */
	    }
	    else if (s != 0)
	    {
		*dst = over (s, *dst);  /* translucent: blend */
	    }
	    /* s == 0: fully transparent, destination untouched */

	    dst++;
	}
    }
}
 
/* SRC from x8r8g8b8 to a8r8g8b8: straight copy with the alpha byte
 * forced to 0xff. */
static void
fast_composite_src_x888_8888 (pixman_implementation_t *imp,
                              pixman_op_t              op,
                              pixman_image_t *         src_image,
                              pixman_image_t *         mask_image,
                              pixman_image_t *         dst_image,
                              int32_t                  src_x,
                              int32_t                  src_y,
                              int32_t                  mask_x,
                              int32_t                  mask_y,
                              int32_t                  dest_x,
                              int32_t                  dest_y,
                              int32_t                  width,
                              int32_t                  height)
{
    uint32_t    *dst_line, *dst;
    uint32_t    *src_line, *src;
    int dst_stride, src_stride;
    int32_t w;

    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);

    for (; height > 0; height--)
    {
	src = src_line;
	dst = dst_line;
	src_line += src_stride;
	dst_line += dst_stride;

	for (w = width; w > 0; w--)
	    *dst++ = *src++ | 0xff000000;
    }
}
 
#if 0
/* NOTE(review): compiled out (#if 0).  OVER of a8r8g8b8 onto packed
 * 24-bit 0888 via fetch_24/store_24; kept for reference. */
static void
fast_composite_over_8888_0888 (pixman_implementation_t *imp,
			       pixman_op_t              op,
			       pixman_image_t *         src_image,
			       pixman_image_t *         mask_image,
			       pixman_image_t *         dst_image,
			       int32_t                  src_x,
			       int32_t                  src_y,
			       int32_t                  mask_x,
			       int32_t                  mask_y,
			       int32_t                  dest_x,
			       int32_t                  dest_y,
			       int32_t                  width,
			       int32_t                  height)
{
    uint8_t     *dst_line, *dst;
    uint32_t d;
    uint32_t    *src_line, *src, s;
    uint8_t a;
    int dst_stride, src_stride;
    int32_t w;

    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 3);
    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);

    while (height--)
    {
	dst = dst_line;
	dst_line += dst_stride;
	src = src_line;
	src_line += src_stride;
	w = width;

	while (w--)
	{
	    s = *src++;
	    a = s >> 24;
	    if (a)
	    {
		if (a == 0xff)
		    d = s;
		else
		    d = over (s, fetch_24 (dst));

		store_24 (dst, d);
	    }
	    dst += 3;
	}
    }
}
#endif
 
/* OVER of a8r8g8b8 onto r5g6b5.  Non-zero source pixels are either
 * packed directly (opaque) or blended against the destination expanded
 * to 0888; zero pixels leave the destination untouched. */
static void
fast_composite_over_8888_0565 (pixman_implementation_t *imp,
                               pixman_op_t              op,
                               pixman_image_t *         src_image,
                               pixman_image_t *         mask_image,
                               pixman_image_t *         dst_image,
                               int32_t                  src_x,
                               int32_t                  src_y,
                               int32_t                  mask_x,
                               int32_t                  mask_y,
                               int32_t                  dest_x,
                               int32_t                  dest_y,
                               int32_t                  width,
                               int32_t                  height)
{
    uint16_t    *dst_line, *dst;
    uint32_t d;
    uint32_t    *src_line, *src, s;
    uint8_t a;
    int dst_stride, src_stride;
    int32_t w;

    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1);

    while (height--)
    {
	dst = dst_line;
	dst_line += dst_stride;
	src = src_line;
	src_line += src_stride;
	w = width;

	while (w--)
	{
	    s = *src++;
	    a = s >> 24;
	    if (s)
	    {
		if (a == 0xff)
		{
		    d = s;
		}
		else
		{
		    d = *dst;
		    d = over (s, CONVERT_0565_TO_0888 (d));
		}
		*dst = CONVERT_8888_TO_0565 (d);
	    }
	    dst++;
	}
    }
}
 
/*
 * SRC conversion from an x888/8888 source to a 565 destination; the
 * alpha channel (if any) is simply discarded by the conversion.
 */
static void
fast_composite_src_x888_0565 (pixman_implementation_t *imp,
			      pixman_op_t              op,
			      pixman_image_t *         src_image,
			      pixman_image_t *         mask_image,
			      pixman_image_t *         dst_image,
			      int32_t                  src_x,
			      int32_t                  src_y,
			      int32_t                  mask_x,
			      int32_t                  mask_y,
			      int32_t                  dest_x,
			      int32_t                  dest_y,
			      int32_t                  width,
			      int32_t                  height)
{
    uint16_t *dst_row;
    uint32_t *src_row;
    int dst_stride, src_stride;
    int i;

    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_row, 1);
    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t, dst_stride, dst_row, 1);

    for (; height > 0; height--)
    {
	for (i = 0; i < width; i++)
	    dst_row[i] = CONVERT_8888_TO_0565 (src_row[i]);

	dst_row += dst_stride;
	src_row += src_stride;
    }
}
 
/*
 * ADD composite of an a8 source onto an a8 destination: per-pixel
 * saturating 8-bit addition.
 */
static void
fast_composite_add_8_8 (pixman_implementation_t *imp,
			pixman_op_t              op,
			pixman_image_t *         src_image,
			pixman_image_t *         mask_image,
			pixman_image_t *         dst_image,
			int32_t                  src_x,
			int32_t                  src_y,
			int32_t                  mask_x,
			int32_t                  mask_y,
			int32_t                  dest_x,
			int32_t                  dest_y,
			int32_t                  width,
			int32_t                  height)
{
    uint8_t *dst_row, *src_row;
    int dst_stride, src_stride;
    int i;

    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint8_t, src_stride, src_row, 1);
    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint8_t, dst_stride, dst_row, 1);

    for (; height > 0; height--)
    {
	for (i = 0; i < width; i++)
	{
	    uint8_t s = src_row[i];

	    if (s == 0)
		continue;	/* adding zero is a no-op */

	    if (s != 0xff)
	    {
		/* Saturating add: (t >> 8) is 1 on overflow, so the
		 * subtraction produces an all-ones mask that clamps
		 * the result to 0xff. */
		uint16_t t = dst_row[i] + s;
		s = t | (0 - (t >> 8));
	    }
	    dst_row[i] = s;
	}

	dst_row += dst_stride;
	src_row += src_stride;
    }
}
 
/*
 * ADD composite of an 8888 source onto an 8888 destination: saturating
 * componentwise addition of all four channels.
 */
static void
fast_composite_add_8888_8888 (pixman_implementation_t *imp,
			      pixman_op_t              op,
			      pixman_image_t *         src_image,
			      pixman_image_t *         mask_image,
			      pixman_image_t *         dst_image,
			      int32_t                  src_x,
			      int32_t                  src_y,
			      int32_t                  mask_x,
			      int32_t                  mask_y,
			      int32_t                  dest_x,
			      int32_t                  dest_y,
			      int32_t                  width,
			      int32_t                  height)
{
    uint32_t *dst_row, *src_row;
    int dst_stride, src_stride;
    int i;

    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_row, 1);
    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_row, 1);

    for (; height > 0; height--)
    {
	for (i = 0; i < width; i++)
	{
	    uint32_t s = src_row[i];

	    if (s == 0)
		continue;	/* adding zero is a no-op */

	    if (s != 0xffffffff)
	    {
		uint32_t d = dst_row[i];

		/* Saturating add per component; skip when dst is zero. */
		if (d)
		    UN8x4_ADD_UN8x4 (s, d);
	    }
	    dst_row[i] = s;
	}

	dst_row += dst_stride;
	src_row += src_stride;
    }
}
 
/*
 * ADD composite of a solid source through an a8 mask onto an a8
 * destination: the solid's alpha is scaled by each mask value and
 * saturating-added to the destination.
 */
static void
fast_composite_add_n_8_8 (pixman_implementation_t *imp,
			  pixman_op_t              op,
			  pixman_image_t *         src_image,
			  pixman_image_t *         mask_image,
			  pixman_image_t *         dst_image,
			  int32_t                  src_x,
			  int32_t                  src_y,
			  int32_t                  mask_x,
			  int32_t                  mask_y,
			  int32_t                  dest_x,
			  int32_t                  dest_y,
			  int32_t                  width,
			  int32_t                  height)
{
    uint8_t *dst_row, *mask_row;
    int dst_stride, mask_stride;
    uint32_t src;
    uint8_t sa;
    int i;

    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint8_t, dst_stride, dst_row, 1);
    PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_row, 1);

    src = _pixman_image_get_solid (src_image, dst_image->bits.format);
    sa = (src >> 24);

    for (; height > 0; height--)
    {
	for (i = 0; i < width; i++)
	{
	    uint16_t tmp;
	    uint16_t a;
	    uint32_t m, d;
	    uint32_t r;

	    a = mask_row[i];
	    d = dst_row[i];

	    /* Scale solid alpha by mask, then saturating-add to dst. */
	    m = MUL_UN8 (sa, a, tmp);
	    r = ADD_UN8 (m, d, tmp);

	    dst_row[i] = r;
	}

	dst_row += dst_stride;
	mask_row += mask_stride;
    }
}
 
/* Helpers for addressing a1 (1 bit per pixel) images.  Bit order within
 * each 32-bit word depends on endianness: MSB-first on big-endian,
 * LSB-first on little-endian.  1U avoids undefined behavior when the
 * bit index is 31 (left-shifting into the sign bit of a signed int). */
#ifdef WORDS_BIGENDIAN
#define CREATE_BITMASK(n) (0x80000000 >> (n))
#define UPDATE_BITMASK(n) ((n) >> 1)
#else
#define CREATE_BITMASK(n) (1U << (n))
#define UPDATE_BITMASK(n) ((n) << 1)
#endif

/* Test/set bit n counting from the start of the word array p. */
#define TEST_BIT(p, n)					\
    (*((p) + ((n) >> 5)) & CREATE_BITMASK ((n) & 31))
/* No trailing semicolon: the do/while(0) idiom requires the caller to
 * supply it, so SET_BIT works correctly in if/else bodies. */
#define SET_BIT(p, n)							\
    do { *((p) + ((n) >> 5)) |= CREATE_BITMASK ((n) & 31); } while (0)
 
/*
 * ADD composite of an a1 source onto an a1 destination: a bitwise OR
 * of the two bitmaps, processed one bit at a time.
 */
static void
fast_composite_add_1000_1000 (pixman_implementation_t *imp,
			      pixman_op_t              op,
			      pixman_image_t *         src_image,
			      pixman_image_t *         mask_image,
			      pixman_image_t *         dst_image,
			      int32_t                  src_x,
			      int32_t                  src_y,
			      int32_t                  mask_x,
			      int32_t                  mask_y,
			      int32_t                  dest_x,
			      int32_t                  dest_y,
			      int32_t                  width,
			      int32_t                  height)
{
    uint32_t *dst_row, *src_row;
    int dst_stride, src_stride;
    int i;

    /* x is passed as 0 here: a1 pixels are addressed by bit offset from
     * the start of the scanline, so src_x/dest_x are applied per bit. */
    PIXMAN_IMAGE_GET_LINE (src_image, 0, src_y, uint32_t,
                           src_stride, src_row, 1);
    PIXMAN_IMAGE_GET_LINE (dst_image, 0, dest_y, uint32_t,
                           dst_stride, dst_row, 1);

    for (; height > 0; height--)
    {
	/*
	 * TODO: improve performance by processing uint32_t data instead
	 * of individual bits
	 */
	for (i = 0; i < width; i++)
	{
	    if (TEST_BIT (src_row, src_x + i))
		SET_BIT (dst_row, dest_x + i);
	}

	dst_row += dst_stride;
	src_row += src_stride;
    }
}
 
/*
 * OVER composite of a solid source through an a1 (1 bit per pixel)
 * mask onto an 8888 destination.  Set mask bits select the solid
 * color; clear bits leave the destination pixel untouched.  The mask
 * is read 32 bits at a time into 'bitcache' and walked with a moving
 * single-bit 'bitmask'.
 */
static void
fast_composite_over_n_1_8888 (pixman_implementation_t *imp,
pixman_op_t op,
pixman_image_t * src_image,
pixman_image_t * mask_image,
pixman_image_t * dst_image,
int32_t src_x,
int32_t src_y,
int32_t mask_x,
int32_t mask_y,
int32_t dest_x,
int32_t dest_y,
int32_t width,
int32_t height)
{
    uint32_t src, srca;
    uint32_t *dst, *dst_line;
    uint32_t *mask, *mask_line;
    int mask_stride, dst_stride;
    uint32_t bitcache, bitmask;
    int32_t w;

    if (width <= 0)
	return;

    src = _pixman_image_get_solid (src_image, dst_image->bits.format);
    srca = src >> 24;
    /* OVER with a fully transparent solid is a no-op. */
    if (src == 0)
	return;

    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t,
                           dst_stride, dst_line, 1);
    /* x is passed as 0: the word containing mask_x is located manually
     * below, and the bit offset within it is handled per scanline. */
    PIXMAN_IMAGE_GET_LINE (mask_image, 0, mask_y, uint32_t,
                           mask_stride, mask_line, 1);
    mask_line += mask_x >> 5;

    if (srca == 0xff)
    {
	/* Opaque solid: selected pixels are simply overwritten. */
	while (height--)
	{
	    dst = dst_line;
	    dst_line += dst_stride;
	    mask = mask_line;
	    mask_line += mask_stride;
	    w = width;

	    bitcache = *mask++;
	    bitmask = CREATE_BITMASK (mask_x & 31);

	    while (w--)
	    {
		/* bitmask shifts out to 0 after 32 pixels: refill. */
		if (bitmask == 0)
		{
		    bitcache = *mask++;
		    bitmask = CREATE_BITMASK (0);
		}
		if (bitcache & bitmask)
		    *dst = src;
		bitmask = UPDATE_BITMASK (bitmask);
		dst++;
	    }
	}
    }
    else
    {
	/* Translucent solid: selected pixels are blended with OVER. */
	while (height--)
	{
	    dst = dst_line;
	    dst_line += dst_stride;
	    mask = mask_line;
	    mask_line += mask_stride;
	    w = width;

	    bitcache = *mask++;
	    bitmask = CREATE_BITMASK (mask_x & 31);

	    while (w--)
	    {
		if (bitmask == 0)
		{
		    bitcache = *mask++;
		    bitmask = CREATE_BITMASK (0);
		}
		if (bitcache & bitmask)
		    *dst = over (src, *dst);
		bitmask = UPDATE_BITMASK (bitmask);
		dst++;
	    }
	}
    }
}
 
/*
 * OVER composite of a solid source through an a1 mask onto an r5g6b5
 * destination.  Same bit-walking scheme as fast_composite_over_n_1_8888,
 * with an extra 565 <-> 8888 conversion in the translucent path.
 */
static void
fast_composite_over_n_1_0565 (pixman_implementation_t *imp,
pixman_op_t op,
pixman_image_t * src_image,
pixman_image_t * mask_image,
pixman_image_t * dst_image,
int32_t src_x,
int32_t src_y,
int32_t mask_x,
int32_t mask_y,
int32_t dest_x,
int32_t dest_y,
int32_t width,
int32_t height)
{
    uint32_t src, srca;
    uint16_t *dst, *dst_line;
    uint32_t *mask, *mask_line;
    int mask_stride, dst_stride;
    uint32_t bitcache, bitmask;
    int32_t w;
    uint32_t d;
    uint16_t src565;

    if (width <= 0)
	return;

    src = _pixman_image_get_solid (src_image, dst_image->bits.format);
    srca = src >> 24;
    /* OVER with a fully transparent solid is a no-op. */
    if (src == 0)
	return;

    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t,
                           dst_stride, dst_line, 1);
    /* x is passed as 0: the word containing mask_x is located manually. */
    PIXMAN_IMAGE_GET_LINE (mask_image, 0, mask_y, uint32_t,
                           mask_stride, mask_line, 1);
    mask_line += mask_x >> 5;

    if (srca == 0xff)
    {
	/* Opaque solid: pre-convert once and store directly. */
	src565 = CONVERT_8888_TO_0565 (src);
	while (height--)
	{
	    dst = dst_line;
	    dst_line += dst_stride;
	    mask = mask_line;
	    mask_line += mask_stride;
	    w = width;

	    bitcache = *mask++;
	    bitmask = CREATE_BITMASK (mask_x & 31);

	    while (w--)
	    {
		/* bitmask shifts out to 0 after 32 pixels: refill. */
		if (bitmask == 0)
		{
		    bitcache = *mask++;
		    bitmask = CREATE_BITMASK (0);
		}
		if (bitcache & bitmask)
		    *dst = src565;
		bitmask = UPDATE_BITMASK (bitmask);
		dst++;
	    }
	}
    }
    else
    {
	/* Translucent solid: blend in 8888 space, convert back to 565. */
	while (height--)
	{
	    dst = dst_line;
	    dst_line += dst_stride;
	    mask = mask_line;
	    mask_line += mask_stride;
	    w = width;

	    bitcache = *mask++;
	    bitmask = CREATE_BITMASK (mask_x & 31);

	    while (w--)
	    {
		if (bitmask == 0)
		{
		    bitcache = *mask++;
		    bitmask = CREATE_BITMASK (0);
		}
		if (bitcache & bitmask)
		{
		    d = over (src, CONVERT_0565_TO_0888 (*dst));
		    *dst = CONVERT_8888_TO_0565 (d);
		}
		bitmask = UPDATE_BITMASK (bitmask);
		dst++;
	    }
	}
    }
}
 
/*
* Simple bitblt
*/
 
/*
 * SRC composite of a solid source: converts the canonical a8r8g8b8
 * solid color to the destination format and delegates to pixman_fill.
 */
static void
fast_composite_solid_fill (pixman_implementation_t *imp,
			   pixman_op_t              op,
			   pixman_image_t *         src_image,
			   pixman_image_t *         mask_image,
			   pixman_image_t *         dst_image,
			   int32_t                  src_x,
			   int32_t                  src_y,
			   int32_t                  mask_x,
			   int32_t                  mask_y,
			   int32_t                  dest_x,
			   int32_t                  dest_y,
			   int32_t                  width,
			   int32_t                  height)
{
    pixman_format_code_t format = dst_image->bits.format;
    uint32_t src = _pixman_image_get_solid (src_image, format);

    if (format == PIXMAN_a8)
    {
	src >>= 24;		/* keep only the alpha channel */
    }
    else if (format == PIXMAN_r5g6b5 || format == PIXMAN_b5g6r5)
    {
	src = CONVERT_8888_TO_0565 (src);
    }

    pixman_fill (dst_image->bits.bits, dst_image->bits.rowstride,
                 PIXMAN_FORMAT_BPP (format),
                 dest_x, dest_y,
                 width, height,
                 src);
}
 
/*
 * SRC composite between images whose formats are memory-identical:
 * a straight row-by-row memcpy.
 */
static void
fast_composite_src_memcpy (pixman_implementation_t *imp,
			   pixman_op_t              op,
			   pixman_image_t *         src_image,
			   pixman_image_t *         mask_image,
			   pixman_image_t *         dst_image,
			   int32_t                  src_x,
			   int32_t                  src_y,
			   int32_t                  mask_x,
			   int32_t                  mask_y,
			   int32_t                  dest_x,
			   int32_t                  dest_y,
			   int32_t                  width,
			   int32_t                  height)
{
    int bpp = PIXMAN_FORMAT_BPP (dst_image->bits.format) / 8;
    uint32_t n_bytes = width * bpp;
    /* rowstride is in uint32_t units; convert to bytes. */
    int src_stride = src_image->bits.rowstride * 4;
    int dst_stride = dst_image->bits.rowstride * 4;
    uint8_t *src = (uint8_t *)src_image->bits.bits + src_y * src_stride + src_x * bpp;
    uint8_t *dst = (uint8_t *)dst_image->bits.bits + dest_y * dst_stride + dest_x * bpp;

    for (; height > 0; height--)
    {
	memcpy (dst, src, n_bytes);

	dst += dst_stride;
	src += src_stride;
    }
}
 
/* Instantiate the nearest-neighbour scaling fast paths (scanline routine
 * plus main loop) for the common 8888 and 565 format combinations, for
 * SRC and OVER, and for each supported repeat mode. */
FAST_NEAREST (8888_8888_cover, 8888, 8888, uint32_t, uint32_t, SRC, COVER);
FAST_NEAREST (8888_8888_none, 8888, 8888, uint32_t, uint32_t, SRC, NONE);
FAST_NEAREST (8888_8888_pad, 8888, 8888, uint32_t, uint32_t, SRC, PAD);
FAST_NEAREST (8888_8888_normal, 8888, 8888, uint32_t, uint32_t, SRC, NORMAL);
FAST_NEAREST (8888_8888_cover, 8888, 8888, uint32_t, uint32_t, OVER, COVER);
FAST_NEAREST (8888_8888_none, 8888, 8888, uint32_t, uint32_t, OVER, NONE);
FAST_NEAREST (8888_8888_pad, 8888, 8888, uint32_t, uint32_t, OVER, PAD);
FAST_NEAREST (8888_8888_normal, 8888, 8888, uint32_t, uint32_t, OVER, NORMAL);
FAST_NEAREST (8888_565_cover, 8888, 0565, uint32_t, uint16_t, SRC, COVER);
FAST_NEAREST (8888_565_none, 8888, 0565, uint32_t, uint16_t, SRC, NONE);
FAST_NEAREST (8888_565_pad, 8888, 0565, uint32_t, uint16_t, SRC, PAD);
FAST_NEAREST (8888_565_normal, 8888, 0565, uint32_t, uint16_t, SRC, NORMAL);
FAST_NEAREST (565_565_normal, 0565, 0565, uint16_t, uint16_t, SRC, NORMAL);
FAST_NEAREST (8888_565_cover, 8888, 0565, uint32_t, uint16_t, OVER, COVER);
FAST_NEAREST (8888_565_none, 8888, 0565, uint32_t, uint16_t, OVER, NONE);
FAST_NEAREST (8888_565_pad, 8888, 0565, uint32_t, uint16_t, OVER, PAD);
FAST_NEAREST (8888_565_normal, 8888, 0565, uint32_t, uint16_t, OVER, NORMAL);
 
/* Use more unrolling for src_0565_0565 because it is typically CPU bound */
/*
 * Nearest-neighbour SRC scanline for 565 -> 565, unrolled four pixels
 * per iteration.  After the main loop exits, w is negative: for an
 * original width of 4k + r, w ends at r - 4, whose low two bits equal
 * the low bits of r, so (w & 2) and (w & 1) correctly select the
 * 2-pixel and 1-pixel tail cases.
 */
static force_inline void
scaled_nearest_scanline_565_565_SRC (uint16_t *     dst,
                                     uint16_t *     src,
                                     int32_t        w,
                                     pixman_fixed_t vx,
                                     pixman_fixed_t unit_x,
                                     pixman_fixed_t max_vx)
{
    uint16_t tmp1, tmp2, tmp3, tmp4;
    while ((w -= 4) >= 0)
    {
	tmp1 = src[pixman_fixed_to_int (vx)];
	vx += unit_x;
	tmp2 = src[pixman_fixed_to_int (vx)];
	vx += unit_x;
	tmp3 = src[pixman_fixed_to_int (vx)];
	vx += unit_x;
	tmp4 = src[pixman_fixed_to_int (vx)];
	vx += unit_x;
	/* Stores are grouped after the loads to give the compiler more
	 * scheduling freedom. */
	*dst++ = tmp1;
	*dst++ = tmp2;
	*dst++ = tmp3;
	*dst++ = tmp4;
    }
    if (w & 2)
    {
	tmp1 = src[pixman_fixed_to_int (vx)];
	vx += unit_x;
	tmp2 = src[pixman_fixed_to_int (vx)];
	vx += unit_x;
	*dst++ = tmp1;
	*dst++ = tmp2;
    }
    if (w & 1)
	*dst++ = src[pixman_fixed_to_int (vx)];
}
 
/* Main loops for the hand-unrolled 565 scanline above (the NORMAL repeat
 * variant is instantiated by FAST_NEAREST (565_565_normal, ...) earlier). */
FAST_NEAREST_MAINLOOP (565_565_cover_SRC,
		       scaled_nearest_scanline_565_565_SRC,
		       uint16_t, uint16_t, COVER);
FAST_NEAREST_MAINLOOP (565_565_none_SRC,
		       scaled_nearest_scanline_565_565_SRC,
		       uint16_t, uint16_t, NONE);
FAST_NEAREST_MAINLOOP (565_565_pad_SRC,
		       scaled_nearest_scanline_565_565_SRC,
		       uint16_t, uint16_t, PAD);
 
/*
 * Fetch the source pixel at x after applying the repeat mode.  Returns
 * 0 (transparent) when the coordinate falls outside the image under
 * PIXMAN_REPEAT_NONE; x8r8g8b8 pixels get their alpha forced opaque.
 */
static force_inline uint32_t
fetch_nearest (pixman_repeat_t src_repeat,
	       pixman_format_code_t format,
	       uint32_t *src, int x, int src_width)
{
    if (!repeat (src_repeat, &x, src_width))
	return 0;

    if (format == PIXMAN_x8r8g8b8)
	return src[x] | 0xff000000;

    return src[x];
}
 
/* OVER-combine premultiplied source s into *dst (no-op for s == 0,
 * straight store for opaque s, blend otherwise). */
static force_inline void
combine_over (uint32_t s, uint32_t *dst)
{
    uint8_t ia;

    if (s == 0)
	return;

    ia = 0xff - (s >> 24);

    if (ia == 0)
	*dst = s;
    else
	UN8x4_MUL_UN8_ADD_UN8x4 (*dst, ia, s);
}
 
/* SRC-combine: unconditionally replace the destination pixel. */
static force_inline void
combine_src (uint32_t s, uint32_t *dst)
{
    dst[0] = s;
}
 
/*
 * Generic nearest-neighbour scaled composite for 8888 formats with SRC
 * or OVER.  Only the diagonal of the transform matrix is used (the fast
 * path flags guarantee a pure scale), and repeat handling is done per
 * pixel via fetch_nearest.
 */
static void
fast_composite_scaled_nearest (pixman_implementation_t *imp,
pixman_op_t op,
pixman_image_t * src_image,
pixman_image_t * mask_image,
pixman_image_t * dst_image,
int32_t src_x,
int32_t src_y,
int32_t mask_x,
int32_t mask_y,
int32_t dest_x,
int32_t dest_y,
int32_t width,
int32_t height)
{
    uint32_t *dst_line;
    uint32_t *src_line;
    int dst_stride, src_stride;
    int src_width, src_height;
    pixman_repeat_t src_repeat;
    pixman_fixed_t unit_x, unit_y;
    pixman_format_code_t src_format;
    pixman_vector_t v;
    pixman_fixed_t vy;

    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
    /* pass in 0 instead of src_x and src_y because src_x and src_y need to be
     * transformed from destination space to source space
     */
    PIXMAN_IMAGE_GET_LINE (src_image, 0, 0, uint32_t, src_stride, src_line, 1);

    /* reference point is the center of the pixel */
    v.vector[0] = pixman_int_to_fixed (src_x) + pixman_fixed_1 / 2;
    v.vector[1] = pixman_int_to_fixed (src_y) + pixman_fixed_1 / 2;
    v.vector[2] = pixman_fixed_1;

    if (!pixman_transform_point_3d (src_image->common.transform, &v))
	return;

    /* Pure scale: only the diagonal entries matter. */
    unit_x = src_image->common.transform->matrix[0][0];
    unit_y = src_image->common.transform->matrix[1][1];

    /* Round down to closest integer, ensuring that 0.5 rounds to 0, not 1 */
    v.vector[0] -= pixman_fixed_e;
    v.vector[1] -= pixman_fixed_e;

    src_height = src_image->bits.height;
    src_width = src_image->bits.width;
    src_repeat = src_image->common.repeat;
    src_format = src_image->bits.format;

    vy = v.vector[1];
    while (height--)
    {
	pixman_fixed_t vx = v.vector[0];
	int y = pixman_fixed_to_int (vy);
	uint32_t *dst = dst_line;

	dst_line += dst_stride;

	/* adjust the y location by a unit vector in the y direction
	 * this is equivalent to transforming y+1 of the destination point to source space */
	vy += unit_y;

	if (!repeat (src_repeat, &y, src_height))
	{
	    /* Row is outside the source (REPEAT_NONE): SRC clears the
	     * destination row; OVER leaves it unchanged. */
	    if (op == PIXMAN_OP_SRC)
		memset (dst, 0, sizeof (*dst) * width);
	}
	else
	{
	    int w = width;

	    uint32_t *src = src_line + y * src_stride;

	    /* Two pixels per iteration; op is loop-invariant so the
	     * branch predicts perfectly. */
	    while (w >= 2)
	    {
		uint32_t s1, s2;
		int x1, x2;

		x1 = pixman_fixed_to_int (vx);
		vx += unit_x;

		x2 = pixman_fixed_to_int (vx);
		vx += unit_x;

		w -= 2;

		s1 = fetch_nearest (src_repeat, src_format, src, x1, src_width);
		s2 = fetch_nearest (src_repeat, src_format, src, x2, src_width);

		if (op == PIXMAN_OP_OVER)
		{
		    combine_over (s1, dst++);
		    combine_over (s2, dst++);
		}
		else
		{
		    combine_src (s1, dst++);
		    combine_src (s2, dst++);
		}
	    }

	    /* Remaining odd pixel, if any. */
	    while (w--)
	    {
		uint32_t s;
		int x;

		x = pixman_fixed_to_int (vx);
		vx += unit_x;

		s = fetch_nearest (src_repeat, src_format, src, x, src_width);

		if (op == PIXMAN_OP_OVER)
		    combine_over (s, dst++);
		else
		    combine_src (s, dst++);
	    }
	}
    }
}
 
/* Table of C fast paths, scanned in order by the implementation layer;
 * the first entry whose op/src/mask/dest formats and flags match the
 * composite request is used.  Terminated by PIXMAN_OP_NONE. */
static const pixman_fast_path_t c_fast_paths[] =
{
    /* OVER with solid source and a8/a1 masks */
    PIXMAN_STD_FAST_PATH (OVER, solid, a8, r5g6b5, fast_composite_over_n_8_0565),
    PIXMAN_STD_FAST_PATH (OVER, solid, a8, b5g6r5, fast_composite_over_n_8_0565),
    PIXMAN_STD_FAST_PATH (OVER, solid, a8, r8g8b8, fast_composite_over_n_8_0888),
    PIXMAN_STD_FAST_PATH (OVER, solid, a8, b8g8r8, fast_composite_over_n_8_0888),
    PIXMAN_STD_FAST_PATH (OVER, solid, a8, a8r8g8b8, fast_composite_over_n_8_8888),
    PIXMAN_STD_FAST_PATH (OVER, solid, a8, x8r8g8b8, fast_composite_over_n_8_8888),
    PIXMAN_STD_FAST_PATH (OVER, solid, a8, a8b8g8r8, fast_composite_over_n_8_8888),
    PIXMAN_STD_FAST_PATH (OVER, solid, a8, x8b8g8r8, fast_composite_over_n_8_8888),
    PIXMAN_STD_FAST_PATH (OVER, solid, a1, a8r8g8b8, fast_composite_over_n_1_8888),
    PIXMAN_STD_FAST_PATH (OVER, solid, a1, x8r8g8b8, fast_composite_over_n_1_8888),
    PIXMAN_STD_FAST_PATH (OVER, solid, a1, a8b8g8r8, fast_composite_over_n_1_8888),
    PIXMAN_STD_FAST_PATH (OVER, solid, a1, x8b8g8r8, fast_composite_over_n_1_8888),
    PIXMAN_STD_FAST_PATH (OVER, solid, a1, r5g6b5, fast_composite_over_n_1_0565),
    PIXMAN_STD_FAST_PATH (OVER, solid, a1, b5g6r5, fast_composite_over_n_1_0565),
    /* Component-alpha masks */
    PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8r8g8b8, a8r8g8b8, fast_composite_over_n_8888_8888_ca),
    PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8r8g8b8, x8r8g8b8, fast_composite_over_n_8888_8888_ca),
    PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8r8g8b8, r5g6b5, fast_composite_over_n_8888_0565_ca),
    PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8b8g8r8, a8b8g8r8, fast_composite_over_n_8888_8888_ca),
    PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8b8g8r8, x8b8g8r8, fast_composite_over_n_8888_8888_ca),
    PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8b8g8r8, b5g6r5, fast_composite_over_n_8888_0565_ca),
    PIXMAN_STD_FAST_PATH (OVER, x8r8g8b8, a8, x8r8g8b8, fast_composite_over_x888_8_8888),
    PIXMAN_STD_FAST_PATH (OVER, x8r8g8b8, a8, a8r8g8b8, fast_composite_over_x888_8_8888),
    PIXMAN_STD_FAST_PATH (OVER, x8b8g8r8, a8, x8b8g8r8, fast_composite_over_x888_8_8888),
    PIXMAN_STD_FAST_PATH (OVER, x8b8g8r8, a8, a8b8g8r8, fast_composite_over_x888_8_8888),
    /* Unmasked OVER */
    PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, null, a8r8g8b8, fast_composite_over_8888_8888),
    PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, null, x8r8g8b8, fast_composite_over_8888_8888),
    PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, null, r5g6b5, fast_composite_over_8888_0565),
    PIXMAN_STD_FAST_PATH (OVER, a8b8g8r8, null, a8b8g8r8, fast_composite_over_8888_8888),
    PIXMAN_STD_FAST_PATH (OVER, a8b8g8r8, null, x8b8g8r8, fast_composite_over_8888_8888),
    PIXMAN_STD_FAST_PATH (OVER, a8b8g8r8, null, b5g6r5, fast_composite_over_8888_0565),
    /* ADD */
    PIXMAN_STD_FAST_PATH (ADD, a8r8g8b8, null, a8r8g8b8, fast_composite_add_8888_8888),
    PIXMAN_STD_FAST_PATH (ADD, a8b8g8r8, null, a8b8g8r8, fast_composite_add_8888_8888),
    PIXMAN_STD_FAST_PATH (ADD, a8, null, a8, fast_composite_add_8_8),
    PIXMAN_STD_FAST_PATH (ADD, a1, null, a1, fast_composite_add_1000_1000),
    PIXMAN_STD_FAST_PATH_CA (ADD, solid, a8r8g8b8, a8r8g8b8, fast_composite_add_n_8888_8888_ca),
    PIXMAN_STD_FAST_PATH (ADD, solid, a8, a8, fast_composite_add_n_8_8),
    /* SRC: solid fills, alpha-forcing copies, plain copies, 565 packing */
    PIXMAN_STD_FAST_PATH (SRC, solid, null, a8r8g8b8, fast_composite_solid_fill),
    PIXMAN_STD_FAST_PATH (SRC, solid, null, x8r8g8b8, fast_composite_solid_fill),
    PIXMAN_STD_FAST_PATH (SRC, solid, null, a8b8g8r8, fast_composite_solid_fill),
    PIXMAN_STD_FAST_PATH (SRC, solid, null, x8b8g8r8, fast_composite_solid_fill),
    PIXMAN_STD_FAST_PATH (SRC, solid, null, a8, fast_composite_solid_fill),
    PIXMAN_STD_FAST_PATH (SRC, solid, null, r5g6b5, fast_composite_solid_fill),
    PIXMAN_STD_FAST_PATH (SRC, x8r8g8b8, null, a8r8g8b8, fast_composite_src_x888_8888),
    PIXMAN_STD_FAST_PATH (SRC, x8b8g8r8, null, a8b8g8r8, fast_composite_src_x888_8888),
    PIXMAN_STD_FAST_PATH (SRC, a8r8g8b8, null, x8r8g8b8, fast_composite_src_memcpy),
    PIXMAN_STD_FAST_PATH (SRC, a8r8g8b8, null, a8r8g8b8, fast_composite_src_memcpy),
    PIXMAN_STD_FAST_PATH (SRC, x8r8g8b8, null, x8r8g8b8, fast_composite_src_memcpy),
    PIXMAN_STD_FAST_PATH (SRC, a8b8g8r8, null, x8b8g8r8, fast_composite_src_memcpy),
    PIXMAN_STD_FAST_PATH (SRC, a8b8g8r8, null, a8b8g8r8, fast_composite_src_memcpy),
    PIXMAN_STD_FAST_PATH (SRC, x8b8g8r8, null, x8b8g8r8, fast_composite_src_memcpy),
    PIXMAN_STD_FAST_PATH (SRC, b8g8r8a8, null, b8g8r8x8, fast_composite_src_memcpy),
    PIXMAN_STD_FAST_PATH (SRC, b8g8r8a8, null, b8g8r8a8, fast_composite_src_memcpy),
    PIXMAN_STD_FAST_PATH (SRC, b8g8r8x8, null, b8g8r8x8, fast_composite_src_memcpy),
    PIXMAN_STD_FAST_PATH (SRC, r5g6b5, null, r5g6b5, fast_composite_src_memcpy),
    PIXMAN_STD_FAST_PATH (SRC, b5g6r5, null, b5g6r5, fast_composite_src_memcpy),
    PIXMAN_STD_FAST_PATH (SRC, r8g8b8, null, r8g8b8, fast_composite_src_memcpy),
    PIXMAN_STD_FAST_PATH (SRC, b8g8r8, null, b8g8r8, fast_composite_src_memcpy),
    PIXMAN_STD_FAST_PATH (SRC, x1r5g5b5, null, x1r5g5b5, fast_composite_src_memcpy),
    PIXMAN_STD_FAST_PATH (SRC, a1r5g5b5, null, x1r5g5b5, fast_composite_src_memcpy),
    PIXMAN_STD_FAST_PATH (SRC, a8, null, a8, fast_composite_src_memcpy),
    PIXMAN_STD_FAST_PATH (SRC, a8r8g8b8, null, r5g6b5, fast_composite_src_x888_0565),
    PIXMAN_STD_FAST_PATH (SRC, x8r8g8b8, null, r5g6b5, fast_composite_src_x888_0565),
    PIXMAN_STD_FAST_PATH (SRC, a8b8g8r8, null, b5g6r5, fast_composite_src_x888_0565),
    PIXMAN_STD_FAST_PATH (SRC, x8b8g8r8, null, b5g6r5, fast_composite_src_x888_0565),
    /* IN */
    PIXMAN_STD_FAST_PATH (IN, a8, null, a8, fast_composite_in_8_8),
    PIXMAN_STD_FAST_PATH (IN, solid, a8, a8, fast_composite_in_n_8_8),

    /* Nearest-neighbour scaling paths generated by FAST_NEAREST above */
    SIMPLE_NEAREST_FAST_PATH (SRC, x8r8g8b8, x8r8g8b8, 8888_8888),
    SIMPLE_NEAREST_FAST_PATH (SRC, a8r8g8b8, x8r8g8b8, 8888_8888),
    SIMPLE_NEAREST_FAST_PATH (SRC, x8b8g8r8, x8b8g8r8, 8888_8888),
    SIMPLE_NEAREST_FAST_PATH (SRC, a8b8g8r8, x8b8g8r8, 8888_8888),

    SIMPLE_NEAREST_FAST_PATH (SRC, a8r8g8b8, a8r8g8b8, 8888_8888),
    SIMPLE_NEAREST_FAST_PATH (SRC, a8b8g8r8, a8b8g8r8, 8888_8888),

    SIMPLE_NEAREST_FAST_PATH (SRC, x8r8g8b8, r5g6b5, 8888_565),
    SIMPLE_NEAREST_FAST_PATH (SRC, a8r8g8b8, r5g6b5, 8888_565),

    SIMPLE_NEAREST_FAST_PATH (SRC, r5g6b5, r5g6b5, 565_565),

    SIMPLE_NEAREST_FAST_PATH (OVER, a8r8g8b8, x8r8g8b8, 8888_8888),
    SIMPLE_NEAREST_FAST_PATH (OVER, a8b8g8r8, x8b8g8r8, 8888_8888),
    SIMPLE_NEAREST_FAST_PATH (OVER, a8r8g8b8, a8r8g8b8, 8888_8888),
    SIMPLE_NEAREST_FAST_PATH (OVER, a8b8g8r8, a8b8g8r8, 8888_8888),

    SIMPLE_NEAREST_FAST_PATH (OVER, a8r8g8b8, r5g6b5, 8888_565),

/* Catch-all entries that route any remaining nearest-scaled 8888
 * combinations to the generic fast_composite_scaled_nearest. */
#define NEAREST_FAST_PATH(op,s,d)		\
    {   PIXMAN_OP_ ## op,			\
	PIXMAN_ ## s, SCALED_NEAREST_FLAGS,	\
	PIXMAN_null, 0,				\
	PIXMAN_ ## d, FAST_PATH_STD_DEST_FLAGS,	\
	fast_composite_scaled_nearest,		\
    }

    NEAREST_FAST_PATH (SRC, x8r8g8b8, x8r8g8b8),
    NEAREST_FAST_PATH (SRC, a8r8g8b8, x8r8g8b8),
    NEAREST_FAST_PATH (SRC, x8b8g8r8, x8b8g8r8),
    NEAREST_FAST_PATH (SRC, a8b8g8r8, x8b8g8r8),

    NEAREST_FAST_PATH (SRC, x8r8g8b8, a8r8g8b8),
    NEAREST_FAST_PATH (SRC, a8r8g8b8, a8r8g8b8),
    NEAREST_FAST_PATH (SRC, x8b8g8r8, a8b8g8r8),
    NEAREST_FAST_PATH (SRC, a8b8g8r8, a8b8g8r8),

    NEAREST_FAST_PATH (OVER, x8r8g8b8, x8r8g8b8),
    NEAREST_FAST_PATH (OVER, a8r8g8b8, x8r8g8b8),
    NEAREST_FAST_PATH (OVER, x8b8g8r8, x8b8g8r8),
    NEAREST_FAST_PATH (OVER, a8b8g8r8, x8b8g8r8),

    NEAREST_FAST_PATH (OVER, x8r8g8b8, a8r8g8b8),
    NEAREST_FAST_PATH (OVER, a8r8g8b8, a8r8g8b8),
    NEAREST_FAST_PATH (OVER, x8b8g8r8, a8b8g8r8),
    NEAREST_FAST_PATH (OVER, a8b8g8r8, a8b8g8r8),

    /* Sentinel */
    {   PIXMAN_OP_NONE	},
};
 
/* Fill a rectangle of an 8bpp image with the low byte of xor.
 * stride is in uint32_t units, as everywhere in pixman. */
static void
pixman_fill8 (uint32_t *bits,
              int       stride,
              int       x,
              int       y,
              int       width,
              int       height,
              uint32_t  xor)
{
    int byte_stride = stride * (int) sizeof (uint32_t);
    uint8_t *row = (uint8_t *) bits + y * byte_stride + x;
    uint8_t v = xor & 0xff;
    int i, j;

    for (j = 0; j < height; j++)
    {
	for (i = 0; i < width; i++)
	    row[i] = v;

	row += byte_stride;
    }
}
 
/* Fill a rectangle of a 16bpp image with the low 16 bits of xor. */
static void
pixman_fill16 (uint32_t *bits,
               int       stride,
               int       x,
               int       y,
               int       width,
               int       height,
               uint32_t  xor)
{
    /* Convert the uint32_t stride to uint16_t units. */
    int short_stride =
	(stride * (int) sizeof (uint32_t)) / (int) sizeof (uint16_t);
    uint16_t *row = (uint16_t *) bits + y * short_stride + x;
    uint16_t v = xor & 0xffff;
    int i, j;

    for (j = 0; j < height; j++)
    {
	for (i = 0; i < width; i++)
	    row[i] = v;

	row += short_stride;
    }
}
 
/* Fill a rectangle of a 32bpp image with xor. */
static void
pixman_fill32 (uint32_t *bits,
               int       stride,
               int       x,
               int       y,
               int       width,
               int       height,
               uint32_t  xor)
{
    uint32_t *row = bits + y * stride + x;
    int i, j;

    for (j = 0; j < height; j++)
    {
	for (i = 0; i < width; i++)
	    row[i] = xor;

	row += stride;
    }
}
 
/*
 * Fill entry point for the fast-path implementation.  Dispatches on
 * bits-per-pixel to a specialized fill helper; unsupported depths
 * (e.g. 1, 4, 24 bpp) are deferred to the delegate implementation.
 *
 * Returns TRUE when handled here, otherwise the delegate's result.
 */
static pixman_bool_t
fast_path_fill (pixman_implementation_t *imp,
                uint32_t *               bits,
                int                      stride,
                int                      bpp,
                int                      x,
                int                      y,
                int                      width,
                int                      height,
                uint32_t                 xor)
{
    switch (bpp)
    {
    case 8:
	pixman_fill8 (bits, stride, x, y, width, height, xor);
	break;

    case 16:
	pixman_fill16 (bits, stride, x, y, width, height, xor);
	break;

    case 32:
	pixman_fill32 (bits, stride, x, y, width, height, xor);
	break;

    default:
	/* (A `break' here would be unreachable after the return.) */
	return _pixman_implementation_fill (
	    imp->delegate, bits, stride, bpp, x, y, width, height, xor);
    }

    return TRUE;
}
 
/*
 * Create the C fast-path implementation, layered on top of the general
 * (always-correct, slower) implementation, and install its optimized
 * fill entry point.  Callers own the returned implementation.
 */
pixman_implementation_t *
_pixman_implementation_create_fast_path (void)
{
    pixman_implementation_t *general = _pixman_implementation_create_general ();
    pixman_implementation_t *imp = _pixman_implementation_create (general, c_fast_paths);

    imp->fill = fast_path_fill;

    return imp;
}
/programs/develop/libraries/pixman/pixman-fast-path.h
0,0 → 1,451
/* -*- Mode: c; c-basic-offset: 4; tab-width: 8; indent-tabs-mode: t; -*- */
/*
* Copyright © 2000 SuSE, Inc.
* Copyright © 2007 Red Hat, Inc.
*
* Permission to use, copy, modify, distribute, and sell this software and its
* documentation for any purpose is hereby granted without fee, provided that
* the above copyright notice appear in all copies and that both that
* copyright notice and this permission notice appear in supporting
* documentation, and that the name of SuSE not be used in advertising or
* publicity pertaining to distribution of the software without specific,
* written prior permission. SuSE makes no representations about the
* suitability of this software for any purpose. It is provided "as is"
* without express or implied warranty.
*
* SuSE DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING ALL
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL SuSE
* BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
* OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*
* Author: Keith Packard, SuSE, Inc.
*/
 
#ifndef PIXMAN_FAST_PATH_H__
#define PIXMAN_FAST_PATH_H__
 
#include "pixman-private.h"
 
#define PIXMAN_REPEAT_COVER -1
 
/*
 * Apply the image repeat mode to coordinate *c for an axis of the given
 * size.  Returns FALSE only for PIXMAN_REPEAT_NONE with *c outside
 * [0, size); all other modes map *c into range and return TRUE.
 */
static force_inline pixman_bool_t
repeat (pixman_repeat_t repeat, int *c, int size)
{
    switch (repeat)
    {
    case PIXMAN_REPEAT_NONE:
	if (*c < 0 || *c >= size)
	    return FALSE;
	break;

    case PIXMAN_REPEAT_NORMAL:
	/* Wrap into [0, size) by repeated addition/subtraction. */
	while (*c >= size)
	    *c -= size;
	while (*c < 0)
	    *c += size;
	break;

    case PIXMAN_REPEAT_PAD:
	*c = CLIP (*c, 0, size - 1);
	break;

    default: /* PIXMAN_REPEAT_REFLECT */
	*c = MOD (*c, size * 2);
	if (*c >= size)
	    *c = size * 2 - *c - 1;
	break;
    }

    return TRUE;
}
 
/*
* For each scanline fetched from source image with PAD repeat:
* - calculate how many pixels need to be padded on the left side
* - calculate how many pixels need to be padded on the right side
* - update width to only count pixels which are fetched from the image
* All this information is returned via 'width', 'left_pad', 'right_pad'
* arguments. The code is assuming that 'unit_x' is positive.
*
* Note: 64-bit math is used in order to avoid potential overflows, which
* is probably excessive in many cases. This particular function
* may need its own correctness test and performance tuning.
*/
static force_inline void
pad_repeat_get_scanline_bounds (int32_t        source_image_width,
                                pixman_fixed_t vx,
                                pixman_fixed_t unit_x,
                                int32_t *      width,
                                int32_t *      left_pad,
                                int32_t *      right_pad)
{
    /* One past the last valid fixed-point source coordinate. */
    int64_t max_vx = (int64_t) source_image_width << 16;
    int64_t tmp;
    if (vx < 0)
    {
	/* Number of steps until vx becomes >= 0 (ceiling division). */
	tmp = ((int64_t) unit_x - 1 - vx) / unit_x;
	if (tmp > *width)
	{
	    /* The whole scanline lies left of the image. */
	    *left_pad = *width;
	    *width = 0;
	}
	else
	{
	    *left_pad = (int32_t) tmp;
	    *width -= (int32_t) tmp;
	}
    }
    else
    {
	*left_pad = 0;
    }
    /* Steps (after left padding) that remain inside the image. */
    tmp = ((int64_t) unit_x - 1 - vx + max_vx) / unit_x - *left_pad;
    if (tmp < 0)
    {
	/* The remaining scanline lies entirely right of the image. */
	*right_pad = *width;
	*width = 0;
    }
    else if (tmp >= *width)
    {
	*right_pad = 0;
    }
    else
    {
	*right_pad = *width - (int32_t) tmp;
	*width = (int32_t) tmp;
    }
}
 
/* A macroified version of specialized nearest scalers for some
* common 8888 and 565 formats. It supports SRC and OVER ops.
*
* There are two repeat versions, one that handles repeat normal,
* and one without repeat handling that only works if the src region
* used is completely covered by the pre-repeated source samples.
*
* The loops are unrolled to process two pixels per iteration for better
* performance on most CPU architectures (superscalar processors
* can issue several operations simultaneously, other processors can hide
* instructions latencies by pipelining operations). Unrolling more
* does not make much sense because the compiler will start running out
* of spare registers soon.
*/
 
/* Extract the alpha channel from an 8888 pixel. */
#define GET_8888_ALPHA(s) ((s) >> 24)
/* This is not actually used since we don't have an OVER with
   565 source, but it is needed to build. */
#define GET_0565_ALPHA(s) 0xff
 
/* Generate a nearest-neighbour scanline function for the given source
 * and destination formats and operator (SRC or OVER only).  The loop is
 * unrolled two pixels per iteration; for NORMAL repeat, vx is wrapped
 * back into [0, max_vx) after each step (unit_x is known positive). */
#define FAST_NEAREST_SCANLINE(scanline_func_name, SRC_FORMAT, DST_FORMAT,			\
			      src_type_t, dst_type_t, OP, repeat_mode)				\
static force_inline void									\
scanline_func_name (dst_type_t       *dst,							\
		    src_type_t       *src,							\
		    int32_t           w,							\
		    pixman_fixed_t    vx,							\
		    pixman_fixed_t    unit_x,							\
		    pixman_fixed_t    max_vx)							\
{												\
	uint32_t   d;										\
	src_type_t s1, s2;									\
	uint8_t    a1, a2;									\
	int        x1, x2;									\
												\
	if (PIXMAN_OP_ ## OP != PIXMAN_OP_SRC && PIXMAN_OP_ ## OP != PIXMAN_OP_OVER)		\
	    abort();										\
												\
	while ((w -= 2) >= 0)									\
	{											\
	    x1 = vx >> 16;									\
	    vx += unit_x;									\
	    if (PIXMAN_REPEAT_ ## repeat_mode == PIXMAN_REPEAT_NORMAL)				\
	    {											\
		/* This works because we know that unit_x is positive */			\
		while (vx >= max_vx)								\
		    vx -= max_vx;								\
	    }											\
	    s1 = src[x1];									\
												\
	    x2 = vx >> 16;									\
	    vx += unit_x;									\
	    if (PIXMAN_REPEAT_ ## repeat_mode == PIXMAN_REPEAT_NORMAL)				\
	    {											\
		/* This works because we know that unit_x is positive */			\
		while (vx >= max_vx)								\
		    vx -= max_vx;								\
	    }											\
	    s2 = src[x2];									\
												\
	    if (PIXMAN_OP_ ## OP == PIXMAN_OP_OVER)						\
	    {											\
		a1 = GET_ ## SRC_FORMAT ## _ALPHA(s1);						\
		a2 = GET_ ## SRC_FORMAT ## _ALPHA(s2);						\
												\
		if (a1 == 0xff)									\
		{										\
		    *dst = CONVERT_ ## SRC_FORMAT ## _TO_ ## DST_FORMAT (s1);			\
		}										\
		else if (s1)									\
		{										\
		    d = CONVERT_ ## DST_FORMAT ## _TO_8888 (*dst);				\
		    s1 = CONVERT_ ## SRC_FORMAT ## _TO_8888 (s1);				\
		    a1 ^= 0xff;									\
		    UN8x4_MUL_UN8_ADD_UN8x4 (d, a1, s1);					\
		    *dst = CONVERT_8888_TO_ ## DST_FORMAT (d);					\
		}										\
		dst++;										\
												\
		if (a2 == 0xff)									\
		{										\
		    *dst = CONVERT_ ## SRC_FORMAT ## _TO_ ## DST_FORMAT (s2);			\
		}										\
		else if (s2)									\
		{										\
		    d = CONVERT_## DST_FORMAT ## _TO_8888 (*dst);				\
		    s2 = CONVERT_## SRC_FORMAT ## _TO_8888 (s2);				\
		    a2 ^= 0xff;									\
		    UN8x4_MUL_UN8_ADD_UN8x4 (d, a2, s2);					\
		    *dst = CONVERT_8888_TO_ ## DST_FORMAT (d);					\
		}										\
		dst++;										\
	    }											\
	    else /* PIXMAN_OP_SRC */								\
	    {											\
		*dst++ = CONVERT_ ## SRC_FORMAT ## _TO_ ## DST_FORMAT (s1);			\
		*dst++ = CONVERT_ ## SRC_FORMAT ## _TO_ ## DST_FORMAT (s2);			\
	    }											\
	}											\
												\
	if (w & 1)										\
	{											\
	    x1 = vx >> 16;									\
	    s1 = src[x1];									\
												\
	    if (PIXMAN_OP_ ## OP == PIXMAN_OP_OVER)						\
	    {											\
		a1 = GET_ ## SRC_FORMAT ## _ALPHA(s1);						\
												\
		if (a1 == 0xff)									\
		{										\
		    *dst = CONVERT_ ## SRC_FORMAT ## _TO_ ## DST_FORMAT (s1);			\
		}										\
		else if (s1)									\
		{										\
		    d = CONVERT_## DST_FORMAT ## _TO_8888 (*dst);				\
		    s1 = CONVERT_ ## SRC_FORMAT ## _TO_8888 (s1);				\
		    a1 ^= 0xff;									\
		    UN8x4_MUL_UN8_ADD_UN8x4 (d, a1, s1);					\
		    *dst = CONVERT_8888_TO_ ## DST_FORMAT (d);					\
		}										\
		dst++;										\
	    }											\
	    else /* PIXMAN_OP_SRC */								\
	    {											\
		*dst++ = CONVERT_ ## SRC_FORMAT ## _TO_ ## DST_FORMAT (s1);			\
	    }											\
	}											\
}
 
#define FAST_NEAREST_MAINLOOP_INT(scale_func_name, scanline_func, src_type_t, dst_type_t, \
repeat_mode) \
static void \
fast_composite_scaled_nearest ## scale_func_name (pixman_implementation_t *imp, \
pixman_op_t op, \
pixman_image_t * src_image, \
pixman_image_t * mask_image, \
pixman_image_t * dst_image, \
int32_t src_x, \
int32_t src_y, \
int32_t mask_x, \
int32_t mask_y, \
int32_t dst_x, \
int32_t dst_y, \
int32_t width, \
int32_t height) \
{ \
dst_type_t *dst_line; \
src_type_t *src_first_line; \
int y; \
pixman_fixed_t max_vx = max_vx; /* suppress uninitialized variable warning */ \
pixman_fixed_t max_vy; \
pixman_vector_t v; \
pixman_fixed_t vx, vy; \
pixman_fixed_t unit_x, unit_y; \
int32_t left_pad, right_pad; \
\
src_type_t *src; \
dst_type_t *dst; \
int src_stride, dst_stride; \
\
PIXMAN_IMAGE_GET_LINE (dst_image, dst_x, dst_y, dst_type_t, dst_stride, dst_line, 1); \
/* pass in 0 instead of src_x and src_y because src_x and src_y need to be \
* transformed from destination space to source space */ \
PIXMAN_IMAGE_GET_LINE (src_image, 0, 0, src_type_t, src_stride, src_first_line, 1); \
\
/* reference point is the center of the pixel */ \
v.vector[0] = pixman_int_to_fixed (src_x) + pixman_fixed_1 / 2; \
v.vector[1] = pixman_int_to_fixed (src_y) + pixman_fixed_1 / 2; \
v.vector[2] = pixman_fixed_1; \
\
if (!pixman_transform_point_3d (src_image->common.transform, &v)) \
return; \
\
unit_x = src_image->common.transform->matrix[0][0]; \
unit_y = src_image->common.transform->matrix[1][1]; \
\
/* Round down to closest integer, ensuring that 0.5 rounds to 0, not 1 */ \
v.vector[0] -= pixman_fixed_e; \
v.vector[1] -= pixman_fixed_e; \
\
vx = v.vector[0]; \
vy = v.vector[1]; \
\
if (PIXMAN_REPEAT_ ## repeat_mode == PIXMAN_REPEAT_NORMAL) \
{ \
/* Clamp repeating positions inside the actual samples */ \
max_vx = src_image->bits.width << 16; \
max_vy = src_image->bits.height << 16; \
\
repeat (PIXMAN_REPEAT_NORMAL, &vx, max_vx); \
repeat (PIXMAN_REPEAT_NORMAL, &vy, max_vy); \
} \
\
if (PIXMAN_REPEAT_ ## repeat_mode == PIXMAN_REPEAT_PAD || \
PIXMAN_REPEAT_ ## repeat_mode == PIXMAN_REPEAT_NONE) \
{ \
pad_repeat_get_scanline_bounds (src_image->bits.width, vx, unit_x, \
&width, &left_pad, &right_pad); \
vx += left_pad * unit_x; \
} \
\
while (--height >= 0) \
{ \
dst = dst_line; \
dst_line += dst_stride; \
\
y = vy >> 16; \
vy += unit_y; \
if (PIXMAN_REPEAT_ ## repeat_mode == PIXMAN_REPEAT_NORMAL) \
repeat (PIXMAN_REPEAT_NORMAL, &vy, max_vy); \
if (PIXMAN_REPEAT_ ## repeat_mode == PIXMAN_REPEAT_PAD) \
{ \
repeat (PIXMAN_REPEAT_PAD, &y, src_image->bits.height); \
src = src_first_line + src_stride * y; \
if (left_pad > 0) \
{ \
scanline_func (dst, src, left_pad, 0, 0, 0); \
} \
if (width > 0) \
{ \
scanline_func (dst + left_pad, src, width, vx, unit_x, 0); \
} \
if (right_pad > 0) \
{ \
scanline_func (dst + left_pad + width, src + src_image->bits.width - 1, \
right_pad, 0, 0, 0); \
} \
} \
else if (PIXMAN_REPEAT_ ## repeat_mode == PIXMAN_REPEAT_NONE) \
{ \
static src_type_t zero = 0; \
if (y < 0 || y >= src_image->bits.height) \
{ \
scanline_func (dst, &zero, left_pad + width + right_pad, 0, 0, 0); \
continue; \
} \
src = src_first_line + src_stride * y; \
if (left_pad > 0) \
{ \
scanline_func (dst, &zero, left_pad, 0, 0, 0); \
} \
if (width > 0) \
{ \
scanline_func (dst + left_pad, src, width, vx, unit_x, 0); \
} \
if (right_pad > 0) \
{ \
scanline_func (dst + left_pad + width, &zero, right_pad, 0, 0, 0); \
} \
} \
else \
{ \
src = src_first_line + src_stride * y; \
scanline_func (dst, src, width, vx, unit_x, max_vx); \
} \
} \
}
 
/* A workaround for old sun studio, see: https://bugs.freedesktop.org/show_bug.cgi?id=32764 */
/* Extra indirection so that scale_func_name is macro-expanded before being
 * pasted into the generated function name. */
#define FAST_NEAREST_MAINLOOP(scale_func_name, scanline_func, src_type_t, dst_type_t,   \
                              repeat_mode)                                              \
    FAST_NEAREST_MAINLOOP_INT(_ ## scale_func_name, scanline_func, src_type_t, dst_type_t, \
                              repeat_mode)                                              \
 
/* Instantiates a complete nearest-scaling fast path: first the per-row
 * scanline worker, then the mainloop that drives it.  The trailing
 * `extern int no_such_variable' declaration exists solely to consume the
 * semicolon that callers write after the macro invocation. */
#define FAST_NEAREST(scale_func_name, SRC_FORMAT, DST_FORMAT,                           \
                     src_type_t, dst_type_t, OP, repeat_mode)                           \
    FAST_NEAREST_SCANLINE(scaled_nearest_scanline_ ## scale_func_name ## _ ## OP,       \
                          SRC_FORMAT, DST_FORMAT, src_type_t, dst_type_t,               \
                          OP, repeat_mode)                                              \
    FAST_NEAREST_MAINLOOP_INT(_ ## scale_func_name ## _ ## OP,                          \
                              scaled_nearest_scanline_ ## scale_func_name ## _ ## OP,   \
                              src_type_t, dst_type_t, repeat_mode)                      \
                                                                                        \
    extern int no_such_variable
 
 
/* Flags an image must satisfy for any nearest-scaling fast path:
 * scale-only transform, nearest filter, no alpha map, no read/write
 * accessors, and a <= 8 bit-per-channel format. */
#define SCALED_NEAREST_FLAGS                                            \
    (FAST_PATH_SCALE_TRANSFORM  |                                       \
     FAST_PATH_NO_ALPHA_MAP     |                                       \
     FAST_PATH_NEAREST_FILTER   |                                       \
     FAST_PATH_NO_ACCESSORS     |                                       \
     FAST_PATH_NARROW_FORMAT)

/* Fast-path table entry for NORMAL (tiling) repeat. */
#define SIMPLE_NEAREST_FAST_PATH_NORMAL(op,s,d,func)                    \
    {   PIXMAN_OP_ ## op,                                               \
        PIXMAN_ ## s,                                                   \
        (SCALED_NEAREST_FLAGS           |                               \
         FAST_PATH_NORMAL_REPEAT        |                               \
         FAST_PATH_X_UNIT_POSITIVE),                                    \
        PIXMAN_null, 0,                                                 \
        PIXMAN_ ## d, FAST_PATH_STD_DEST_FLAGS,                         \
        fast_composite_scaled_nearest_ ## func ## _normal ## _ ## op,   \
    }

/* Fast-path table entry for PAD (clamp-to-edge) repeat. */
#define SIMPLE_NEAREST_FAST_PATH_PAD(op,s,d,func)                       \
    {   PIXMAN_OP_ ## op,                                               \
        PIXMAN_ ## s,                                                   \
        (SCALED_NEAREST_FLAGS           |                               \
         FAST_PATH_PAD_REPEAT           |                               \
         FAST_PATH_X_UNIT_POSITIVE),                                    \
        PIXMAN_null, 0,                                                 \
        PIXMAN_ ## d, FAST_PATH_STD_DEST_FLAGS,                         \
        fast_composite_scaled_nearest_ ## func ## _pad ## _ ## op,      \
    }

/* Fast-path table entry for NONE (transparent outside) repeat. */
#define SIMPLE_NEAREST_FAST_PATH_NONE(op,s,d,func)                      \
    {   PIXMAN_OP_ ## op,                                               \
        PIXMAN_ ## s,                                                   \
        (SCALED_NEAREST_FLAGS           |                               \
         FAST_PATH_NONE_REPEAT          |                               \
         FAST_PATH_X_UNIT_POSITIVE),                                    \
        PIXMAN_null, 0,                                                 \
        PIXMAN_ ## d, FAST_PATH_STD_DEST_FLAGS,                         \
        fast_composite_scaled_nearest_ ## func ## _none ## _ ## op,     \
    }

/* Fast-path table entry for the case where all samples are inside the
 * source image, so repeat handling is unnecessary. */
#define SIMPLE_NEAREST_FAST_PATH_COVER(op,s,d,func)                     \
    {   PIXMAN_OP_ ## op,                                               \
        PIXMAN_ ## s,                                                   \
        SCALED_NEAREST_FLAGS | FAST_PATH_SAMPLES_COVER_CLIP,            \
        PIXMAN_null, 0,                                                 \
        PIXMAN_ ## d, FAST_PATH_STD_DEST_FLAGS,                         \
        fast_composite_scaled_nearest_ ## func ## _cover ## _ ## op,    \
    }

/* Prefer the use of 'cover' variant, because it is faster */
#define SIMPLE_NEAREST_FAST_PATH(op,s,d,func)                           \
    SIMPLE_NEAREST_FAST_PATH_COVER (op,s,d,func),                       \
    SIMPLE_NEAREST_FAST_PATH_NONE (op,s,d,func),                        \
    SIMPLE_NEAREST_FAST_PATH_PAD (op,s,d,func),                         \
    SIMPLE_NEAREST_FAST_PATH_NORMAL (op,s,d,func)
 
#endif
/programs/develop/libraries/pixman/pixman-general.c
0,0 → 1,315
/*
* Copyright © 2009 Red Hat, Inc.
* Copyright © 2000 SuSE, Inc.
* Copyright © 2007 Red Hat, Inc.
* Copyright © 2000 Keith Packard, member of The XFree86 Project, Inc.
* 2005 Lars Knoll & Zack Rusin, Trolltech
* 2008 Aaron Plattner, NVIDIA Corporation
*
* Permission to use, copy, modify, distribute, and sell this software and its
* documentation for any purpose is hereby granted without fee, provided that
* the above copyright notice appear in all copies and that both that
* copyright notice and this permission notice appear in supporting
* documentation, and that the name of Red Hat not be used in advertising or
* publicity pertaining to distribution of the software without specific,
* written prior permission. Red Hat makes no representations about the
* suitability of this software for any purpose. It is provided "as is"
* without express or implied warranty.
*
* THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS
* SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS, IN NO EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY
* SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN
* AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING
* OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS
* SOFTWARE.
*/
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "pixman-private.h"
#include "pixman-combine32.h"
#include "pixman-private.h"
 
/* Bytes per intermediate scanline buffer; three of these (src, mask, dest)
 * fit in the on-stack buffer before heap allocation becomes necessary. */
#define SCANLINE_BUFFER_LENGTH 8192
 
/*
 * General (fallback) compositing routine.  For each scanline it fetches the
 * source, mask and destination into intermediate buffers, runs the combiner
 * for `op', and stores the result back.  Used whenever no specialized fast
 * path matches.
 *
 * The three intermediate buffers live on the stack when a scanline fits in
 * SCANLINE_BUFFER_LENGTH bytes per component; otherwise one heap block is
 * allocated and freed on every exit path.
 *
 * Fix: the original code returned directly when no combiner was available
 * (`if (!compose) return;`), leaking the heap-allocated scanline buffer for
 * wide scanlines.  All exits now go through a common cleanup path.
 */
static void
general_composite_rect  (pixman_implementation_t *imp,
                         pixman_op_t              op,
                         pixman_image_t *         src,
                         pixman_image_t *         mask,
                         pixman_image_t *         dest,
                         int32_t                  src_x,
                         int32_t                  src_y,
                         int32_t                  mask_x,
                         int32_t                  mask_y,
                         int32_t                  dest_x,
                         int32_t                  dest_y,
                         int32_t                  width,
                         int32_t                  height)
{
    uint64_t stack_scanline_buffer[(SCANLINE_BUFFER_LENGTH * 3 + 7) / 8];
    uint8_t *scanline_buffer = (uint8_t *) stack_scanline_buffer;
    uint8_t *src_buffer, *mask_buffer, *dest_buffer;
    fetch_scanline_t fetch_src = NULL, fetch_mask = NULL, fetch_dest = NULL;
    pixman_combine_32_func_t compose;
    store_scanline_t store;
    source_image_class_t src_class, mask_class;
    pixman_bool_t component_alpha;
    uint32_t *bits;
    int32_t stride;
    int narrow, Bpp;
    int i;

    /* narrow = every image is <= 8 bits per channel, so the intermediate
     * format is a8r8g8b8 (4 bytes/pixel); otherwise 16-bit channels are
     * used (8 bytes/pixel). */
    narrow =
        (src->common.flags & FAST_PATH_NARROW_FORMAT) &&
        (!mask || mask->common.flags & FAST_PATH_NARROW_FORMAT) &&
        (dest->common.flags & FAST_PATH_NARROW_FORMAT);
    Bpp = narrow ? 4 : 8;

    if (width * Bpp > SCANLINE_BUFFER_LENGTH)
    {
        /* overflow-checked width * 3 * Bpp allocation */
        scanline_buffer = pixman_malloc_abc (width, 3, Bpp);

        if (!scanline_buffer)
            return;
    }

    src_buffer = scanline_buffer;
    mask_buffer = src_buffer + width * Bpp;
    dest_buffer = mask_buffer + width * Bpp;

    src_class = _pixman_image_classify (src,
                                        src_x, src_y,
                                        width, height);

    mask_class = SOURCE_IMAGE_CLASS_UNKNOWN;

    if (mask)
    {
        mask_class = _pixman_image_classify (mask,
                                             src_x, src_y,
                                             width, height);
    }

    /* CLEAR ignores the source entirely; SRC ignores the destination. */
    if (op == PIXMAN_OP_CLEAR)
        fetch_src = NULL;
    else if (narrow)
        fetch_src = _pixman_image_get_scanline_32;
    else
        fetch_src = _pixman_image_get_scanline_64;

    if (!mask || op == PIXMAN_OP_CLEAR)
        fetch_mask = NULL;
    else if (narrow)
        fetch_mask = _pixman_image_get_scanline_32;
    else
        fetch_mask = _pixman_image_get_scanline_64;

    if (op == PIXMAN_OP_CLEAR || op == PIXMAN_OP_SRC)
        fetch_dest = NULL;
    else if (narrow)
        fetch_dest = _pixman_image_get_scanline_32;
    else
        fetch_dest = _pixman_image_get_scanline_64;

    if (narrow)
        store = _pixman_image_store_scanline_32;
    else
        store = _pixman_image_store_scanline_64;

    /* Skip the store step and composite directly into the
     * destination if the output format of the compose func matches
     * the destination format.
     *
     * If the destination format is a8r8g8b8 then we can always do
     * this. If it is x8r8g8b8, then we can only do it if the
     * operator doesn't make use of destination alpha.
     */
    if ((dest->bits.format == PIXMAN_a8r8g8b8)        ||
        (dest->bits.format == PIXMAN_x8r8g8b8        &&
         (op == PIXMAN_OP_OVER                       ||
          op == PIXMAN_OP_ADD                        ||
          op == PIXMAN_OP_SRC                        ||
          op == PIXMAN_OP_CLEAR                      ||
          op == PIXMAN_OP_IN_REVERSE                 ||
          op == PIXMAN_OP_OUT_REVERSE                ||
          op == PIXMAN_OP_DST)))
    {
        if (narrow &&
            !dest->common.alpha_map &&
            !dest->bits.write_func)
        {
            store = NULL;
        }
    }

    if (!store)
    {
        bits = dest->bits.bits;
        stride = dest->bits.rowstride;
    }
    else
    {
        bits = NULL;
        stride = 0;
    }

    component_alpha =
        fetch_src                       &&
        fetch_mask                      &&
        mask                            &&
        mask->common.type == BITS       &&
        mask->common.component_alpha    &&
        PIXMAN_FORMAT_RGB (mask->bits.format);

    if (narrow)
    {
        if (component_alpha)
            compose = _pixman_implementation_combine_32_ca;
        else
            compose = _pixman_implementation_combine_32;
    }
    else
    {
        if (component_alpha)
            compose = (pixman_combine_32_func_t)_pixman_implementation_combine_64_ca;
        else
            compose = (pixman_combine_32_func_t)_pixman_implementation_combine_64;
    }

    if (!compose)
        goto out;       /* was `return;' — leaked a heap scanline_buffer */

    if (!fetch_mask)
        mask_buffer = NULL;

    for (i = 0; i < height; ++i)
    {
        /* fill first half of scanline with source */
        if (fetch_src)
        {
            if (fetch_mask)
            {
                /* fetch mask before source so that fetching of
                   source can be optimized */
                fetch_mask (mask, mask_x, mask_y + i,
                            width, (void *)mask_buffer, 0);

                /* a HORIZONTAL image yields the same scanline every row,
                 * so it only needs to be fetched once */
                if (mask_class == SOURCE_IMAGE_CLASS_HORIZONTAL)
                    fetch_mask = NULL;
            }

            if (src_class == SOURCE_IMAGE_CLASS_HORIZONTAL)
            {
                fetch_src (src, src_x, src_y + i,
                           width, (void *)src_buffer, 0);
                fetch_src = NULL;
            }
            else
            {
                fetch_src (src, src_x, src_y + i,
                           width, (void *)src_buffer, (void *)mask_buffer);
            }
        }
        else if (fetch_mask)
        {
            fetch_mask (mask, mask_x, mask_y + i,
                        width, (void *)mask_buffer, 0);
        }

        if (store)
        {
            /* fill dest into second half of scanline */
            if (fetch_dest)
            {
                fetch_dest (dest, dest_x, dest_y + i,
                            width, (void *)dest_buffer, 0);
            }

            /* blend */
            compose (imp->toplevel, op,
                     (void *)dest_buffer,
                     (void *)src_buffer,
                     (void *)mask_buffer,
                     width);

            /* write back */
            store (&(dest->bits), dest_x, dest_y + i, width,
                   (void *)dest_buffer);
        }
        else
        {
            /* blend directly into the destination bits */
            compose (imp->toplevel, op,
                     bits + (dest_y + i) * stride + dest_x,
                     (void *)src_buffer, (void *)mask_buffer, width);
        }
    }

out:
    if (scanline_buffer != (uint8_t *) stack_scanline_buffer)
        free (scanline_buffer);
}
 
/* Catch-all fast path table: the wildcard entry matches every operator and
 * format combination, making general_composite_rect the fallback of last
 * resort in the implementation chain. */
static const pixman_fast_path_t general_fast_path[] =
{
    { PIXMAN_OP_any, PIXMAN_any, 0, PIXMAN_any, 0, PIXMAN_any, 0, general_composite_rect },
    { PIXMAN_OP_NONE }
};
 
/*
 * Block-transfer stub for the general implementation.
 *
 * The general backend has no accelerated blitter, so it always reports
 * failure; a CPU-specific backend (MMX/SSE2) must handle the request, or
 * the caller falls back to ordinary compositing.
 */
static pixman_bool_t
general_blt (pixman_implementation_t *imp,
             uint32_t *               src_bits,
             uint32_t *               dst_bits,
             int                      src_stride,
             int                      dst_stride,
             int                      src_bpp,
             int                      dst_bpp,
             int                      src_x,
             int                      src_y,
             int                      dst_x,
             int                      dst_y,
             int                      width,
             int                      height)
{
    return FALSE;
}
 
/*
 * Solid-fill stub for the general implementation.
 *
 * Always reports failure: filling is delegated to a CPU-specific backend,
 * or done via ordinary compositing by the caller.
 */
static pixman_bool_t
general_fill (pixman_implementation_t *imp,
              uint32_t *               bits,
              int                      stride,
              int                      bpp,
              int                      x,
              int                      y,
              int                      width,
              int                      height,
              uint32_t                 xor)
{
    return FALSE;
}
 
pixman_implementation_t *
_pixman_implementation_create_general (void)
{
pixman_implementation_t *imp = _pixman_implementation_create (NULL, general_fast_path);
 
_pixman_setup_combiner_functions_32 (imp);
_pixman_setup_combiner_functions_64 (imp);
 
imp->blt = general_blt;
imp->fill = general_fill;
 
return imp;
}
 
/programs/develop/libraries/pixman/pixman-gradient-walker.c
0,0 → 1,254
/*
*
* Copyright © 2000 Keith Packard, member of The XFree86 Project, Inc.
* 2005 Lars Knoll & Zack Rusin, Trolltech
*
* Permission to use, copy, modify, distribute, and sell this software and its
* documentation for any purpose is hereby granted without fee, provided that
* the above copyright notice appear in all copies and that both that
* copyright notice and this permission notice appear in supporting
* documentation, and that the name of Keith Packard not be used in
* advertising or publicity pertaining to distribution of the software without
* specific, written prior permission. Keith Packard makes no
* representations about the suitability of this software for any purpose. It
* is provided "as is" without express or implied warranty.
*
* THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS
* SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS, IN NO EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY
* SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN
* AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING
* OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS
* SOFTWARE.
*/
 
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif
#include "pixman-private.h"
 
/*
 * Prepares a gradient walker over `gradient' with the given spread
 * (repeat) mode.  The color interval is initialized empty/neutral and
 * need_reset is set, so the first lookup through
 * _pixman_gradient_walker_pixel() forces a real reset.
 */
void
_pixman_gradient_walker_init (pixman_gradient_walker_t *walker,
                              gradient_t *              gradient,
                              unsigned int              spread)
{
    walker->num_stops = gradient->n_stops;
    walker->stops     = gradient->stops;
    walker->spread    = spread;

    /* neutral interval [0, 1.0) with zeroed endpoint colors */
    walker->left_x   = 0;
    walker->right_x  = 0x10000;
    walker->left_ag  = 0;
    walker->left_rb  = 0;
    walker->right_ag = 0;
    walker->right_rb = 0;
    walker->stepper  = 0;

    walker->need_reset = TRUE;
}
 
/*
 * Re-centers the walker on the stop interval containing position `pos'
 * (16.16 fixed point along the gradient).  On return, [left_x, right_x]
 * brackets pos, left/right colors are cached split into AG and RB halves
 * for cheap interpolation, and `stepper' maps an x offset within the
 * interval to a 0..256 blend factor.
 *
 * Each spread mode folds pos into the canonical [0, 1.0) stop range in its
 * own way before searching the stop list.
 */
void
_pixman_gradient_walker_reset (pixman_gradient_walker_t *walker,
                               pixman_fixed_32_32_t      pos)
{
    int32_t x, left_x, right_x;
    pixman_color_t *left_c, *right_c;
    int n, count = walker->num_stops;
    pixman_gradient_stop_t *      stops = walker->stops;

    static const pixman_color_t transparent_black = { 0, 0, 0, 0 };

    switch (walker->spread)
    {
    case PIXMAN_REPEAT_NORMAL:
        /* wrap into [0, 1.0) and find the first stop beyond x */
        x = (int32_t)pos & 0xFFFF;
        for (n = 0; n < count; n++)
            if (x < stops[n].x)
                break;
        if (n == 0)
        {
            /* before the first stop: interval wraps from the last stop */
            left_x =  stops[count - 1].x - 0x10000;
            left_c = &stops[count - 1].color;
        }
        else
        {
            left_x =  stops[n - 1].x;
            left_c = &stops[n - 1].color;
        }

        if (n == count)
        {
            /* past the last stop: interval wraps to the first stop */
            right_x =  stops[0].x + 0x10000;
            right_c = &stops[0].color;
        }
        else
        {
            right_x =  stops[n].x;
            right_c = &stops[n].color;
        }
        /* translate the interval back to the unwrapped position */
        left_x  += (pos - x);
        right_x += (pos - x);
        break;

    case PIXMAN_REPEAT_PAD:
        for (n = 0; n < count; n++)
            if (pos < stops[n].x)
                break;

        if (n == 0)
        {
            /* clamp: everything left of the first stop is its color */
            left_x =  INT32_MIN;
            left_c = &stops[0].color;
        }
        else
        {
            left_x =  stops[n - 1].x;
            left_c = &stops[n - 1].color;
        }

        if (n == count)
        {
            /* clamp: everything right of the last stop is its color */
            right_x =  INT32_MAX;
            right_c = &stops[n - 1].color;
        }
        else
        {
            right_x =  stops[n].x;
            right_c = &stops[n].color;
        }
        break;

    case PIXMAN_REPEAT_REFLECT:
        /* fold into [0, 1.0), mirroring on odd periods */
        x = (int32_t)pos & 0xFFFF;
        if ((int32_t)pos & 0x10000)
            x = 0x10000 - x;
        for (n = 0; n < count; n++)
            if (x < stops[n].x)
                break;

        if (n == 0)
        {
            left_x =  -stops[0].x;
            left_c = &stops[0].color;
        }
        else
        {
            left_x =  stops[n - 1].x;
            left_c = &stops[n - 1].color;
        }

        if (n == count)
        {
            right_x =  0x20000 - stops[n - 1].x;
            right_c = &stops[n - 1].color;
        }
        else
        {
            right_x =  stops[n].x;
            right_c = &stops[n].color;
        }

        if ((int32_t)pos & 0x10000)
        {
            /* odd period: mirror the interval and swap its endpoints */
            pixman_color_t *tmp_c;
            int32_t tmp_x;

            tmp_x   = 0x10000 - right_x;
            right_x = 0x10000 - left_x;
            left_x  = tmp_x;

            tmp_c   = right_c;
            right_c = left_c;
            left_c  = tmp_c;

            x = 0x10000 - x;
        }
        left_x  += (pos - x);
        right_x += (pos - x);
        break;

    default:  /* REPEAT_NONE */
        for (n = 0; n < count; n++)
            if (pos < stops[n].x)
                break;

        if (n == 0)
        {
            /* outside the gradient: transparent black on both ends */
            left_x  =  INT32_MIN;
            right_x =  stops[0].x;
            left_c  = right_c = (pixman_color_t*) &transparent_black;
        }
        else if (n == count)
        {
            left_x  =  stops[n - 1].x;
            right_x =  INT32_MAX;
            left_c  = right_c = (pixman_color_t*) &transparent_black;
        }
        else
        {
            left_x  =  stops[n - 1].x;
            right_x =  stops[n].x;
            left_c  = &stops[n - 1].color;
            right_c = &stops[n].color;
        }
    }

    walker->left_x   = left_x;
    walker->right_x  = right_x;
    /* pack 16-bit channels down to 8 bits, split into alpha|green and
     * red|blue halves so two channels interpolate per multiply */
    walker->left_ag  = ((left_c->alpha >> 8) << 16)   | (left_c->green >> 8);
    walker->left_rb  = ((left_c->red & 0xff00) << 8)  | (left_c->blue >> 8);
    walker->right_ag = ((right_c->alpha >> 8) << 16)  | (right_c->green >> 8);
    walker->right_rb = ((right_c->red & 0xff00) << 8) | (right_c->blue >> 8);

    if (walker->left_x == walker->right_x                ||
        ( walker->left_ag == walker->right_ag &&
          walker->left_rb == walker->right_rb ) )
    {
        /* degenerate or constant interval: no interpolation needed */
        walker->stepper = 0;
    }
    else
    {
        /* 8.24 reciprocal of the interval width, rounded to nearest */
        int32_t width = right_x - left_x;
        walker->stepper = ((1 << 24) + width / 2) / width;
    }

    walker->need_reset = FALSE;
}
 
/* TRUE when the walker has never been positioned, or when x falls outside
 * the currently cached [left_x, right_x) stop interval. */
#define PIXMAN_GRADIENT_WALKER_NEED_RESET(w, x)                         \
    ( (w)->need_reset || (x) < (w)->left_x || (x) >= (w)->right_x)
 
 
/* the following assumes that PIXMAN_GRADIENT_WALKER_NEED_RESET(w,x) is FALSE */
/*
 * Returns the premultiplied a8r8g8b8 gradient color at position x
 * (16.16 fixed point), resetting the walker first if x has left the
 * cached stop interval.
 */
uint32_t
_pixman_gradient_walker_pixel (pixman_gradient_walker_t *walker,
                               pixman_fixed_32_32_t      x)
{
    int dist, idist;
    uint32_t t1, t2, a, color;

    if (PIXMAN_GRADIENT_WALKER_NEED_RESET (walker, x))
        _pixman_gradient_walker_reset (walker, x);

    /* blend factor in 0..256 between the interval endpoints */
    dist  = ((int)(x - walker->left_x) * walker->stepper) >> 16;
    idist = 256 - dist;

    /* combined INTERPOLATE and premultiply */
    /* red|blue channel pair, interpolated in parallel */
    t1 = walker->left_rb * idist + walker->right_rb * dist;
    t1 = (t1 >> 8) & 0xff00ff;

    /* alpha|green channel pair */
    t2 = walker->left_ag * idist + walker->right_ag * dist;
    t2 &= 0xff00ff00;

    color = t2 & 0xff000000;  /* alpha passes through unmultiplied */
    a     = t2 >> 24;

    /* multiply r, b and g by alpha with 0x80-rounding */
    t1    = t1 * a + 0x800080;
    t1    = (t1 + ((t1 >> 8) & 0xff00ff)) >> 8;

    t2    = (t2 >> 8) * a + 0x800080;
    t2    = (t2 + ((t2 >> 8) & 0xff00ff));

    return (color | (t1 & 0xff00ff) | (t2 & 0xff00));
}
 
/programs/develop/libraries/pixman/pixman-image.c
0,0 → 1,846
/*
* Copyright © 2000 SuSE, Inc.
* Copyright © 2007 Red Hat, Inc.
*
* Permission to use, copy, modify, distribute, and sell this software and its
* documentation for any purpose is hereby granted without fee, provided that
* the above copyright notice appear in all copies and that both that
* copyright notice and this permission notice appear in supporting
* documentation, and that the name of SuSE not be used in advertising or
* publicity pertaining to distribution of the software without specific,
* written prior permission. SuSE makes no representations about the
* suitability of this software for any purpose. It is provided "as is"
* without express or implied warranty.
*
* SuSE DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING ALL
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL SuSE
* BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
* OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
 
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif
 
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <assert.h>
 
#include "pixman-private.h"
#include "pixman-combine32.h"
 
/*
 * Initializes the common gradient fields from a caller-supplied stop list.
 * The stops are copied (overflow-checked allocation), so the caller keeps
 * ownership of its array.  Returns FALSE on allocation failure or when
 * n_stops is not positive.
 */
pixman_bool_t
_pixman_init_gradient (gradient_t *                  gradient,
                       const pixman_gradient_stop_t *stops,
                       int                           n_stops)
{
    pixman_gradient_stop_t *stop_copy;

    return_val_if_fail (n_stops > 0, FALSE);

    stop_copy = pixman_malloc_ab (n_stops, sizeof (pixman_gradient_stop_t));
    if (!stop_copy)
        return FALSE;

    memcpy (stop_copy, stops, n_stops * sizeof (pixman_gradient_stop_t));

    gradient->stops      = stop_copy;
    gradient->n_stops    = n_stops;
    gradient->stop_range = 0xffff;

    return TRUE;
}
 
/*
* By default, just evaluate the image at 32bpp and expand. Individual image
* types can plug in a better scanline getter if they want to. For example
* we could produce smoother gradients by evaluating them at higher color
* depth, but that's a project for the future.
*/
/*
 * Generic wide (64bpp) scanline fetch: contracts the 64-bit mask to 32
 * bits, fetches the image through the 32-bit path, then expands the result
 * to 64bpp in place.  Individual image types can plug in a better getter
 * (e.g. evaluating gradients at higher depth).
 *
 * NOTE(review): if the temporary mask allocation fails, the function
 * returns without writing `buffer' at all — callers appear to tolerate
 * this (best-effort), so it is left as-is.
 */
void
_pixman_image_get_scanline_generic_64 (pixman_image_t * image,
                                       int              x,
                                       int              y,
                                       int              width,
                                       uint32_t *       buffer,
                                       const uint32_t * mask)
{
    uint32_t *mask8 = NULL;

    /* Contract the mask image, if one exists, so that the 32-bit fetch
     * function can use it.
     */
    if (mask)
    {
        mask8 = pixman_malloc_ab (width, sizeof(uint32_t));
        if (!mask8)
            return;

        pixman_contract (mask8, (uint64_t *)mask, width);
    }

    /* Fetch the source image into the first half of buffer. */
    _pixman_image_get_scanline_32 (image, x, y, width, (uint32_t*)buffer, mask8);

    /* Expand from 32bpp to 64bpp in place. */
    pixman_expand ((uint64_t *)buffer, buffer, PIXMAN_a8r8g8b8, width);

    free (mask8);
}
 
/*
 * Allocates a pixman_image_t and initializes every common field to its
 * default (no transform, NONE repeat, NEAREST filter, refcount 1, dirty so
 * the first validate computes flags).  Returns NULL on allocation failure.
 * Type-specific fields are filled in by the per-type constructors.
 */
pixman_image_t *
_pixman_image_allocate (void)
{
    pixman_image_t *image = malloc (sizeof (pixman_image_t));

    if (image)
    {
        image_common_t *common = &image->common;

        pixman_region32_init (&common->clip_region);

        common->alpha_count = 0;
        common->have_clip_region = FALSE;
        common->clip_sources = FALSE;
        common->transform = NULL;
        common->repeat = PIXMAN_REPEAT_NONE;
        common->filter = PIXMAN_FILTER_NEAREST;
        common->filter_params = NULL;
        common->n_filter_params = 0;
        common->alpha_map = NULL;
        common->component_alpha = FALSE;
        common->ref_count = 1;
        common->classify = NULL;
        common->client_clip = FALSE;
        common->destroy_func = NULL;
        common->destroy_data = NULL;
        /* force flag computation on first _pixman_image_validate() */
        common->dirty = TRUE;
    }

    return image;
}
 
/*
 * Classifies the given rectangle of the image (e.g. horizontal/vertical
 * gradient) via the image's optional classify hook; images without a hook
 * are reported as UNKNOWN.
 */
source_image_class_t
_pixman_image_classify (pixman_image_t *image,
                        int             x,
                        int             y,
                        int             width,
                        int             height)
{
    if (!image->common.classify)
        return SOURCE_IMAGE_CLASS_UNKNOWN;

    return image->common.classify (image, x, y, width, height);
}
 
/* Fetches one scanline at 32bpp (a8r8g8b8) into `buffer' by dispatching to
 * the per-image-type getter installed during validation.  `mask' may be
 * NULL; when present it lets the getter skip fully-masked-out pixels. */
void
_pixman_image_get_scanline_32 (pixman_image_t *image,
                               int             x,
                               int             y,
                               int             width,
                               uint32_t *      buffer,
                               const uint32_t *mask)
{
    image->common.get_scanline_32 (image, x, y, width, buffer, mask);
}
 
/* Even thought the type of buffer is uint32_t *, the function actually expects
* a uint64_t *buffer.
*/
/* Fetches one scanline at 64bpp by dispatching to the per-type getter.
 * Despite the uint32_t* declaration, `buffer' must point to uint64_t
 * storage (see the comment preceding this function in the original file);
 * the signature is kept for symmetry with the 32-bit path. */
void
_pixman_image_get_scanline_64 (pixman_image_t *image,
                               int             x,
                               int             y,
                               int             width,
                               uint32_t *      buffer,
                               const uint32_t *unused)
{
    image->common.get_scanline_64 (image, x, y, width, buffer, unused);
}
 
/* Marks the image dirty so the next _pixman_image_validate() recomputes
 * its fast-path flags and re-runs the type-specific property hook. */
static void
image_property_changed (pixman_image_t *image)
{
    image->common.dirty = TRUE;
}
 
/* Ref Counting */
/* Increments the image's reference count and returns the image (not
 * thread-safe: the count is a plain integer). */
PIXMAN_EXPORT pixman_image_t *
pixman_image_ref (pixman_image_t *image)
{
    image->common.ref_count++;

    return image;
}
 
/* Decrements the reference count.  When it reaches zero, runs the
 * destroy callback (if any), releases everything the image owns —
 * clip region, transform, filter params, alpha map, gradient stops,
 * caller-transferred pixel data — frees the image itself, and returns
 * TRUE.  Otherwise returns FALSE. */
PIXMAN_EXPORT pixman_bool_t
pixman_image_unref (pixman_image_t *image)
{
    image_common_t *common = (image_common_t *)image;

    common->ref_count--;

    if (common->ref_count == 0)
    {
        if (image->common.destroy_func)
            image->common.destroy_func (image, image->common.destroy_data);

        pixman_region32_fini (&common->clip_region);

        /* free (NULL) is a no-op, so the null guards the original code
         * carried around these calls were redundant */
        free (common->transform);
        free (common->filter_params);

        if (common->alpha_map)
            pixman_image_unref ((pixman_image_t *)common->alpha_map);

        if (image->type == LINEAR ||
            image->type == RADIAL ||
            image->type == CONICAL)
        {
            free (image->gradient.stops);
        }

        if (image->type == BITS)
            free (image->bits.free_me);

        free (image);

        return TRUE;
    }

    return FALSE;
}
 
/* Registers a callback (with user data) to be invoked from
 * pixman_image_unref() just before the image is freed. */
PIXMAN_EXPORT void
pixman_image_set_destroy_function (pixman_image_t *            image,
                                   pixman_image_destroy_func_t func,
                                   void *                      data)
{
    image->common.destroy_func = func;
    image->common.destroy_data = data;
}
 
/* Returns the user data registered with
 * pixman_image_set_destroy_function() (NULL if none was set). */
PIXMAN_EXPORT void *
pixman_image_get_destroy_data (pixman_image_t *image)
{
    return image->common.destroy_data;
}
 
/* Drops the image's clip by clearing the have_clip_region flag; the
 * region storage itself is left untouched and simply ignored. */
void
_pixman_image_reset_clip_region (pixman_image_t *image)
{
    image->common.have_clip_region = FALSE;
}
 
static pixman_bool_t out_of_bounds_workaround = TRUE;
 
/* Old X servers rely on out-of-bounds accesses when they are asked
* to composite with a window as the source. They create a pixman image
* pointing to some bogus position in memory, but then they set a clip
* region to the position where the actual bits are.
*
* Due to a bug in old versions of pixman, where it would not clip
* against the image bounds when a clip region was set, this would
* actually work. So by default we allow certain out-of-bound access
* to happen unless explicitly disabled.
*
* Fixed X servers should call this function to disable the workaround.
*/
/* Disables the legacy X-server out-of-bounds access workaround described
 * in the comment above; fixed X servers call this once at startup. */
PIXMAN_EXPORT void
pixman_disable_out_of_bounds_workaround (void)
{
    out_of_bounds_workaround = FALSE;
}
 
/*
 * Decides whether the legacy X-server workaround applies to this source:
 * only for clipped, non-repeating sources with a clip region while the
 * workaround is globally enabled.  Without a client-set clip, a clip
 * region lying entirely inside the drawable proves the clip is genuine,
 * so the workaround is not needed; a clip extending outside the drawable
 * is taken as the server's bogus out-of-bounds clip.
 */
static pixman_bool_t
source_image_needs_out_of_bounds_workaround (bits_image_t *image)
{
    const pixman_box32_t *extents;

    if (!image->common.clip_sources                     ||
        image->common.repeat != PIXMAN_REPEAT_NONE      ||
        !image->common.have_clip_region                 ||
        !out_of_bounds_workaround)
    {
        return FALSE;
    }

    if (image->common.client_clip)
        return TRUE;

    /* There is no client clip, so if the clip region extends beyond the
     * drawable geometry, it must be because the X server generated the
     * bogus clip region.
     */
    extents = pixman_region32_extents (&image->common.clip_region);

    if (extents->x1 >= 0 && extents->x2 <= image->width &&
        extents->y1 >= 0 && extents->y2 <= image->height)
    {
        return FALSE;
    }

    return TRUE;
}
 
/*
 * Derives the image's fast-path flag set and extended format code from its
 * current properties (transform, filter, repeat, alpha settings, type).
 * Called from _pixman_image_validate() whenever the image is dirty; the
 * results are what the fast-path dispatch tables match against.
 */
static void
compute_image_info (pixman_image_t *image)
{
    pixman_format_code_t code;
    uint32_t flags = 0;

    /* Transform */
    if (!image->common.transform)
    {
        flags |= (FAST_PATH_ID_TRANSFORM        |
                  FAST_PATH_X_UNIT_POSITIVE     |
                  FAST_PATH_Y_UNIT_ZERO         |
                  FAST_PATH_AFFINE_TRANSFORM);
    }
    else
    {
        flags |= FAST_PATH_HAS_TRANSFORM;

        /* bottom row (0 0 1) means no projective component */
        if (image->common.transform->matrix[2][0] == 0                  &&
            image->common.transform->matrix[2][1] == 0                  &&
            image->common.transform->matrix[2][2] == pixman_fixed_1)
        {
            flags |= FAST_PATH_AFFINE_TRANSFORM;

            /* zero off-diagonals mean a pure scale */
            if (image->common.transform->matrix[0][1] == 0              &&
                image->common.transform->matrix[1][0] == 0)
            {
                flags |= FAST_PATH_SCALE_TRANSFORM;
            }
        }

        if (image->common.transform->matrix[0][0] > 0)
            flags |= FAST_PATH_X_UNIT_POSITIVE;

        if (image->common.transform->matrix[1][0] == 0)
            flags |= FAST_PATH_Y_UNIT_ZERO;
    }

    /* Filter */
    switch (image->common.filter)
    {
    case PIXMAN_FILTER_NEAREST:
    case PIXMAN_FILTER_FAST:
        flags |= (FAST_PATH_NEAREST_FILTER | FAST_PATH_NO_CONVOLUTION_FILTER);
        break;

    case PIXMAN_FILTER_BILINEAR:
    case PIXMAN_FILTER_GOOD:
    case PIXMAN_FILTER_BEST:
        flags |= (FAST_PATH_BILINEAR_FILTER | FAST_PATH_NO_CONVOLUTION_FILTER);
        break;

    case PIXMAN_FILTER_CONVOLUTION:
        break;

    default:
        flags |= FAST_PATH_NO_CONVOLUTION_FILTER;
        break;
    }

    /* Repeat mode: record which repeat modes this image does NOT use */
    switch (image->common.repeat)
    {
    case PIXMAN_REPEAT_NONE:
        flags |=
            FAST_PATH_NO_REFLECT_REPEAT |
            FAST_PATH_NO_PAD_REPEAT     |
            FAST_PATH_NO_NORMAL_REPEAT;
        break;

    case PIXMAN_REPEAT_REFLECT:
        flags |=
            FAST_PATH_NO_PAD_REPEAT     |
            FAST_PATH_NO_NONE_REPEAT    |
            FAST_PATH_NO_NORMAL_REPEAT;
        break;

    case PIXMAN_REPEAT_PAD:
        flags |=
            FAST_PATH_NO_REFLECT_REPEAT |
            FAST_PATH_NO_NONE_REPEAT    |
            FAST_PATH_NO_NORMAL_REPEAT;
        break;

    default:
        flags |=
            FAST_PATH_NO_REFLECT_REPEAT |
            FAST_PATH_NO_PAD_REPEAT     |
            FAST_PATH_NO_NONE_REPEAT;
        break;
    }

    /* Component alpha */
    if (image->common.component_alpha)
        flags |= FAST_PATH_COMPONENT_ALPHA;
    else
        flags |= FAST_PATH_UNIFIED_ALPHA;

    /* assume the fast case; type-specific checks below may clear these */
    flags |= (FAST_PATH_NO_ACCESSORS | FAST_PATH_NARROW_FORMAT);

    /* Type specific checks */
    switch (image->type)
    {
    case SOLID:
        code = PIXMAN_solid;

        if (image->solid.color.alpha == 0xffff)
            flags |= FAST_PATH_IS_OPAQUE;
        break;

    case BITS:
        /* a repeating 1x1 bits image behaves like a solid fill */
        if (image->bits.width == 1        &&
            image->bits.height == 1        &&
            image->common.repeat != PIXMAN_REPEAT_NONE)
        {
            code = PIXMAN_solid;
        }
        else
        {
            code = image->bits.format;
        }

        /* formats without an alpha channel sample as fully opaque */
        if (!PIXMAN_FORMAT_A (image->bits.format)                               &&
            PIXMAN_FORMAT_TYPE (image->bits.format) != PIXMAN_TYPE_GRAY         &&
            PIXMAN_FORMAT_TYPE (image->bits.format) != PIXMAN_TYPE_COLOR)
        {
            flags |= FAST_PATH_SAMPLES_OPAQUE;

            if (image->common.repeat != PIXMAN_REPEAT_NONE)
                flags |= FAST_PATH_IS_OPAQUE;
        }

        if (source_image_needs_out_of_bounds_workaround (&image->bits))
            flags |= FAST_PATH_NEEDS_WORKAROUND;

        if (image->bits.read_func || image->bits.write_func)
            flags &= ~FAST_PATH_NO_ACCESSORS;

        if (PIXMAN_FORMAT_IS_WIDE (image->bits.format))
            flags &= ~FAST_PATH_NARROW_FORMAT;
        break;

    case RADIAL:
        code = PIXMAN_unknown;

        /*
         * As explained in pixman-radial-gradient.c, every point of
         * the plane has a valid associated radius (and thus will be
         * colored) if and only if a is negative (i.e. one of the two
         * circles contains the other one).
         */

        if (image->radial.a >= 0)
            break;

        /* Fall through */

    case LINEAR:
        code = PIXMAN_unknown;

        /* a repeating gradient is opaque iff every stop is opaque */
        if (image->common.repeat != PIXMAN_REPEAT_NONE)
        {
            int i;

            flags |= FAST_PATH_IS_OPAQUE;
            for (i = 0; i < image->gradient.n_stops; ++i)
            {
                if (image->gradient.stops[i].color.alpha != 0xffff)
                {
                    flags &= ~FAST_PATH_IS_OPAQUE;
                    break;
                }
            }
        }
        break;

    default:
        code = PIXMAN_unknown;
        break;
    }

    /* Alpha map */
    if (!image->common.alpha_map)
    {
        flags |= FAST_PATH_NO_ALPHA_MAP;
    }
    else
    {
        if (PIXMAN_FORMAT_IS_WIDE (image->common.alpha_map->format))
            flags &= ~FAST_PATH_NARROW_FORMAT;
    }

    /* Both alpha maps and convolution filters can introduce
     * non-opaqueness in otherwise opaque images. Also
     * an image with component alpha turned on is only opaque
     * if all channels are opaque, so we simply turn it off
     * unconditionally for those images.
     */
    if (image->common.alpha_map                                 ||
        image->common.filter == PIXMAN_FILTER_CONVOLUTION       ||
        image->common.component_alpha)
    {
        flags &= ~(FAST_PATH_IS_OPAQUE | FAST_PATH_SAMPLES_OPAQUE);
    }

    image->common.flags = flags;
    image->common.extended_format_code = code;
}
 
/*
 * Brings a dirty image up to date: recomputes its fast-path flags, then
 * invokes the type-specific property hook, then clears the dirty bit.
 * Recurses into the alpha map so it is validated too.
 */
void
_pixman_image_validate (pixman_image_t *image)
{
    if (image->common.dirty)
    {
        compute_image_info (image);

        /* It is important that property_changed is
         * called *after* compute_image_info() because
         * property_changed() can make use of the flags
         * to set up accessors etc.
         */
        image->common.property_changed (image);

        image->common.dirty = FALSE;
    }

    if (image->common.alpha_map)
        _pixman_image_validate ((pixman_image_t *)image->common.alpha_map);
}
 
/*
 * Installs a copy of `region' (32-bit coordinates) as the image's clip, or
 * drops the clip entirely when region is NULL.  Returns FALSE only when
 * copying the region fails.  Always marks the image dirty.
 */
PIXMAN_EXPORT pixman_bool_t
pixman_image_set_clip_region32 (pixman_image_t *   image,
                                pixman_region32_t *region)
{
    image_common_t *common = (image_common_t *)image;
    pixman_bool_t result = TRUE;

    if (!region)
    {
        _pixman_image_reset_clip_region (image);
    }
    else
    {
        result = pixman_region32_copy (&common->clip_region, region);
        if (result)
            common->have_clip_region = TRUE;
    }

    image_property_changed (image);

    return result;
}
 
/*
 * 16-bit-region variant of pixman_image_set_clip_region32(): copies the
 * given region (converted to 32-bit form) into the image's clip, or drops
 * the clip when region is NULL.  Returns FALSE only on copy failure.
 * Always marks the image dirty.
 */
PIXMAN_EXPORT pixman_bool_t
pixman_image_set_clip_region (pixman_image_t *   image,
                              pixman_region16_t *region)
{
    image_common_t *common = (image_common_t *)image;
    pixman_bool_t result = TRUE;

    if (!region)
    {
        _pixman_image_reset_clip_region (image);
    }
    else
    {
        result = pixman_region32_copy_from_region16 (&common->clip_region, region);
        if (result)
            common->have_clip_region = TRUE;
    }

    image_property_changed (image);

    return result;
}
 
/* Records whether the current clip region was set by the client (as
 * opposed to being server-generated); consulted by the out-of-bounds
 * workaround logic. */
PIXMAN_EXPORT void
pixman_image_set_has_client_clip (pixman_image_t *image,
                                  pixman_bool_t   client_clip)
{
    image->common.client_clip = client_clip;
}
 
/*
 * Sets (a copy of) the image transform.  Passing the identity matrix — or
 * NULL, which is treated as identity — removes any stored transform so the
 * faster no-transform paths can be used.  Returns FALSE only when
 * allocating storage for the copy fails.  Always marks the image dirty.
 *
 * Fix: the original code only handled NULL when no transform was stored
 * (via the early pointer-equality check); with a stored transform, a NULL
 * argument reached memcmp() and dereferenced NULL.  NULL is now handled
 * explicitly, matching later upstream pixman behavior.
 */
PIXMAN_EXPORT pixman_bool_t
pixman_image_set_transform (pixman_image_t *          image,
                            const pixman_transform_t *transform)
{
    static const pixman_transform_t id =
    {
        { { pixman_fixed_1, 0, 0 },
          { 0, pixman_fixed_1, 0 },
          { 0, 0, pixman_fixed_1 } }
    };

    image_common_t *common = (image_common_t *)image;
    pixman_bool_t result;

    if (common->transform == transform)
        return TRUE;

    if (!transform || memcmp (&id, transform, sizeof (pixman_transform_t)) == 0)
    {
        free (common->transform);
        common->transform = NULL;
        result = TRUE;

        goto out;
    }

    if (common->transform == NULL)
        common->transform = malloc (sizeof (pixman_transform_t));

    if (common->transform == NULL)
    {
        result = FALSE;

        goto out;
    }

    memcpy (common->transform, transform, sizeof(pixman_transform_t));

    result = TRUE;

out:
    image_property_changed (image);

    return result;
}
 
/* Sets the repeat (spread) mode used when sampling outside the image and
 * marks the image dirty. */
PIXMAN_EXPORT void
pixman_image_set_repeat (pixman_image_t *image,
                         pixman_repeat_t repeat)
{
    image->common.repeat = repeat;

    image_property_changed (image);
}
 
/*
 * Sets the sampling filter and (for convolution filters) its parameter
 * array, which is copied.  A no-op when both the filter and the params
 * pointer are unchanged.  Returns FALSE only when copying the parameters
 * fails, in which case the image is left untouched.  Marks the image
 * dirty on success.
 */
PIXMAN_EXPORT pixman_bool_t
pixman_image_set_filter (pixman_image_t *      image,
                         pixman_filter_t       filter,
                         const pixman_fixed_t *params,
                         int                   n_params)
{
    image_common_t *common = (image_common_t *)image;
    pixman_fixed_t *new_params;

    /* note: compares the params *pointer*, not the contents */
    if (params == common->filter_params && filter == common->filter)
        return TRUE;

    new_params = NULL;
    if (params)
    {
        new_params = pixman_malloc_ab (n_params, sizeof (pixman_fixed_t));
        if (!new_params)
            return FALSE;

        memcpy (new_params,
                params, n_params * sizeof (pixman_fixed_t));
    }

    common->filter = filter;

    if (common->filter_params)
        free (common->filter_params);

    common->filter_params = new_params;
    common->n_filter_params = n_params;

    image_property_changed (image);
    return TRUE;
}
 
/* Choose whether the clip region is applied when this image is used as
 * a source (and not just as a destination). */
PIXMAN_EXPORT void
pixman_image_set_source_clipping (pixman_image_t *image,
                                  pixman_bool_t   clip_sources)
{
    image->common.clip_sources = clip_sources;

    image_property_changed (image);
}
 
/* Unlike all the other property setters, this function does not
 * copy the content of indexed. Doing this copying is simply
 * way, way too expensive.
 *
 * The caller must therefore keep `indexed' alive for as long as the
 * image uses it.  Only meaningful for BITS images (the cast below is
 * unchecked).
 */
PIXMAN_EXPORT void
pixman_image_set_indexed (pixman_image_t *        image,
                          const pixman_indexed_t *indexed)
{
    bits_image_t *bits = (bits_image_t *)image;

    bits->indexed = indexed;

    image_property_changed (image);
}
 
/* Attach `alpha_map' (a BITS image, or NULL to detach) as the image's
 * external alpha channel, positioned at (x, y) in image space.
 *
 * The alpha map is referenced, and `alpha_count' tracks how many images
 * use a given image as an alpha map so that the two rejection rules
 * below can be enforced (no chains of alpha maps in either direction).
 */
PIXMAN_EXPORT void
pixman_image_set_alpha_map (pixman_image_t *image,
                            pixman_image_t *alpha_map,
                            int16_t         x,
                            int16_t         y)
{
    image_common_t *common = (image_common_t *)image;

    return_if_fail (!alpha_map || alpha_map->type == BITS);

    if (alpha_map && common->alpha_count > 0)
    {
        /* If this image is being used as an alpha map itself,
         * then you can't give it an alpha map of its own.
         */
        return;
    }

    if (alpha_map && alpha_map->common.alpha_map)
    {
        /* If the image has an alpha map of its own,
         * then it can't be used as an alpha map itself
         */
        return;
    }

    if (common->alpha_map != (bits_image_t *)alpha_map)
    {
        /* Drop the reference (and usage count) on the old map first. */
        if (common->alpha_map)
        {
            common->alpha_map->common.alpha_count--;

            pixman_image_unref ((pixman_image_t *)common->alpha_map);
        }

        if (alpha_map)
        {
            common->alpha_map = (bits_image_t *)pixman_image_ref (alpha_map);

            common->alpha_map->common.alpha_count++;
        }
        else
        {
            common->alpha_map = NULL;
        }
    }

    /* The origin is updated even when the map itself is unchanged. */
    common->alpha_origin_x = x;
    common->alpha_origin_y = y;

    image_property_changed (image);
}
 
/* Enable or disable per-channel (component) alpha when this image is
 * used as a mask. */
PIXMAN_EXPORT void
pixman_image_set_component_alpha   (pixman_image_t *image,
                                    pixman_bool_t   component_alpha)
{
    image->common.component_alpha = component_alpha;

    image_property_changed (image);
}
 
/* Return the current component-alpha flag. */
PIXMAN_EXPORT pixman_bool_t
pixman_image_get_component_alpha   (pixman_image_t *image)
{
    return image->common.component_alpha;
}
 
/* Install custom memory read/write callbacks on a BITS image.
 * Images of any other type are left untouched.
 */
PIXMAN_EXPORT void
pixman_image_set_accessors (pixman_image_t *           image,
                            pixman_read_memory_func_t  read_func,
                            pixman_write_memory_func_t write_func)
{
    return_if_fail (image != NULL);

    if (image->type != BITS)
        return;

    image->bits.read_func = read_func;
    image->bits.write_func = write_func;

    image_property_changed (image);
}
 
/* Return the pixel buffer of a BITS image, or NULL for other types. */
PIXMAN_EXPORT uint32_t *
pixman_image_get_data (pixman_image_t *image)
{
    return (image->type == BITS) ? image->bits.bits : NULL;
}
 
/* Return the width in pixels of a BITS image, or 0 for other types. */
PIXMAN_EXPORT int
pixman_image_get_width (pixman_image_t *image)
{
    return (image->type == BITS) ? image->bits.width : 0;
}
 
/* Return the height in pixels of a BITS image, or 0 for other types. */
PIXMAN_EXPORT int
pixman_image_get_height (pixman_image_t *image)
{
    return (image->type == BITS) ? image->bits.height : 0;
}
 
/* Return the row stride in BYTES of a BITS image (rowstride is stored
 * in uint32_t units), or 0 for other types. */
PIXMAN_EXPORT int
pixman_image_get_stride (pixman_image_t *image)
{
    if (image->type != BITS)
        return 0;

    return image->bits.rowstride * (int) sizeof (uint32_t);
}
 
/* Return the color depth of a BITS image's format, or 0 for other types. */
PIXMAN_EXPORT int
pixman_image_get_depth (pixman_image_t *image)
{
    return (image->type == BITS) ? PIXMAN_FORMAT_DEPTH (image->bits.format) : 0;
}
 
/* Return the pixel format of a BITS image, or 0 for other types. */
PIXMAN_EXPORT pixman_format_code_t
pixman_image_get_format (pixman_image_t *image)
{
    return (image->type == BITS) ? image->bits.format : 0;
}
 
/* Sample the image at (0, 0) and return that single pixel as 32-bit
 * ARGB — used when the image acts as a solid source.
 *
 * If the destination `format' is not ARGB channel order, red and blue
 * are swapped so the value matches the destination's layout.
 */
uint32_t
_pixman_image_get_solid (pixman_image_t *     image,
                         pixman_format_code_t format)
{
    uint32_t result;

    _pixman_image_get_scanline_32 (image, 0, 0, 1, &result, NULL);

    /* If necessary, convert RGB <--> BGR. */
    if (PIXMAN_FORMAT_TYPE (format) != PIXMAN_TYPE_ARGB)
    {
        /* Alpha and green stay in place; red and blue trade positions. */
        result = (((result & 0xff000000) >> 0) |
                  ((result & 0x00ff0000) >> 16) |
                  ((result & 0x0000ff00) >> 0) |
                  ((result & 0x000000ff) << 16));
    }

    return result;
}
/programs/develop/libraries/pixman/pixman-implementation.c
0,0 → 1,227
/*
* Copyright © 2009 Red Hat, Inc.
*
* Permission to use, copy, modify, distribute, and sell this software and its
* documentation for any purpose is hereby granted without fee, provided that
* the above copyright notice appear in all copies and that both that
* copyright notice and this permission notice appear in supporting
* documentation, and that the name of Red Hat not be used in advertising or
* publicity pertaining to distribution of the software without specific,
* written prior permission. Red Hat makes no representations about the
* suitability of this software for any purpose. It is provided "as is"
* without express or implied warranty.
*
* THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS
* SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS, IN NO EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY
* SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN
* AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING
* OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS
* SOFTWARE.
*/
 
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif
#include <stdlib.h>
#include "pixman-private.h"
 
/* Fallback combiner: forward a 32-bit combine to the next
 * implementation in the delegate chain. */
static void
delegate_combine_32 (pixman_implementation_t * imp,
                     pixman_op_t               op,
                     uint32_t *                dest,
                     const uint32_t *          src,
                     const uint32_t *          mask,
                     int                       width)
{
    _pixman_implementation_combine_32 (imp->delegate,
                                       op, dest, src, mask, width);
}
 
/* Fallback combiner: forward a 64-bit (16 bits per channel) combine to
 * the delegate. */
static void
delegate_combine_64 (pixman_implementation_t * imp,
                     pixman_op_t               op,
                     uint64_t *                dest,
                     const uint64_t *          src,
                     const uint64_t *          mask,
                     int                       width)
{
    _pixman_implementation_combine_64 (imp->delegate,
                                       op, dest, src, mask, width);
}
 
/* Fallback combiner: forward a 32-bit component-alpha combine to the
 * delegate. */
static void
delegate_combine_32_ca (pixman_implementation_t * imp,
                        pixman_op_t               op,
                        uint32_t *                dest,
                        const uint32_t *          src,
                        const uint32_t *          mask,
                        int                       width)
{
    _pixman_implementation_combine_32_ca (imp->delegate,
                                          op, dest, src, mask, width);
}
 
/* Fallback combiner: forward a 64-bit component-alpha combine to the
 * delegate. */
static void
delegate_combine_64_ca (pixman_implementation_t * imp,
                        pixman_op_t               op,
                        uint64_t *                dest,
                        const uint64_t *          src,
                        const uint64_t *          mask,
                        int                       width)
{
    _pixman_implementation_combine_64_ca (imp->delegate,
                                          op, dest, src, mask, width);
}
 
/* Fallback blt: forward the copy request to the delegate, returning its
 * success/failure result. */
static pixman_bool_t
delegate_blt (pixman_implementation_t * imp,
              uint32_t *                src_bits,
              uint32_t *                dst_bits,
              int                       src_stride,
              int                       dst_stride,
              int                       src_bpp,
              int                       dst_bpp,
              int                       src_x,
              int                       src_y,
              int                       dst_x,
              int                       dst_y,
              int                       width,
              int                       height)
{
    return _pixman_implementation_blt (
        imp->delegate, src_bits, dst_bits, src_stride, dst_stride,
        src_bpp, dst_bpp, src_x, src_y, dst_x, dst_y,
        width, height);
}
 
/* Fallback fill: forward the solid-fill request to the delegate. */
static pixman_bool_t
delegate_fill (pixman_implementation_t *imp,
               uint32_t *               bits,
               int                      stride,
               int                      bpp,
               int                      x,
               int                      y,
               int                      width,
               int                      height,
               uint32_t                 xor)
{
    return _pixman_implementation_fill (
        imp->delegate, bits, stride, bpp, x, y, width, height, xor);
}
 
/* Allocate a new implementation whose entry points all forward to
 * `delegate'; a specific backend then overwrites the entries it can
 * accelerate.  `fast_paths' must be non-NULL.  Returns NULL on
 * allocation failure.
 */
pixman_implementation_t *
_pixman_implementation_create (pixman_implementation_t *delegate,
                               const pixman_fast_path_t *fast_paths)
{
    pixman_implementation_t *imp = malloc (sizeof (pixman_implementation_t));
    pixman_implementation_t *d;
    int i;

    if (!imp)
        return NULL;

    assert (fast_paths);

    /* Make sure the whole delegate chain has the right toplevel */
    imp->delegate = delegate;
    for (d = imp; d != NULL; d = d->delegate)
        d->toplevel = imp;

    /* Fill out function pointers with ones that just delegate
     */
    imp->blt = delegate_blt;
    imp->fill = delegate_fill;

    for (i = 0; i < PIXMAN_N_OPERATORS; ++i)
    {
        imp->combine_32[i] = delegate_combine_32;
        imp->combine_64[i] = delegate_combine_64;
        imp->combine_32_ca[i] = delegate_combine_32_ca;
        imp->combine_64_ca[i] = delegate_combine_64_ca;
    }

    imp->fast_paths = fast_paths;
    return imp;
}
 
/* Dispatch a 32-bit combine through the table entry installed for `op'. */
void
_pixman_implementation_combine_32 (pixman_implementation_t * imp,
                                   pixman_op_t               op,
                                   uint32_t *                dest,
                                   const uint32_t *          src,
                                   const uint32_t *          mask,
                                   int                       width)
{
    imp->combine_32[op] (imp, op, dest, src, mask, width);
}
 
/* Dispatch a 64-bit combine through the table entry installed for `op'. */
void
_pixman_implementation_combine_64 (pixman_implementation_t * imp,
                                   pixman_op_t               op,
                                   uint64_t *                dest,
                                   const uint64_t *          src,
                                   const uint64_t *          mask,
                                   int                       width)
{
    imp->combine_64[op] (imp, op, dest, src, mask, width);
}
 
/* Dispatch a 32-bit component-alpha combine for `op'. */
void
_pixman_implementation_combine_32_ca (pixman_implementation_t * imp,
                                      pixman_op_t               op,
                                      uint32_t *                dest,
                                      const uint32_t *          src,
                                      const uint32_t *          mask,
                                      int                       width)
{
    imp->combine_32_ca[op] (imp, op, dest, src, mask, width);
}
 
/* Dispatch a 64-bit component-alpha combine for `op'. */
void
_pixman_implementation_combine_64_ca (pixman_implementation_t * imp,
                                      pixman_op_t               op,
                                      uint64_t *                dest,
                                      const uint64_t *          src,
                                      const uint64_t *          mask,
                                      int                       width)
{
    imp->combine_64_ca[op] (imp, op, dest, src, mask, width);
}
 
/* Invoke the implementation's blt entry point; a backend either handles
 * the copy or delegates down the chain. */
pixman_bool_t
_pixman_implementation_blt (pixman_implementation_t * imp,
                            uint32_t *                src_bits,
                            uint32_t *                dst_bits,
                            int                       src_stride,
                            int                       dst_stride,
                            int                       src_bpp,
                            int                       dst_bpp,
                            int                       src_x,
                            int                       src_y,
                            int                       dst_x,
                            int                       dst_y,
                            int                       width,
                            int                       height)
{
    return imp->blt (imp, src_bits, dst_bits, src_stride, dst_stride,
                     src_bpp, dst_bpp, src_x, src_y, dst_x, dst_y,
                     width, height);
}
 
/* Invoke the implementation's fill entry point. */
pixman_bool_t
_pixman_implementation_fill (pixman_implementation_t *imp,
                             uint32_t *               bits,
                             int                      stride,
                             int                      bpp,
                             int                      x,
                             int                      y,
                             int                      width,
                             int                      height,
                             uint32_t                 xor)
{
    return imp->fill (imp, bits, stride, bpp, x, y, width, height, xor);
}
 
/programs/develop/libraries/pixman/pixman-linear-gradient.c
0,0 → 1,262
/* -*- Mode: c; c-basic-offset: 4; tab-width: 8; indent-tabs-mode: t; -*- */
/*
* Copyright © 2000 SuSE, Inc.
* Copyright © 2007 Red Hat, Inc.
* Copyright © 2000 Keith Packard, member of The XFree86 Project, Inc.
* 2005 Lars Knoll & Zack Rusin, Trolltech
*
* Permission to use, copy, modify, distribute, and sell this software and its
* documentation for any purpose is hereby granted without fee, provided that
* the above copyright notice appear in all copies and that both that
* copyright notice and this permission notice appear in supporting
* documentation, and that the name of Keith Packard not be used in
* advertising or publicity pertaining to distribution of the software without
* specific, written prior permission. Keith Packard makes no
* representations about the suitability of this software for any purpose. It
* is provided "as is" without express or implied warranty.
*
* THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS
* SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS, IN NO EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY
* SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN
* AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING
* OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS
* SOFTWARE.
*/
 
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif
#include <stdlib.h>
#include "pixman-private.h"
 
/* Decide whether the gradient is effectively constant along scanlines
 * for the given area.  Returns SOURCE_IMAGE_CLASS_HORIZONTAL when the
 * gradient value does not change (after integer truncation) across the
 * height of the area, otherwise SOURCE_IMAGE_CLASS_UNKNOWN.
 */
static source_image_class_t
linear_gradient_classify (pixman_image_t *image,
                          int             x,
                          int             y,
                          int             width,
                          int             height)
{
    source_image_t *source = (source_image_t *)image;
    linear_gradient_t *linear = (linear_gradient_t *)image;
    pixman_vector_t v;
    pixman_fixed_32_32_t l;
    pixman_fixed_48_16_t dx, dy;
    double inc;
    source_image_class_t class;

    class = SOURCE_IMAGE_CLASS_UNKNOWN;

    if (source->common.transform)
    {
        /* projective transformation */
        if (source->common.transform->matrix[2][0] != 0 ||
            source->common.transform->matrix[2][1] != 0 ||
            source->common.transform->matrix[2][2] == 0)
        {
            /* Truly projective (or degenerate): give up on classifying. */
            return class;
        }

        /* Only the change per unit step in y matters: take the second
         * column of the (affine) matrix. */
        v.vector[0] = source->common.transform->matrix[0][1];
        v.vector[1] = source->common.transform->matrix[1][1];
        v.vector[2] = source->common.transform->matrix[2][2];
    }
    else
    {
        v.vector[0] = 0;
        v.vector[1] = pixman_fixed_1;
        v.vector[2] = pixman_fixed_1;
    }

    dx = linear->p2.x - linear->p1.x;
    dy = linear->p2.y - linear->p1.y;

    /* Squared length of the gradient vector, in 32.32 fixed point. */
    l = dx * dx + dy * dy;

    if (l == 0)
        return class;

    /*
     * compute how much the input of the gradient walked changes
     * when moving vertically through the whole image
     */
    inc = height * (double) pixman_fixed_1 * pixman_fixed_1 *
        (dx * v.vector[0] + dy * v.vector[1]) /
        (v.vector[2] * (double) l);

    /* check that casting to integer would result in 0 */
    if (-1 < inc && inc < 1)
        class = SOURCE_IMAGE_CLASS_HORIZONTAL;

    return class;
}
 
/* Produce one scanline of the linear gradient in 32-bit ARGB.
 *
 * The gradient parameter t for a point P is the projection of P onto
 * the p1->p2 axis, normalized so t=0 at p1 and t=1 at p2.  Three cases
 * are handled: constant color across the line, affine stepping (t grows
 * by a fixed increment per pixel), and fully projective (t recomputed
 * per pixel).  When `mask' is given, pixels with zero mask are skipped
 * (their buffer slot is left untouched).
 */
static void
linear_gradient_get_scanline_32 (pixman_image_t *image,
                                 int             x,
                                 int             y,
                                 int             width,
                                 uint32_t *      buffer,
                                 const uint32_t *mask)
{
    pixman_vector_t v, unit;
    pixman_fixed_32_32_t l;
    pixman_fixed_48_16_t dx, dy;
    gradient_t *gradient = (gradient_t *)image;
    source_image_t *source = (source_image_t *)image;
    linear_gradient_t *linear = (linear_gradient_t *)image;
    uint32_t *end = buffer + width;
    pixman_gradient_walker_t walker;

    _pixman_gradient_walker_init (&walker, gradient, source->common.repeat);

    /* reference point is the center of the pixel */
    v.vector[0] = pixman_int_to_fixed (x) + pixman_fixed_1 / 2;
    v.vector[1] = pixman_int_to_fixed (y) + pixman_fixed_1 / 2;
    v.vector[2] = pixman_fixed_1;

    if (source->common.transform)
    {
        if (!pixman_transform_point_3d (source->common.transform, &v))
            return;

        /* Per-pixel step = first column of the transform matrix. */
        unit.vector[0] = source->common.transform->matrix[0][0];
        unit.vector[1] = source->common.transform->matrix[1][0];
        unit.vector[2] = source->common.transform->matrix[2][0];
    }
    else
    {
        unit.vector[0] = pixman_fixed_1;
        unit.vector[1] = 0;
        unit.vector[2] = 0;
    }

    dx = linear->p2.x - linear->p1.x;
    dy = linear->p2.y - linear->p1.y;

    /* Squared length of the gradient axis. */
    l = dx * dx + dy * dy;

    if (l == 0 || unit.vector[2] == 0)
    {
        /* affine transformation only */
        pixman_fixed_32_32_t t, next_inc;
        double inc;

        if (l == 0 || v.vector[2] == 0)
        {
            /* Degenerate gradient or point at infinity: constant t=0. */
            t = 0;
            inc = 0;
        }
        else
        {
            double invden, v2;

            invden = pixman_fixed_1 * (double) pixman_fixed_1 /
                (l * (double) v.vector[2]);
            v2 = v.vector[2] * (1. / pixman_fixed_1);
            t = ((dx * v.vector[0] + dy * v.vector[1]) -
                 (dx * linear->p1.x + dy * linear->p1.y) * v2) * invden;
            inc = (dx * unit.vector[0] + dy * unit.vector[1]) * invden;
        }
        next_inc = 0;

        if (((pixman_fixed_32_32_t )(inc * width)) == 0)
        {
            /* t does not change measurably over the scanline: fill with
             * a single color (mask intentionally ignored here). */
            register uint32_t color;

            color = _pixman_gradient_walker_pixel (&walker, t);
            while (buffer < end)
                *buffer++ = color;
        }
        else
        {
            int i;

            i = 0;
            while (buffer < end)
            {
                if (!mask || *mask++)
                {
                    *buffer = _pixman_gradient_walker_pixel (&walker,
                                                             t + next_inc);
                }
                i++;
                /* Recompute from i each time to avoid accumulating
                 * floating-point error. */
                next_inc = inc * i;
                buffer++;
            }
        }
    }
    else
    {
        /* projective transformation */
        double t;

        t = 0;

        while (buffer < end)
        {
            if (!mask || *mask++)
            {
                if (v.vector[2] != 0)
                {
                    double invden, v2;

                    invden = pixman_fixed_1 * (double) pixman_fixed_1 /
                        (l * (double) v.vector[2]);
                    v2 = v.vector[2] * (1. / pixman_fixed_1);
                    t = ((dx * v.vector[0] + dy * v.vector[1]) -
                         (dx * linear->p1.x + dy * linear->p1.y) * v2) * invden;
                }

                *buffer = _pixman_gradient_walker_pixel (&walker, t);
            }

            ++buffer;

            v.vector[0] += unit.vector[0];
            v.vector[1] += unit.vector[1];
            v.vector[2] += unit.vector[2];
        }
    }
}
 
/* Re-install the scanline fetchers after any property change; the
 * 64-bit path goes through the generic 32->64 widening helper. */
static void
linear_gradient_property_changed (pixman_image_t *image)
{
    image->common.get_scanline_32 = linear_gradient_get_scanline_32;
    image->common.get_scanline_64 = _pixman_image_get_scanline_generic_64;
}
 
/* Create a linear gradient image running from p1 to p2 with the given
 * color stops.  The stops are copied.  Returns NULL on allocation
 * failure.
 */
PIXMAN_EXPORT pixman_image_t *
pixman_image_create_linear_gradient (pixman_point_fixed_t *        p1,
                                     pixman_point_fixed_t *        p2,
                                     const pixman_gradient_stop_t *stops,
                                     int                           n_stops)
{
    pixman_image_t *image = _pixman_image_allocate ();
    linear_gradient_t *linear;

    if (!image)
        return NULL;

    linear = &image->linear;

    if (!_pixman_init_gradient (&linear->common, stops, n_stops))
    {
        free (image);
        return NULL;
    }

    image->type = LINEAR;
    linear->p1 = *p1;
    linear->p2 = *p2;
    image->common.classify = linear_gradient_classify;
    image->common.property_changed = linear_gradient_property_changed;

    return image;
}
 
/programs/develop/libraries/pixman/pixman-matrix.c
0,0 → 1,768
/*
* Copyright © 2008 Keith Packard
*
* Permission to use, copy, modify, distribute, and sell this software and its
* documentation for any purpose is hereby granted without fee, provided that
* the above copyright notice appear in all copies and that both that copyright
* notice and this permission notice appear in supporting documentation, and
* that the name of the copyright holders not be used in advertising or
* publicity pertaining to distribution of the software without specific,
* written prior permission. The copyright holders make no representations
* about the suitability of this software for any purpose. It is provided "as
* is" without express or implied warranty.
*
* THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
* INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
* EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
* CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
* DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
* OF THIS SOFTWARE.
*/
 
/*
* Matrix interfaces
*/
 
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
 
#include <math.h>
#include <string.h>
#include "pixman-private.h"
 
#define F(x) pixman_int_to_fixed (x)
 
/* Initialize a fixed-point transform to the identity matrix. */
PIXMAN_EXPORT void
pixman_transform_init_identity (struct pixman_transform *matrix)
{
    int d;

    /* Zero everything, then place 1.0 (fixed point) on the diagonal. */
    memset (matrix, '\0', sizeof (struct pixman_transform));
    for (d = 0; d < 3; d++)
        matrix->matrix[d][d] = F (1);
}
 
/* 34.30 fixed point, used by pixman_transform_point below. */
typedef pixman_fixed_32_32_t pixman_fixed_34_30_t;

/* Multiply `vector' by `transform' WITHOUT dividing through by the
 * homogeneous coordinate.  Returns FALSE if any component overflows
 * 48.16 range or if the resulting w component is zero; `vector' is
 * updated in place on success (and also when only the w check fails).
 */
PIXMAN_EXPORT pixman_bool_t
pixman_transform_point_3d (const struct pixman_transform *transform,
                           struct pixman_vector *         vector)
{
    struct pixman_vector result;
    pixman_fixed_32_32_t partial;
    pixman_fixed_48_16_t v;
    int i, j;

    for (j = 0; j < 3; j++)
    {
        v = 0;
        for (i = 0; i < 3; i++)
        {
            /* 16.16 * 16.16 -> 32.32; shift back to 48.16 to accumulate. */
            partial = ((pixman_fixed_48_16_t) transform->matrix[j][i] *
                       (pixman_fixed_48_16_t) vector->vector[i]);
            v += partial >> 16;
        }

        if (v > pixman_max_fixed_48_16 || v < pixman_min_fixed_48_16)
            return FALSE;

        result.vector[j] = (pixman_fixed_t) v;
    }
    *vector = result;

    if (!result.vector[2])
        return FALSE;

    return TRUE;
}
 
/* Multiply `vector' by `transform' and divide through by the resulting
 * homogeneous coordinate, producing a 2D point with w = 1.  Returns
 * FALSE on overflow or when the projected w rounds to zero; in that
 * case `vector' may be partially updated.
 */
PIXMAN_EXPORT pixman_bool_t
pixman_transform_point (const struct pixman_transform *transform,
                        struct pixman_vector *         vector)
{
    pixman_fixed_32_32_t partial;
    pixman_fixed_34_30_t v[3];
    pixman_fixed_48_16_t quo;
    int i, j;

    for (j = 0; j < 3; j++)
    {
        v[j] = 0;
        for (i = 0; i < 3; i++)
        {
            /* Accumulate in 34.30 (shift the 32.32 product right by 2)
             * to keep extra fractional precision for the division. */
            partial = ((pixman_fixed_32_32_t) transform->matrix[j][i] *
                       (pixman_fixed_32_32_t) vector->vector[i]);
            v[j] += partial >> 2;
        }
    }

    if (!(v[2] >> 16))
        return FALSE;

    for (j = 0; j < 2; j++)
    {
        quo = v[j] / (v[2] >> 16);
        if (quo > pixman_max_fixed_48_16 || quo < pixman_min_fixed_48_16)
            return FALSE;
        vector->vector[j] = (pixman_fixed_t) quo;
    }

    vector->vector[2] = pixman_fixed_1;
    return TRUE;
}
 
/* dst = l * r for fixed-point 3x3 matrices.  Computed into a local so
 * dst may alias l or r.  Returns FALSE (leaving dst untouched) if any
 * entry overflows the 16.16 range.
 */
PIXMAN_EXPORT pixman_bool_t
pixman_transform_multiply (struct pixman_transform *      dst,
                           const struct pixman_transform *l,
                           const struct pixman_transform *r)
{
    struct pixman_transform d;
    int dx, dy;
    int o;

    for (dy = 0; dy < 3; dy++)
    {
        for (dx = 0; dx < 3; dx++)
        {
            pixman_fixed_48_16_t v;
            pixman_fixed_32_32_t partial;

            v = 0;
            for (o = 0; o < 3; o++)
            {
                partial =
                    (pixman_fixed_32_32_t) l->matrix[dy][o] *
                    (pixman_fixed_32_32_t) r->matrix[o][dx];

                v += partial >> 16;
            }

            if (v > pixman_max_fixed_48_16 || v < pixman_min_fixed_48_16)
                return FALSE;

            d.matrix[dy][dx] = (pixman_fixed_t) v;
        }
    }

    *dst = d;
    return TRUE;
}
 
/* Initialize a fixed-point transform to a pure scale by (sx, sy). */
PIXMAN_EXPORT void
pixman_transform_init_scale (struct pixman_transform *t,
                             pixman_fixed_t           sx,
                             pixman_fixed_t           sy)
{
    memset (t, '\0', sizeof (*t));

    t->matrix[2][2] = F (1);
    t->matrix[1][1] = sy;
    t->matrix[0][0] = sx;
}
 
/* Reciprocal in 16.16 fixed point: 1.0 / x.
 * Callers (pixman_transform_scale) reject x == 0 before calling, so no
 * divide-by-zero guard is needed here. */
static pixman_fixed_t
fixed_inverse (pixman_fixed_t x)
{
    return (pixman_fixed_t) ((((pixman_fixed_48_16_t) F (1)) * F (1)) / x);
}
 
/* Append a scale to `forward' and the inverse scale to `reverse'
 * (either pointer may be NULL to skip that side).  Returns FALSE if
 * sx or sy is zero or if a matrix multiply overflows; `forward' may
 * already be updated when the `reverse' multiply fails.
 */
PIXMAN_EXPORT pixman_bool_t
pixman_transform_scale (struct pixman_transform *forward,
                        struct pixman_transform *reverse,
                        pixman_fixed_t           sx,
                        pixman_fixed_t           sy)
{
    struct pixman_transform t;

    if (sx == 0 || sy == 0)
        return FALSE;

    if (forward)
    {
        pixman_transform_init_scale (&t, sx, sy);
        if (!pixman_transform_multiply (forward, &t, forward))
            return FALSE;
    }

    if (reverse)
    {
        /* The inverse transform scales by the reciprocals, applied on
         * the other side of the product. */
        pixman_transform_init_scale (&t, fixed_inverse (sx),
                                     fixed_inverse (sy));
        if (!pixman_transform_multiply (reverse, reverse, &t))
            return FALSE;
    }

    return TRUE;
}
 
/* Initialize a fixed-point rotation matrix from cos/sin values
 * (both in 16.16 fixed point). */
PIXMAN_EXPORT void
pixman_transform_init_rotate (struct pixman_transform *t,
                              pixman_fixed_t           c,
                              pixman_fixed_t           s)
{
    t->matrix[0][0] = c;
    t->matrix[0][1] = -s;
    t->matrix[0][2] = 0;

    t->matrix[1][0] = s;
    t->matrix[1][1] = c;
    t->matrix[1][2] = 0;

    t->matrix[2][0] = 0;
    t->matrix[2][1] = 0;
    t->matrix[2][2] = F (1);
}
 
/* Append a rotation (given as cos/sin) to `forward' and the opposite
 * rotation to `reverse'; either pointer may be NULL.  Returns FALSE on
 * multiply overflow; `forward' may already be updated in that case.
 */
PIXMAN_EXPORT pixman_bool_t
pixman_transform_rotate (struct pixman_transform *forward,
                         struct pixman_transform *reverse,
                         pixman_fixed_t           c,
                         pixman_fixed_t           s)
{
    struct pixman_transform t;

    if (forward)
    {
        pixman_transform_init_rotate (&t, c, s);
        if (!pixman_transform_multiply (forward, &t, forward))
            return FALSE;
    }

    if (reverse)
    {
        /* Rotating by -s undoes rotating by s. */
        pixman_transform_init_rotate (&t, c, -s);
        if (!pixman_transform_multiply (reverse, reverse, &t))
            return FALSE;
    }

    return TRUE;
}
 
/* Initialize a fixed-point translation matrix moving by (tx, ty). */
PIXMAN_EXPORT void
pixman_transform_init_translate (struct pixman_transform *t,
                                 pixman_fixed_t           tx,
                                 pixman_fixed_t           ty)
{
    t->matrix[0][0] = F (1);
    t->matrix[0][1] = 0;
    t->matrix[0][2] = tx;

    t->matrix[1][0] = 0;
    t->matrix[1][1] = F (1);
    t->matrix[1][2] = ty;

    t->matrix[2][0] = 0;
    t->matrix[2][1] = 0;
    t->matrix[2][2] = F (1);
}
 
/* Append a translation to `forward' and the opposite translation to
 * `reverse'; either pointer may be NULL.  Returns FALSE on multiply
 * overflow; `forward' may already be updated in that case.
 */
PIXMAN_EXPORT pixman_bool_t
pixman_transform_translate (struct pixman_transform *forward,
                            struct pixman_transform *reverse,
                            pixman_fixed_t           tx,
                            pixman_fixed_t           ty)
{
    struct pixman_transform t;

    if (forward)
    {
        pixman_transform_init_translate (&t, tx, ty);

        if (!pixman_transform_multiply (forward, &t, forward))
            return FALSE;
    }

    if (reverse)
    {
        pixman_transform_init_translate (&t, -tx, -ty);

        if (!pixman_transform_multiply (reverse, reverse, &t))
            return FALSE;
    }

    return TRUE;
}
 
/* Replace `b' with the axis-aligned bounding box of its four corners
 * after transformation by `matrix' (floor of mins, ceiling of maxes).
 * Returns FALSE if any corner fails to transform; `b' may be partially
 * updated in that case.
 */
PIXMAN_EXPORT pixman_bool_t
pixman_transform_bounds (const struct pixman_transform *matrix,
                         struct pixman_box16 *          b)
{
    struct pixman_vector v[4];
    int i;
    int x1, y1, x2, y2;

    /* The four corners of the box, as homogeneous fixed-point points. */
    v[0].vector[0] = F (b->x1);
    v[0].vector[1] = F (b->y1);
    v[0].vector[2] = F (1);

    v[1].vector[0] = F (b->x2);
    v[1].vector[1] = F (b->y1);
    v[1].vector[2] = F (1);

    v[2].vector[0] = F (b->x2);
    v[2].vector[1] = F (b->y2);
    v[2].vector[2] = F (1);

    v[3].vector[0] = F (b->x1);
    v[3].vector[1] = F (b->y2);
    v[3].vector[2] = F (1);

    for (i = 0; i < 4; i++)
    {
        if (!pixman_transform_point (matrix, &v[i]))
            return FALSE;

        x1 = pixman_fixed_to_int (v[i].vector[0]);
        y1 = pixman_fixed_to_int (v[i].vector[1]);
        x2 = pixman_fixed_to_int (pixman_fixed_ceil (v[i].vector[0]));
        y2 = pixman_fixed_to_int (pixman_fixed_ceil (v[i].vector[1]));

        if (i == 0)
        {
            b->x1 = x1;
            b->y1 = y1;
            b->x2 = x2;
            b->y2 = y2;
        }
        else
        {
            /* Grow the box to cover this corner too. */
            if (x1 < b->x1) b->x1 = x1;
            if (y1 < b->y1) b->y1 = y1;
            if (x2 > b->x2) b->x2 = x2;
            if (y2 > b->y2) b->y2 = y2;
        }
    }

    return TRUE;
}
 
/* Invert a fixed-point transform by converting to double precision,
 * inverting there, and converting back.  Returns FALSE if the matrix
 * is singular or the inverse does not fit in 16.16 fixed point.
 */
PIXMAN_EXPORT pixman_bool_t
pixman_transform_invert (struct pixman_transform *      dst,
                         const struct pixman_transform *src)
{
    struct pixman_f_transform m, r;

    pixman_f_transform_from_pixman_transform (&m, src);

    return pixman_f_transform_invert (&r, &m) &&
           pixman_transform_from_pixman_f_transform (dst, &r);
}
 
/* TRUE when |a - b| <= epsilon, in fixed-point units. */
static pixman_bool_t
within_epsilon (pixman_fixed_t a,
                pixman_fixed_t b,
                pixman_fixed_t epsilon)
{
    pixman_fixed_t delta = a - b;

    return (delta < 0 ? -delta : delta) <= epsilon;
}
 
/* Tolerance for classifying fixed-point matrices: entries within
 * 2/65536 of the target value count as equal. */
#define EPSILON (pixman_fixed_t) (2)

#define IS_SAME(a, b) (within_epsilon (a, b, EPSILON))
#define IS_ZERO(a)    (within_epsilon (a, 0, EPSILON))
#define IS_ONE(a)     (within_epsilon (a, F (1), EPSILON))
/* Entry is +1, -1 or 0. */
#define IS_UNIT(a)                                          \
    (within_epsilon (a, F (1), EPSILON) ||                  \
     within_epsilon (a, F (-1), EPSILON) ||                 \
     IS_ZERO (a))
/* Entry has (approximately) no fractional part. */
#define IS_INT(a)    (IS_ZERO (pixman_fixed_frac (a)))
 
/* TRUE when the transform is (approximately) the identity.  Because the
 * matrix is homogeneous, any nonzero uniform diagonal with zero
 * off-diagonal entries qualifies — only the ratios matter. */
PIXMAN_EXPORT pixman_bool_t
pixman_transform_is_identity (const struct pixman_transform *t)
{
    return (IS_SAME (t->matrix[0][0], t->matrix[1][1]) &&
            IS_SAME (t->matrix[0][0], t->matrix[2][2]) &&
            !IS_ZERO (t->matrix[0][0]) &&
            IS_ZERO (t->matrix[0][1]) &&
            IS_ZERO (t->matrix[0][2]) &&
            IS_ZERO (t->matrix[1][0]) &&
            IS_ZERO (t->matrix[1][2]) &&
            IS_ZERO (t->matrix[2][0]) &&
            IS_ZERO (t->matrix[2][1]));
}
 
/* TRUE when the transform is (approximately) a pure, non-degenerate
 * scale: nonzero diagonal, zero everywhere else. */
PIXMAN_EXPORT pixman_bool_t
pixman_transform_is_scale (const struct pixman_transform *t)
{
    return (!IS_ZERO (t->matrix[0][0]) &&
            IS_ZERO (t->matrix[0][1]) &&
            IS_ZERO (t->matrix[0][2]) &&

            IS_ZERO (t->matrix[1][0]) &&
            !IS_ZERO (t->matrix[1][1]) &&
            IS_ZERO (t->matrix[1][2]) &&

            IS_ZERO (t->matrix[2][0]) &&
            IS_ZERO (t->matrix[2][1]) &&
            !IS_ZERO (t->matrix[2][2]));
}
 
/* TRUE when the transform is (approximately) a translation by whole
 * pixels: unit diagonal, integer translation column, zeros elsewhere. */
PIXMAN_EXPORT pixman_bool_t
pixman_transform_is_int_translate (const struct pixman_transform *t)
{
    return (IS_ONE (t->matrix[0][0]) &&
            IS_ZERO (t->matrix[0][1]) &&
            IS_INT (t->matrix[0][2]) &&

            IS_ZERO (t->matrix[1][0]) &&
            IS_ONE (t->matrix[1][1]) &&
            IS_INT (t->matrix[1][2]) &&

            IS_ZERO (t->matrix[2][0]) &&
            IS_ZERO (t->matrix[2][1]) &&
            IS_ONE (t->matrix[2][2]));
}
 
/* TRUE when b is (approximately) the inverse of a, i.e. a * b is the
 * identity.
 *
 * Fix: the original ignored the return value of
 * pixman_transform_multiply(), so on fixed-point overflow it went on to
 * read the uninitialized product.  Overflow now simply reports "not
 * inverses".
 */
PIXMAN_EXPORT pixman_bool_t
pixman_transform_is_inverse (const struct pixman_transform *a,
                             const struct pixman_transform *b)
{
    struct pixman_transform t;

    if (!pixman_transform_multiply (&t, a, b))
        return FALSE;

    return pixman_transform_is_identity (&t);
}
 
/* Convert a 16.16 fixed-point transform to double precision. */
PIXMAN_EXPORT void
pixman_f_transform_from_pixman_transform (struct pixman_f_transform *    ft,
                                          const struct pixman_transform *t)
{
    int row, col;

    for (row = 0; row < 3; row++)
        for (col = 0; col < 3; col++)
            ft->m[row][col] = pixman_fixed_to_double (t->matrix[row][col]);
}
 
/* Convert a double-precision transform to 16.16 fixed point, rounding
 * each entry to the nearest fixed-point value.  Returns FALSE (with
 * `t' possibly partially written) if an entry is out of the
 * representable range.
 */
PIXMAN_EXPORT pixman_bool_t
pixman_transform_from_pixman_f_transform (struct pixman_transform *        t,
                                          const struct pixman_f_transform *ft)
{
    int i, j;

    for (j = 0; j < 3; j++)
    {
        for (i = 0; i < 3; i++)
        {
            /* 16.16 can hold roughly +/-32768; reject near the edge. */
            double d = ft->m[j][i];
            if (d < -32767.0 || d > 32767.0)
                return FALSE;
            /* Round to nearest: scale, add 0.5, floor. */
            d = d * 65536.0 + 0.5;
            t->matrix[j][i] = (pixman_fixed_t) floor (d);
        }
    }

    return TRUE;
}
 
/* Invert a double-precision 3x3 transform via the adjugate matrix.
 * Returns FALSE if the determinant is zero (dst untouched in that
 * case).  `dst' must not alias `src'.
 *
 * The local `a'/`b' tables map a row/column index to the other two
 * indices used when forming 2x2 cofactors.
 *
 * Fix: removed the former file-scope "static const int a[3] = {3,3,2}"
 * and "b[3] = {2,1,1}" tables — they were shadowed by the locals below,
 * referenced nowhere, and contained index 3, which is out of range for
 * the 3x3 matrices used in this file.
 */
PIXMAN_EXPORT pixman_bool_t
pixman_f_transform_invert (struct pixman_f_transform *      dst,
                           const struct pixman_f_transform *src)
{
    double det;
    int i, j;
    static int a[3] = { 2, 2, 1 };
    static int b[3] = { 1, 0, 0 };

    /* Determinant by cofactor expansion along the first column. */
    det = 0;
    for (i = 0; i < 3; i++)
    {
        double p;
        int ai = a[i];
        int bi = b[i];
        p = src->m[i][0] * (src->m[ai][2] * src->m[bi][1] -
                            src->m[ai][1] * src->m[bi][2]);
        if (i == 1)
            p = -p;
        det += p;
    }

    if (det == 0)
        return FALSE;

    det = 1 / det;

    /* dst = adjugate(src) / det; note the [j][i] transpose. */
    for (j = 0; j < 3; j++)
    {
        for (i = 0; i < 3; i++)
        {
            double p;
            int ai = a[i];
            int aj = a[j];
            int bi = b[i];
            int bj = b[j];

            p = (src->m[ai][aj] * src->m[bi][bj] -
                 src->m[ai][bj] * src->m[bi][aj]);
            if (((i + j) & 1) != 0)
                p = -p;
            dst->m[j][i] = det * p;
        }
    }

    return TRUE;
}
 
/* Transform `v' and divide through by the homogeneous coordinate,
 * producing a 2D point with w = 1.  Returns FALSE when the transformed
 * w is zero; `v' is left unchanged in that case.
 */
PIXMAN_EXPORT pixman_bool_t
pixman_f_transform_point (const struct pixman_f_transform *t,
                          struct pixman_f_vector *         v)
{
    struct pixman_f_vector result;
    int i, j;
    double a;

    for (j = 0; j < 3; j++)
    {
        a = 0;
        for (i = 0; i < 3; i++)
            a += t->m[j][i] * v->v[i];
        result.v[j] = a;
    }

    if (!result.v[2])
        return FALSE;

    for (j = 0; j < 2; j++)
        v->v[j] = result.v[j] / result.v[2];

    v->v[2] = 1;

    return TRUE;
}
 
PIXMAN_EXPORT void
pixman_f_transform_point_3d (const struct pixman_f_transform *t,
struct pixman_f_vector * v)
{
struct pixman_f_vector result;
int i, j;
double a;
 
for (j = 0; j < 3; j++)
{
a = 0;
for (i = 0; i < 3; i++)
a += t->m[j][i] * v->v[i];
result.v[j] = a;
}
*v = result;
}
 
/* dst = l * r for double-precision 3x3 matrices.  Computed into a local
 * so dst may alias l or r. */
PIXMAN_EXPORT void
pixman_f_transform_multiply (struct pixman_f_transform *      dst,
                             const struct pixman_f_transform *l,
                             const struct pixman_f_transform *r)
{
    struct pixman_f_transform d;
    int dx, dy;
    int o;

    for (dy = 0; dy < 3; dy++)
    {
        for (dx = 0; dx < 3; dx++)
        {
            double v = 0;
            for (o = 0; o < 3; o++)
                v += l->m[dy][o] * r->m[o][dx];
            d.m[dy][dx] = v;
        }
    }

    *dst = d;
}
 
/* Initialize a double-precision transform to a pure scale by (sx, sy). */
PIXMAN_EXPORT void
pixman_f_transform_init_scale (struct pixman_f_transform *t,
                               double                     sx,
                               double                     sy)
{
    int row, col;

    for (row = 0; row < 3; row++)
        for (col = 0; col < 3; col++)
            t->m[row][col] = 0;

    t->m[0][0] = sx;
    t->m[1][1] = sy;
    t->m[2][2] = 1;
}
 
/* Append a scale to `forward' and the reciprocal scale to `reverse';
 * either pointer may be NULL.  Returns FALSE when sx or sy is zero.
 */
PIXMAN_EXPORT pixman_bool_t
pixman_f_transform_scale (struct pixman_f_transform *forward,
                          struct pixman_f_transform *reverse,
                          double                     sx,
                          double                     sy)
{
    struct pixman_f_transform t;

    if (sx == 0 || sy == 0)
        return FALSE;

    if (forward)
    {
        pixman_f_transform_init_scale (&t, sx, sy);
        pixman_f_transform_multiply (forward, &t, forward);
    }

    if (reverse)
    {
        pixman_f_transform_init_scale (&t, 1 / sx, 1 / sy);
        pixman_f_transform_multiply (reverse, reverse, &t);
    }

    return TRUE;
}
 
/* Initialize a double-precision rotation matrix from cos/sin values. */
PIXMAN_EXPORT void
pixman_f_transform_init_rotate (struct pixman_f_transform *t,
                                double                     c,
                                double                     s)
{
    int row, col;

    for (row = 0; row < 3; row++)
        for (col = 0; col < 3; col++)
            t->m[row][col] = 0;

    t->m[0][0] = c;
    t->m[0][1] = -s;
    t->m[1][0] = s;
    t->m[1][1] = c;
    t->m[2][2] = 1;
}
 
/* Append a rotation (given as cos/sin) to `forward' and the opposite
 * rotation to `reverse'; either pointer may be NULL.  Always succeeds.
 */
PIXMAN_EXPORT pixman_bool_t
pixman_f_transform_rotate (struct pixman_f_transform *forward,
                           struct pixman_f_transform *reverse,
                           double                     c,
                           double                     s)
{
    struct pixman_f_transform t;

    if (forward)
    {
        pixman_f_transform_init_rotate (&t, c, s);
        pixman_f_transform_multiply (forward, &t, forward);
    }

    if (reverse)
    {
        pixman_f_transform_init_rotate (&t, c, -s);
        pixman_f_transform_multiply (reverse, reverse, &t);
    }

    return TRUE;
}
 
/* Initialize a double-precision translation matrix moving by (tx, ty). */
PIXMAN_EXPORT void
pixman_f_transform_init_translate (struct pixman_f_transform *t,
                                   double                     tx,
                                   double                     ty)
{
    int row, col;

    for (row = 0; row < 3; row++)
        for (col = 0; col < 3; col++)
            t->m[row][col] = (row == col) ? 1 : 0;

    t->m[0][2] = tx;
    t->m[1][2] = ty;
}
 
/* Append a translation to `forward' and the opposite translation to
 * `reverse'; either pointer may be NULL.  Always succeeds.
 */
PIXMAN_EXPORT pixman_bool_t
pixman_f_transform_translate (struct pixman_f_transform *forward,
                              struct pixman_f_transform *reverse,
                              double                     tx,
                              double                     ty)
{
    struct pixman_f_transform t;

    if (forward)
    {
        pixman_f_transform_init_translate (&t, tx, ty);
        pixman_f_transform_multiply (forward, &t, forward);
    }

    if (reverse)
    {
        pixman_f_transform_init_translate (&t, -tx, -ty);
        pixman_f_transform_multiply (reverse, reverse, &t);
    }

    return TRUE;
}
 
/* Replace `b' with the axis-aligned bounding box of its four corners
 * after transformation by `t' (floor of mins, ceiling of maxes).
 * Returns FALSE if any corner projects to w == 0; `b' may be partially
 * updated in that case.
 */
PIXMAN_EXPORT pixman_bool_t
pixman_f_transform_bounds (const struct pixman_f_transform *t,
                           struct pixman_box16 *            b)
{
    struct pixman_f_vector v[4];
    int i;
    int x1, y1, x2, y2;

    /* The four corners of the box as homogeneous points. */
    v[0].v[0] = b->x1;
    v[0].v[1] = b->y1;
    v[0].v[2] = 1;
    v[1].v[0] = b->x2;
    v[1].v[1] = b->y1;
    v[1].v[2] = 1;
    v[2].v[0] = b->x2;
    v[2].v[1] = b->y2;
    v[2].v[2] = 1;
    v[3].v[0] = b->x1;
    v[3].v[1] = b->y2;
    v[3].v[2] = 1;

    for (i = 0; i < 4; i++)
    {
        if (!pixman_f_transform_point (t, &v[i]))
            return FALSE;

        x1 = floor (v[i].v[0]);
        y1 = floor (v[i].v[1]);
        x2 = ceil (v[i].v[0]);
        y2 = ceil (v[i].v[1]);

        if (i == 0)
        {
            b->x1 = x1;
            b->y1 = y1;
            b->x2 = x2;
            b->y2 = y2;
        }
        else
        {
            /* Grow the box to cover this corner too. */
            if (x1 < b->x1) b->x1 = x1;
            if (y1 < b->y1) b->y1 = y1;
            if (x2 > b->x2) b->x2 = x2;
            if (y2 > b->y2) b->y2 = y2;
        }
    }

    return TRUE;
}
 
/* Initialize a double-precision transform to the identity matrix. */
PIXMAN_EXPORT void
pixman_f_transform_init_identity (struct pixman_f_transform *t)
{
    int row, col;

    for (row = 0; row < 3; row++)
        for (col = 0; col < 3; col++)
            t->m[row][col] = (row == col) ? 1.0 : 0.0;
}
/programs/develop/libraries/pixman/pixman-mmx.c
0,0 → 1,3378
/*
* Copyright © 2004, 2005 Red Hat, Inc.
* Copyright © 2004 Nicholas Miell
* Copyright © 2005 Trolltech AS
*
* Permission to use, copy, modify, distribute, and sell this software and its
* documentation for any purpose is hereby granted without fee, provided that
* the above copyright notice appear in all copies and that both that
* copyright notice and this permission notice appear in supporting
* documentation, and that the name of Red Hat not be used in advertising or
* publicity pertaining to distribution of the software without specific,
* written prior permission. Red Hat makes no representations about the
* suitability of this software for any purpose. It is provided "as is"
* without express or implied warranty.
*
* THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS
* SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS, IN NO EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY
* SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN
* AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING
* OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS
* SOFTWARE.
*
* Author: Søren Sandmann (sandmann@redhat.com)
* Minor Improvements: Nicholas Miell (nmiell@gmail.com)
* MMX code paths for fbcompose.c by Lars Knoll (lars@trolltech.com)
*
* Based on work by Owen Taylor
*/
 
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif
 
#ifdef USE_MMX
 
#include <mmintrin.h>
#include "pixman-private.h"
#include "pixman-combine32.h"
 
#define no_vERBOSE
 
#ifdef VERBOSE
#define CHECKPOINT() error_f ("at %s %d\n", __FUNCTION__, __LINE__)
#else
#define CHECKPOINT()
#endif
 
/* Notes about writing mmx code
*
* give memory operands as the second operand. If you give it as the
* first, gcc will first load it into a register, then use that
* register
*
* ie. use
*
* _mm_mullo_pi16 (x, mmx_constant);
*
* not
*
* _mm_mullo_pi16 (mmx_constant, x);
*
* Also try to minimize dependencies. i.e. when you need a value, try
* to calculate it from a value that was calculated as early as
* possible.
*/
 
/* --------------- MMX primitives ------------------------------------- */

/* The constant table below must be readable as __m64 values.  With GCC a
 * plain uint64_t can be cast to __m64 directly; other compilers expose
 * __m64 as a struct/union, accessed through M64_MEMBER.
 */
#ifdef __GNUC__
typedef uint64_t mmxdatafield;
#else
typedef __m64 mmxdatafield;
/* If __m64 is defined as a struct or union, define M64_MEMBER to be the
   name of the member used to access the data */
# ifdef _MSC_VER
# define M64_MEMBER m64_u64
# elif defined(__SUNPRO_C)
# define M64_MEMBER l_
# endif
#endif

/* Per-process constant pool of 64-bit patterns used by the MMX paths. */
typedef struct
{
    mmxdatafield mmx_4x00ff;                 /* 0x00ff in each 16-bit lane */
    mmxdatafield mmx_4x0080;                 /* 0x0080 in each 16-bit lane */
    mmxdatafield mmx_565_rgb;                /* channel masks for 565 expansion */
    mmxdatafield mmx_565_unpack_multiplier;  /* per-channel scale for 565 -> 888 */
    mmxdatafield mmx_565_r;                  /* red bits when repacking to 565 */
    mmxdatafield mmx_565_g;                  /* green bits when repacking to 565 */
    mmxdatafield mmx_565_b;                  /* blue bits when repacking to 565 */
    mmxdatafield mmx_mask_0;                 /* keep all but 16-bit slot 0 */
    mmxdatafield mmx_mask_1;                 /* keep all but 16-bit slot 1 */
    mmxdatafield mmx_mask_2;                 /* keep all but 16-bit slot 2 */
    mmxdatafield mmx_mask_3;                 /* keep all but 16-bit slot 3 */
    mmxdatafield mmx_full_alpha;             /* alpha lane forced to 0xff */
    mmxdatafield mmx_ffff0000ffff0000;
    mmxdatafield mmx_0000ffff00000000;
    mmxdatafield mmx_000000000000ffff;
} mmx_data_t;

/* Initializer spelling differs per compiler: MSVC wants UI64 suffixes,
 * struct-like __m64 wants a braced member, integral __m64 takes ULL.
 */
#if defined(_MSC_VER)
# define MMXDATA_INIT(field, val) { val ## UI64 }
#elif defined(M64_MEMBER)       /* __m64 is a struct, not an integral type */
# define MMXDATA_INIT(field, val) field = { val ## ULL }
#else                           /* __m64 is an integral type */
# define MMXDATA_INIT(field, val) field = val ## ULL
#endif

static const mmx_data_t c =
{
    MMXDATA_INIT (.mmx_4x00ff,                   0x00ff00ff00ff00ff),
    MMXDATA_INIT (.mmx_4x0080,                   0x0080008000800080),
    MMXDATA_INIT (.mmx_565_rgb,                  0x000001f0003f001f),
    MMXDATA_INIT (.mmx_565_unpack_multiplier,    0x0000008404100840),
    MMXDATA_INIT (.mmx_565_r,                    0x000000f800000000),
    MMXDATA_INIT (.mmx_565_g,                    0x0000000000fc0000),
    MMXDATA_INIT (.mmx_565_b,                    0x00000000000000f8),
    MMXDATA_INIT (.mmx_mask_0,                   0xffffffffffff0000),
    MMXDATA_INIT (.mmx_mask_1,                   0xffffffff0000ffff),
    MMXDATA_INIT (.mmx_mask_2,                   0xffff0000ffffffff),
    MMXDATA_INIT (.mmx_mask_3,                   0x0000ffffffffffff),
    MMXDATA_INIT (.mmx_full_alpha,               0x00ff000000000000),
    MMXDATA_INIT (.mmx_ffff0000ffff0000,         0xffff0000ffff0000),
    MMXDATA_INIT (.mmx_0000ffff00000000,         0x0000ffff00000000),
    MMXDATA_INIT (.mmx_000000000000ffff,         0x000000000000ffff),
};

/* MC(x): fetch constant c.mmx_x as an __m64 value. */
#ifdef __GNUC__
# ifdef __ICC
# define MC(x) to_m64 (c.mmx_ ## x)
# else
# define MC(x) ((__m64)c.mmx_ ## x)
# endif
#else
# define MC(x) c.mmx_ ## x
#endif
 
/* Reinterpret a 64-bit integer as an __m64, portably across the three
 * possible definitions of __m64 (ICC intrinsic, struct member, integer).
 */
static force_inline __m64
to_m64 (uint64_t x)
{
#ifdef __ICC
    return _mm_cvtsi64_m64 (x);
#elif defined M64_MEMBER        /* __m64 is a struct, not an integral type */
    __m64 res;

    res.M64_MEMBER = x;
    return res;
#else                           /* __m64 is an integral type */
    return (__m64)x;
#endif
}
 
/* Inverse of to_m64: reinterpret an __m64 as a 64-bit integer. */
static force_inline uint64_t
to_uint64 (__m64 x)
{
#ifdef __ICC
    return _mm_cvtm64_si64 (x);
#elif defined M64_MEMBER        /* __m64 is a struct, not an integral type */
    uint64_t res = x.M64_MEMBER;
    return res;
#else                           /* __m64 is an integral type */
    return (uint64_t)x;
#endif
}
 
/* Shift the whole 64-bit register by s bits: left when s > 0, right when
 * s < 0, unchanged when s == 0.
 */
static force_inline __m64
shift (__m64 v,
       int   s)
{
    if (s == 0)
        return v;

    return (s > 0) ? _mm_slli_si64 (v, s) : _mm_srli_si64 (v, -s);
}
 
/* Per-channel complement: 0xff - value in each 16-bit lane, done as an
 * XOR against 0x00ff00ff00ff00ff.
 */
static force_inline __m64
negate (__m64 mask)
{
    __m64 lane_ones = MC (4x00ff);

    return _mm_xor_si64 (mask, lane_ones);
}
 
/* Per-channel multiply of two 4x16 pixels with division by 255.
 * (t + 0x80 + ((t + 0x80) >> 8)) >> 8 is the standard exact-rounding
 * approximation of t / 255 for t in [0, 0xfe01].
 */
static force_inline __m64
pix_multiply (__m64 a, __m64 b)
{
    __m64 res;

    res = _mm_mullo_pi16 (a, b);
    res = _mm_adds_pu16 (res, MC (4x0080));
    res = _mm_adds_pu16 (res, _mm_srli_pi16 (res, 8));
    res = _mm_srli_pi16 (res, 8);

    return res;
}
 
/* Saturating per-byte addition of two packed pixels. */
static force_inline __m64
pix_add (__m64 a, __m64 b)
{
    __m64 sum = _mm_adds_pu8 (a, b);

    return sum;
}
 
/* Broadcast the alpha lane (top 16 bits) of a 4x16 pixel into all four
 * 16-bit lanes: 00AA00AA00AA00AA.
 */
static force_inline __m64
expand_alpha (__m64 pixel)
{
    __m64 t1, t2;

    t1 = shift (pixel, -48);    /* isolate alpha in the low lane */
    t2 = shift (t1, 16);
    t1 = _mm_or_si64 (t1, t2);  /* alpha now in lanes 0 and 1 */
    t2 = shift (t1, 32);
    t1 = _mm_or_si64 (t1, t2);  /* alpha in all four lanes */

    return t1;
}
 
/* Like expand_alpha, but the alpha value is taken from the LOW 16-bit
 * lane (used for a8 masks loaded into the bottom of the register).
 */
static force_inline __m64
expand_alpha_rev (__m64 pixel)
{
    __m64 t1, t2;

    /* move alpha to low 16 bits and zero the rest */
    t1 = shift (pixel, 48);
    t1 = shift (t1, -48);

    t2 = shift (t1, 16);
    t1 = _mm_or_si64 (t1, t2);
    t2 = shift (t1, 32);
    t1 = _mm_or_si64 (t1, t2);

    return t1;
}
 
/* Swap the red and blue lanes of a 4x16 ARGB pixel (lanes 0 and 2),
 * leaving alpha and green in place — converts ARGB <-> ABGR.
 */
static force_inline __m64
invert_colors (__m64 pixel)
{
    __m64 x, y, z;

    x = y = z = pixel;

    x = _mm_and_si64 (x, MC (ffff0000ffff0000)); /* keep alpha + green */
    y = _mm_and_si64 (y, MC (000000000000ffff)); /* isolate lane 0 */
    z = _mm_and_si64 (z, MC (0000ffff00000000)); /* isolate lane 2 */

    y = shift (y, 32);   /* lane 0 -> lane 2 */
    z = shift (z, -32);  /* lane 2 -> lane 0 */

    x = _mm_or_si64 (x, y);
    x = _mm_or_si64 (x, z);

    return x;
}
 
/* Porter-Duff OVER for premultiplied pixels:
 * result = src + dest * (1 - srca), all per channel with saturation.
 * srca must be the source alpha already broadcast to all lanes.
 */
static force_inline __m64
over (__m64 src,
      __m64 srca,
      __m64 dest)
{
    return _mm_adds_pu8 (src, pix_multiply (dest, negate (srca)));
}
 
/* OVER for a non-premultiplied, channel-reversed source: swap R/B,
 * force full alpha on the color channels, premultiply by the source
 * alpha, then do a normal OVER against dest.
 */
static force_inline __m64
over_rev_non_pre (__m64 src, __m64 dest)
{
    __m64 srca = expand_alpha (src);
    __m64 srcfaaa = _mm_or_si64 (srca, MC (full_alpha));

    return over (pix_multiply (invert_colors (src), srcfaaa), srca, dest);
}
 
/* Porter-Duff IN: per-channel product of source and mask. */
static force_inline __m64
in (__m64 src, __m64 mask)
{
    __m64 masked = pix_multiply (src, mask);

    return masked;
}
 
/* (src IN mask) OVER dest, for a source known to be fully opaque:
 * the alpha lane is forced to 0xff so the effective source alpha after
 * masking is just the mask itself.
 */
static force_inline __m64
in_over_full_src_alpha (__m64 src, __m64 mask, __m64 dest)
{
    src = _mm_or_si64 (src, MC (full_alpha));

    return over (in (src, mask), mask, dest);
}
 
/* (src IN mask) OVER dest, with srca the pre-expanded source alpha.
 * MSVC cannot pass __m64 by value reliably, so it gets a macro instead
 * of an inline function.
 */
#ifndef _MSC_VER
static force_inline __m64
in_over (__m64 src, __m64 srca, __m64 mask, __m64 dest)
{
    return over (in (src, mask), pix_multiply (srca, mask), dest);
}

#else

#define in_over(src, srca, mask, dest)					\
    over (in (src, mask), pix_multiply (srca, mask), dest)

#endif
 
/* Widen one packed a8r8g8b8 pixel into four 16-bit lanes (zero high bytes). */
static force_inline __m64
load8888 (uint32_t v)
{
    __m64 zero = _mm_setzero_si64 ();

    return _mm_unpacklo_pi8 (_mm_cvtsi32_si64 (v), zero);
}
 
/* Saturate two 4x16 pixels back into two packed 8888 pixels (lo, hi). */
static force_inline __m64
pack8888 (__m64 lo, __m64 hi)
{
    __m64 packed = _mm_packs_pu16 (lo, hi);

    return packed;
}
 
/* Narrow a 4x16 pixel to packed 8888 and return the low 32 bits. */
static force_inline uint32_t
store8888 (__m64 v)
{
    __m64 zero = _mm_setzero_si64 ();

    return _mm_cvtsi64_si32 (pack8888 (v, zero));
}
 
/* Expand 16 bits positioned at @pos (0-3) of a mmx register into
*
* 00RR00GG00BB
*
* --- Expanding 565 in the low word ---
*
* m = (m << (32 - 3)) | (m << (16 - 5)) | m;
* m = m & (01f0003f001f);
* m = m * (008404100840);
* m = m >> 8;
*
* Note the trick here - the top word is shifted by another nibble to
* avoid it bumping into the middle word
*/
/* Expand the 16-bit r5g6b5 pixel sitting in lane @pos into a 4x16
 * 00RR00GG00BB value, using the shift/mask/multiply trick described in
 * the comment above.
 */
static force_inline __m64
expand565 (__m64 pixel, int pos)
{
    __m64 p = pixel;
    __m64 t1, t2;

    /* move pixel to low 16 bit and zero the rest */
    p = shift (shift (p, (3 - pos) * 16), -48);

    /* replicate the pixel so each channel's bits land in its own lane
     * (the red copy is offset by an extra nibble; see comment above) */
    t1 = shift (p, 36 - 11);
    t2 = shift (p, 16 - 5);

    p = _mm_or_si64 (t1, p);
    p = _mm_or_si64 (t2, p);
    p = _mm_and_si64 (p, MC (565_rgb));

    /* scale each 5/6-bit channel up to 8 bits */
    pixel = _mm_mullo_pi16 (p, MC (565_unpack_multiplier));
    return _mm_srli_pi16 (pixel, 8);
}
 
/* Unpack the low (pos == 0) or high (pos == 1) 8888 pixel of a register
 * into four 16-bit lanes.
 */
static force_inline __m64
expand8888 (__m64 in, int pos)
{
    __m64 zero = _mm_setzero_si64 ();

    return (pos == 0) ? _mm_unpacklo_pi8 (in, zero)
                      : _mm_unpackhi_pi8 (in, zero);
}
 
/* Like expand8888, but for x8r8g8b8 sources: force the (undefined)
 * alpha lane to fully opaque.
 */
static force_inline __m64
expandx888 (__m64 in, int pos)
{
    return _mm_or_si64 (expand8888 (in, pos), MC (full_alpha));
}
 
/* Convert the expanded 4x16 pixel back to r5g6b5 and merge it into
 * 16-bit slot @pos of @target, preserving the other three slots.
 */
static force_inline __m64
pack_565 (__m64 pixel, __m64 target, int pos)
{
    __m64 p = pixel;
    __m64 t = target;
    __m64 r, g, b;

    r = _mm_and_si64 (p, MC (565_r));
    g = _mm_and_si64 (p, MC (565_g));
    b = _mm_and_si64 (p, MC (565_b));

    /* drop the low bits of each channel and move them to slot @pos */
    r = shift (r, -(32 - 8) + pos * 16);
    g = shift (g, -(16 - 3) + pos * 16);
    b = shift (b, -(0 + 3) + pos * 16);

    /* clear slot @pos in the target before merging */
    if (pos == 0)
        t = _mm_and_si64 (t, MC (mask_0));
    else if (pos == 1)
        t = _mm_and_si64 (t, MC (mask_1));
    else if (pos == 2)
        t = _mm_and_si64 (t, MC (mask_2));
    else if (pos == 3)
        t = _mm_and_si64 (t, MC (mask_3));

    p = _mm_or_si64 (r, t);
    p = _mm_or_si64 (g, p);

    return _mm_or_si64 (b, p);
}
 
/* Compute x * a + y * b per channel with saturating addition.
 * The MSVC variant is a macro because __m64 parameters cannot be
 * passed by value there; note it evaluates/overwrites x and y.
 *
 * BUG FIX: the macro previously multiplied y by 'a' instead of 'b',
 * making the MSVC build compute x*a + y*a (same fix as upstream pixman).
 */
#ifndef _MSC_VER

static force_inline __m64
pix_add_mul (__m64 x, __m64 a, __m64 y, __m64 b)
{
    x = pix_multiply (x, a);
    y = pix_multiply (y, b);

    return pix_add (x, y);
}

#else

#define pix_add_mul(x, a, y, b)	 \
    ( x = pix_multiply (x, a),	 \
      y = pix_multiply (y, b),	 \
      pix_add (x, y) )

#endif
 
/* --------------- MMX code patch for fbcompose.c --------------------- */
 
/* Fetch *src and, when a mask is present, multiply it by the mask
 * pixel's alpha (unified-alpha masking).  Returns the masked source.
 */
static force_inline uint32_t
combine (const uint32_t *src, const uint32_t *mask)
{
    uint32_t ssrc = *src;

    if (mask)
    {
        __m64 m = load8888 (*mask);
        __m64 s = load8888 (ssrc);

        m = expand_alpha (m);
        s = pix_multiply (s, m);

        ssrc = store8888 (s);
    }

    return ssrc;
}
 
/* Unified-alpha OVER: dest = src + dest * (1 - src.alpha), where src is
 * first attenuated by the mask's alpha (if any).
 */
static void
mmx_combine_over_u (pixman_implementation_t *imp,
                    pixman_op_t              op,
                    uint32_t *               dest,
                    const uint32_t *         src,
                    const uint32_t *         mask,
                    int                      width)
{
    const uint32_t *end = dest + width;

    while (dest < end)
    {
        uint32_t ssrc = combine (src, mask);
        uint32_t a = ssrc >> 24;

        /* Opaque source replaces dest outright; zero source is a no-op. */
        if (a == 0xff)
        {
            *dest = ssrc;
        }
        else if (ssrc)
        {
            __m64 s, sa;
            s = load8888 (ssrc);
            sa = expand_alpha (s);
            *dest = store8888 (over (s, sa, load8888 (*dest)));
        }

        ++dest;
        ++src;
        if (mask)
            ++mask;
    }
    _mm_empty ();
}
 
/* Unified-alpha OVER_REVERSE: dest = dest + src * (1 - dest.alpha). */
static void
mmx_combine_over_reverse_u (pixman_implementation_t *imp,
                            pixman_op_t              op,
                            uint32_t *               dest,
                            const uint32_t *         src,
                            const uint32_t *         mask,
                            int                      width)
{
    const uint32_t *end = dest + width;

    while (dest < end)
    {
        __m64 d, da;
        uint32_t s = combine (src, mask);

        d = load8888 (*dest);
        da = expand_alpha (d);
        *dest = store8888 (over (d, da, load8888 (s)));

        ++dest;
        ++src;
        if (mask)
            mask++;
    }
    _mm_empty ();
}
 
/* Unified-alpha IN: dest = src * dest.alpha. */
static void
mmx_combine_in_u (pixman_implementation_t *imp,
                  pixman_op_t              op,
                  uint32_t *               dest,
                  const uint32_t *         src,
                  const uint32_t *         mask,
                  int                      width)
{
    const uint32_t *end = dest + width;

    while (dest < end)
    {
        __m64 x, a;

        x = load8888 (combine (src, mask));
        a = load8888 (*dest);
        a = expand_alpha (a);
        x = pix_multiply (x, a);

        *dest = store8888 (x);

        ++dest;
        ++src;
        if (mask)
            mask++;
    }
    _mm_empty ();
}
 
/* Unified-alpha IN_REVERSE: dest = dest * src.alpha. */
static void
mmx_combine_in_reverse_u (pixman_implementation_t *imp,
                          pixman_op_t              op,
                          uint32_t *               dest,
                          const uint32_t *         src,
                          const uint32_t *         mask,
                          int                      width)
{
    const uint32_t *end = dest + width;

    while (dest < end)
    {
        __m64 x, a;

        x = load8888 (*dest);
        a = load8888 (combine (src, mask));
        a = expand_alpha (a);
        x = pix_multiply (x, a);
        *dest = store8888 (x);

        ++dest;
        ++src;
        if (mask)
            mask++;
    }
    _mm_empty ();
}
 
/* Unified-alpha OUT: dest = src * (1 - dest.alpha). */
static void
mmx_combine_out_u (pixman_implementation_t *imp,
                   pixman_op_t              op,
                   uint32_t *               dest,
                   const uint32_t *         src,
                   const uint32_t *         mask,
                   int                      width)
{
    const uint32_t *end = dest + width;

    while (dest < end)
    {
        __m64 x, a;

        x = load8888 (combine (src, mask));
        a = load8888 (*dest);
        a = expand_alpha (a);
        a = negate (a);
        x = pix_multiply (x, a);
        *dest = store8888 (x);

        ++dest;
        ++src;
        if (mask)
            mask++;
    }
    _mm_empty ();
}
 
/* Unified-alpha OUT_REVERSE: dest = dest * (1 - src.alpha). */
static void
mmx_combine_out_reverse_u (pixman_implementation_t *imp,
                           pixman_op_t              op,
                           uint32_t *               dest,
                           const uint32_t *         src,
                           const uint32_t *         mask,
                           int                      width)
{
    const uint32_t *end = dest + width;

    while (dest < end)
    {
        __m64 x, a;

        x = load8888 (*dest);
        a = load8888 (combine (src, mask));
        a = expand_alpha (a);
        a = negate (a);
        x = pix_multiply (x, a);

        *dest = store8888 (x);

        ++dest;
        ++src;
        if (mask)
            mask++;
    }
    _mm_empty ();
}
 
/* Unified-alpha ATOP: dest = src * dest.alpha + dest * (1 - src.alpha). */
static void
mmx_combine_atop_u (pixman_implementation_t *imp,
                    pixman_op_t              op,
                    uint32_t *               dest,
                    const uint32_t *         src,
                    const uint32_t *         mask,
                    int                      width)
{
    const uint32_t *end = dest + width;

    while (dest < end)
    {
        __m64 s, da, d, sia;

        s = load8888 (combine (src, mask));
        d = load8888 (*dest);
        sia = expand_alpha (s);
        sia = negate (sia);          /* 1 - src.alpha */
        da = expand_alpha (d);
        s = pix_add_mul (s, da, d, sia);
        *dest = store8888 (s);

        ++dest;
        ++src;
        if (mask)
            mask++;
    }
    _mm_empty ();
}
 
/* Unified-alpha ATOP_REVERSE:
 * dest = src * (1 - dest.alpha) + dest * src.alpha.
 */
static void
mmx_combine_atop_reverse_u (pixman_implementation_t *imp,
                            pixman_op_t              op,
                            uint32_t *               dest,
                            const uint32_t *         src,
                            const uint32_t *         mask,
                            int                      width)
{
    const uint32_t *end;

    end = dest + width;

    while (dest < end)
    {
        __m64 s, dia, d, sa;

        s = load8888 (combine (src, mask));
        d = load8888 (*dest);
        sa = expand_alpha (s);
        dia = expand_alpha (d);
        dia = negate (dia);          /* 1 - dest.alpha */
        s = pix_add_mul (s, dia, d, sa);
        *dest = store8888 (s);

        ++dest;
        ++src;
        if (mask)
            mask++;
    }
    _mm_empty ();
}
 
/* Unified-alpha XOR:
 * dest = src * (1 - dest.alpha) + dest * (1 - src.alpha).
 */
static void
mmx_combine_xor_u (pixman_implementation_t *imp,
                   pixman_op_t              op,
                   uint32_t *               dest,
                   const uint32_t *         src,
                   const uint32_t *         mask,
                   int                      width)
{
    const uint32_t *end = dest + width;

    while (dest < end)
    {
        __m64 s, dia, d, sia;

        s = load8888 (combine (src, mask));
        d = load8888 (*dest);
        sia = expand_alpha (s);
        dia = expand_alpha (d);
        sia = negate (sia);
        dia = negate (dia);
        s = pix_add_mul (s, dia, d, sia);
        *dest = store8888 (s);

        ++dest;
        ++src;
        if (mask)
            mask++;
    }
    _mm_empty ();
}
 
/* Unified-alpha ADD: dest = saturate (src + dest). */
static void
mmx_combine_add_u (pixman_implementation_t *imp,
                   pixman_op_t              op,
                   uint32_t *               dest,
                   const uint32_t *         src,
                   const uint32_t *         mask,
                   int                      width)
{
    const uint32_t *end = dest + width;

    while (dest < end)
    {
        __m64 s, d;

        s = load8888 (combine (src, mask));
        d = load8888 (*dest);
        s = pix_add (s, d);
        *dest = store8888 (s);

        ++dest;
        ++src;
        if (mask)
            mask++;
    }
    _mm_empty ();
}
 
/* Unified-alpha SATURATE: add src to dest, but if the source alpha
 * exceeds the destination's remaining headroom (~dest.alpha), scale the
 * source down by headroom/src.alpha first so the sum cannot overflow
 * the alpha channel.
 */
static void
mmx_combine_saturate_u (pixman_implementation_t *imp,
                        pixman_op_t              op,
                        uint32_t *               dest,
                        const uint32_t *         src,
                        const uint32_t *         mask,
                        int                      width)
{
    const uint32_t *end = dest + width;

    while (dest < end)
    {
        uint32_t s = combine (src, mask);
        uint32_t d = *dest;
        __m64 ms = load8888 (s);
        __m64 md = load8888 (d);
        uint32_t sa = s >> 24;
        uint32_t da = ~d >> 24;   /* 255 - dest.alpha: remaining headroom */

        if (sa > da)
        {
            /* scale source by da/sa (DIV_UN8 from pixman-combine32.h) */
            __m64 msa = load8888 (DIV_UN8 (da, sa) << 24);
            msa = expand_alpha (msa);
            ms = pix_multiply (ms, msa);
        }

        md = pix_add (md, ms);
        *dest = store8888 (md);

        ++src;
        ++dest;
        if (mask)
            mask++;
    }
    _mm_empty ();
}
 
/* Component-alpha SRC: dest = src * mask (per channel).
 * In the _ca variants the mask is never NULL and masks each channel
 * independently.
 */
static void
mmx_combine_src_ca (pixman_implementation_t *imp,
                    pixman_op_t              op,
                    uint32_t *               dest,
                    const uint32_t *         src,
                    const uint32_t *         mask,
                    int                      width)
{
    const uint32_t *end = src + width;

    while (src < end)
    {
        __m64 a = load8888 (*mask);
        __m64 s = load8888 (*src);

        s = pix_multiply (s, a);
        *dest = store8888 (s);

        ++src;
        ++mask;
        ++dest;
    }
    _mm_empty ();
}
 
/* Component-alpha OVER: dest = (src IN mask) OVER dest. */
static void
mmx_combine_over_ca (pixman_implementation_t *imp,
                     pixman_op_t              op,
                     uint32_t *               dest,
                     const uint32_t *         src,
                     const uint32_t *         mask,
                     int                      width)
{
    const uint32_t *end = src + width;

    while (src < end)
    {
        __m64 a = load8888 (*mask);
        __m64 s = load8888 (*src);
        __m64 d = load8888 (*dest);
        __m64 sa = expand_alpha (s);

        *dest = store8888 (in_over (s, sa, a, d));

        ++src;
        ++dest;
        ++mask;
    }
    _mm_empty ();
}
 
/* Component-alpha OVER_REVERSE: dest = dest OVER (src IN mask). */
static void
mmx_combine_over_reverse_ca (pixman_implementation_t *imp,
                             pixman_op_t              op,
                             uint32_t *               dest,
                             const uint32_t *         src,
                             const uint32_t *         mask,
                             int                      width)
{
    const uint32_t *end = src + width;

    while (src < end)
    {
        __m64 a = load8888 (*mask);
        __m64 s = load8888 (*src);
        __m64 d = load8888 (*dest);
        __m64 da = expand_alpha (d);

        *dest = store8888 (over (d, da, in (s, a)));

        ++src;
        ++dest;
        ++mask;
    }
    _mm_empty ();
}
 
/* Component-alpha IN: dest = src * mask * dest.alpha. */
static void
mmx_combine_in_ca (pixman_implementation_t *imp,
                   pixman_op_t              op,
                   uint32_t *               dest,
                   const uint32_t *         src,
                   const uint32_t *         mask,
                   int                      width)
{
    const uint32_t *end = src + width;

    while (src < end)
    {
        __m64 a = load8888 (*mask);
        __m64 s = load8888 (*src);
        __m64 d = load8888 (*dest);
        __m64 da = expand_alpha (d);

        s = pix_multiply (s, a);
        s = pix_multiply (s, da);
        *dest = store8888 (s);

        ++src;
        ++dest;
        ++mask;
    }
    _mm_empty ();
}
 
/* Component-alpha IN_REVERSE: dest = dest * (mask * src.alpha). */
static void
mmx_combine_in_reverse_ca (pixman_implementation_t *imp,
                           pixman_op_t              op,
                           uint32_t *               dest,
                           const uint32_t *         src,
                           const uint32_t *         mask,
                           int                      width)
{
    const uint32_t *end = src + width;

    while (src < end)
    {
        __m64 a = load8888 (*mask);
        __m64 s = load8888 (*src);
        __m64 d = load8888 (*dest);
        __m64 sa = expand_alpha (s);

        a = pix_multiply (a, sa);
        d = pix_multiply (d, a);
        *dest = store8888 (d);

        ++src;
        ++dest;
        ++mask;
    }
    _mm_empty ();
}
 
/* Component-alpha OUT: dest = src * mask * (1 - dest.alpha). */
static void
mmx_combine_out_ca (pixman_implementation_t *imp,
                    pixman_op_t              op,
                    uint32_t *               dest,
                    const uint32_t *         src,
                    const uint32_t *         mask,
                    int                      width)
{
    const uint32_t *end = src + width;

    while (src < end)
    {
        __m64 a = load8888 (*mask);
        __m64 s = load8888 (*src);
        __m64 d = load8888 (*dest);
        __m64 da = expand_alpha (d);

        da = negate (da);
        s = pix_multiply (s, a);
        s = pix_multiply (s, da);
        *dest = store8888 (s);

        ++src;
        ++dest;
        ++mask;
    }
    _mm_empty ();
}
 
/* Component-alpha OUT_REVERSE: dest = dest * (1 - mask * src.alpha). */
static void
mmx_combine_out_reverse_ca (pixman_implementation_t *imp,
                            pixman_op_t              op,
                            uint32_t *               dest,
                            const uint32_t *         src,
                            const uint32_t *         mask,
                            int                      width)
{
    const uint32_t *end = src + width;

    while (src < end)
    {
        __m64 a = load8888 (*mask);
        __m64 s = load8888 (*src);
        __m64 d = load8888 (*dest);
        __m64 sa = expand_alpha (s);

        a = pix_multiply (a, sa);
        a = negate (a);
        d = pix_multiply (d, a);
        *dest = store8888 (d);

        ++src;
        ++dest;
        ++mask;
    }
    _mm_empty ();
}
 
/* Component-alpha ATOP:
 * dest = src * mask * dest.alpha + dest * (1 - mask * src.alpha).
 */
static void
mmx_combine_atop_ca (pixman_implementation_t *imp,
                     pixman_op_t              op,
                     uint32_t *               dest,
                     const uint32_t *         src,
                     const uint32_t *         mask,
                     int                      width)
{
    const uint32_t *end = src + width;

    while (src < end)
    {
        __m64 a = load8888 (*mask);
        __m64 s = load8888 (*src);
        __m64 d = load8888 (*dest);
        __m64 da = expand_alpha (d);
        __m64 sa = expand_alpha (s);

        s = pix_multiply (s, a);
        a = pix_multiply (a, sa);
        a = negate (a);
        d = pix_add_mul (d, a, s, da);
        *dest = store8888 (d);

        ++src;
        ++dest;
        ++mask;
    }
    _mm_empty ();
}
 
/* Component-alpha ATOP_REVERSE:
 * dest = src * mask * (1 - dest.alpha) + dest * (mask * src.alpha).
 */
static void
mmx_combine_atop_reverse_ca (pixman_implementation_t *imp,
                             pixman_op_t              op,
                             uint32_t *               dest,
                             const uint32_t *         src,
                             const uint32_t *         mask,
                             int                      width)
{
    const uint32_t *end = src + width;

    while (src < end)
    {
        __m64 a = load8888 (*mask);
        __m64 s = load8888 (*src);
        __m64 d = load8888 (*dest);
        __m64 da = expand_alpha (d);
        __m64 sa = expand_alpha (s);

        s = pix_multiply (s, a);
        a = pix_multiply (a, sa);
        da = negate (da);
        d = pix_add_mul (d, a, s, da);
        *dest = store8888 (d);

        ++src;
        ++dest;
        ++mask;
    }
    _mm_empty ();
}
 
/* Component-alpha XOR:
 * dest = src * mask * (1 - dest.alpha) + dest * (1 - mask * src.alpha).
 */
static void
mmx_combine_xor_ca (pixman_implementation_t *imp,
                    pixman_op_t              op,
                    uint32_t *               dest,
                    const uint32_t *         src,
                    const uint32_t *         mask,
                    int                      width)
{
    const uint32_t *end = src + width;

    while (src < end)
    {
        __m64 a = load8888 (*mask);
        __m64 s = load8888 (*src);
        __m64 d = load8888 (*dest);
        __m64 da = expand_alpha (d);
        __m64 sa = expand_alpha (s);

        s = pix_multiply (s, a);
        a = pix_multiply (a, sa);
        da = negate (da);
        a = negate (a);
        d = pix_add_mul (d, a, s, da);
        *dest = store8888 (d);

        ++src;
        ++dest;
        ++mask;
    }
    _mm_empty ();
}
 
/* Component-alpha ADD: dest = saturate (src * mask + dest). */
static void
mmx_combine_add_ca (pixman_implementation_t *imp,
                    pixman_op_t              op,
                    uint32_t *               dest,
                    const uint32_t *         src,
                    const uint32_t *         mask,
                    int                      width)
{
    const uint32_t *end = src + width;

    while (src < end)
    {
        __m64 a = load8888 (*mask);
        __m64 s = load8888 (*src);
        __m64 d = load8888 (*dest);

        s = pix_multiply (s, a);
        d = pix_add (s, d);
        *dest = store8888 (d);

        ++src;
        ++dest;
        ++mask;
    }
    _mm_empty ();
}
 
/* ------------- MMX code paths called from fbpict.c -------------------- */
 
/* Fast path: solid source OVER a8r8g8b8 destination.
 * Handles leading unaligned pixels one at a time, then processes two
 * pixels per 8-byte-aligned __m64 store, then the tail.
 */
static void
mmx_composite_over_n_8888 (pixman_implementation_t *imp,
                           pixman_op_t              op,
                           pixman_image_t *         src_image,
                           pixman_image_t *         mask_image,
                           pixman_image_t *         dst_image,
                           int32_t                  src_x,
                           int32_t                  src_y,
                           int32_t                  mask_x,
                           int32_t                  mask_y,
                           int32_t                  dest_x,
                           int32_t                  dest_y,
                           int32_t                  width,
                           int32_t                  height)
{
    uint32_t src;
    uint32_t *dst_line, *dst;
    int32_t w;
    int dst_stride;
    __m64 vsrc, vsrca;

    CHECKPOINT ();

    src = _pixman_image_get_solid (src_image, dst_image->bits.format);

    /* Transparent solid source: nothing to composite. */
    if (src == 0)
        return;

    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);

    vsrc = load8888 (src);
    vsrca = expand_alpha (vsrc);

    while (height--)
    {
        dst = dst_line;
        dst_line += dst_stride;
        w = width;

        CHECKPOINT ();

        /* leading pixels until dst is 8-byte aligned */
        while (w && (unsigned long)dst & 7)
        {
            *dst = store8888 (over (vsrc, vsrca, load8888 (*dst)));

            w--;
            dst++;
        }

        /* two pixels per iteration on the aligned middle section */
        while (w >= 2)
        {
            __m64 vdest;
            __m64 dest0, dest1;

            vdest = *(__m64 *)dst;

            dest0 = over (vsrc, vsrca, expand8888 (vdest, 0));
            dest1 = over (vsrc, vsrca, expand8888 (vdest, 1));

            *(__m64 *)dst = pack8888 (dest0, dest1);

            dst += 2;
            w -= 2;
        }

        CHECKPOINT ();

        /* trailing odd pixel */
        while (w)
        {
            *dst = store8888 (over (vsrc, vsrca, load8888 (*dst)));

            w--;
            dst++;
        }
    }

    _mm_empty ();
}
 
/* Fast path: solid source OVER r5g6b5 destination.
 * Works on four 16-bit pixels per aligned __m64, expanding each slot to
 * 8888, compositing, and repacking in place.
 */
static void
mmx_composite_over_n_0565 (pixman_implementation_t *imp,
                           pixman_op_t              op,
                           pixman_image_t *         src_image,
                           pixman_image_t *         mask_image,
                           pixman_image_t *         dst_image,
                           int32_t                  src_x,
                           int32_t                  src_y,
                           int32_t                  mask_x,
                           int32_t                  mask_y,
                           int32_t                  dest_x,
                           int32_t                  dest_y,
                           int32_t                  width,
                           int32_t                  height)
{
    uint32_t src;
    uint16_t *dst_line, *dst;
    int32_t w;
    int dst_stride;
    __m64 vsrc, vsrca;

    CHECKPOINT ();

    src = _pixman_image_get_solid (src_image, dst_image->bits.format);

    if (src == 0)
        return;

    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1);

    vsrc = load8888 (src);
    vsrca = expand_alpha (vsrc);

    while (height--)
    {
        dst = dst_line;
        dst_line += dst_stride;
        w = width;

        CHECKPOINT ();

        /* leading pixels until dst is 8-byte aligned */
        while (w && (unsigned long)dst & 7)
        {
            uint64_t d = *dst;
            __m64 vdest = expand565 (to_m64 (d), 0);

            vdest = pack_565 (over (vsrc, vsrca, vdest), vdest, 0);
            *dst = to_uint64 (vdest);

            w--;
            dst++;
        }

        /* four 565 pixels per aligned __m64 */
        while (w >= 4)
        {
            __m64 vdest;

            vdest = *(__m64 *)dst;

            vdest = pack_565 (over (vsrc, vsrca, expand565 (vdest, 0)), vdest, 0);
            vdest = pack_565 (over (vsrc, vsrca, expand565 (vdest, 1)), vdest, 1);
            vdest = pack_565 (over (vsrc, vsrca, expand565 (vdest, 2)), vdest, 2);
            vdest = pack_565 (over (vsrc, vsrca, expand565 (vdest, 3)), vdest, 3);

            *(__m64 *)dst = vdest;

            dst += 4;
            w -= 4;
        }

        CHECKPOINT ();

        /* trailing pixels */
        while (w)
        {
            uint64_t d = *dst;
            __m64 vdest = expand565 (to_m64 (d), 0);

            vdest = pack_565 (over (vsrc, vsrca, vdest), vdest, 0);
            *dst = to_uint64 (vdest);

            w--;
            dst++;
        }
    }

    _mm_empty ();
}
 
/* Fast path: solid source with a component-alpha a8r8g8b8 mask, OVER an
 * a8r8g8b8 destination.  Zero mask pixels are skipped entirely.
 */
static void
mmx_composite_over_n_8888_8888_ca (pixman_implementation_t *imp,
                                   pixman_op_t              op,
                                   pixman_image_t *         src_image,
                                   pixman_image_t *         mask_image,
                                   pixman_image_t *         dst_image,
                                   int32_t                  src_x,
                                   int32_t                  src_y,
                                   int32_t                  mask_x,
                                   int32_t                  mask_y,
                                   int32_t                  dest_x,
                                   int32_t                  dest_y,
                                   int32_t                  width,
                                   int32_t                  height)
{
    uint32_t src, srca;
    uint32_t *dst_line;
    uint32_t *mask_line;
    int dst_stride, mask_stride;
    __m64 vsrc, vsrca;

    CHECKPOINT ();

    src = _pixman_image_get_solid (src_image, dst_image->bits.format);

    /* NOTE(review): srca is computed but never read in this function —
     * looks like leftover scaffolding; confirm before removing. */
    srca = src >> 24;
    if (src == 0)
        return;

    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
    PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint32_t, mask_stride, mask_line, 1);

    vsrc = load8888 (src);
    vsrca = expand_alpha (vsrc);

    while (height--)
    {
        int twidth = width;
        uint32_t *p = (uint32_t *)mask_line;
        uint32_t *q = (uint32_t *)dst_line;

        /* leading pixels until dst is 8-byte aligned */
        while (twidth && (unsigned long)q & 7)
        {
            uint32_t m = *(uint32_t *)p;

            if (m)
            {
                __m64 vdest = load8888 (*q);
                vdest = in_over (vsrc, vsrca, load8888 (m), vdest);
                *q = store8888 (vdest);
            }

            twidth--;
            p++;
            q++;
        }

        /* two pixels per aligned __m64; skip when both masks are zero */
        while (twidth >= 2)
        {
            uint32_t m0, m1;
            m0 = *p;
            m1 = *(p + 1);

            if (m0 | m1)
            {
                __m64 dest0, dest1;
                __m64 vdest = *(__m64 *)q;

                dest0 = in_over (vsrc, vsrca, load8888 (m0),
                                 expand8888 (vdest, 0));
                dest1 = in_over (vsrc, vsrca, load8888 (m1),
                                 expand8888 (vdest, 1));

                *(__m64 *)q = pack8888 (dest0, dest1);
            }

            p += 2;
            q += 2;
            twidth -= 2;
        }

        /* trailing pixel */
        while (twidth)
        {
            uint32_t m = *(uint32_t *)p;

            if (m)
            {
                __m64 vdest = load8888 (*q);
                vdest = in_over (vsrc, vsrca, load8888 (m), vdest);
                *q = store8888 (vdest);
            }

            twidth--;
            p++;
            q++;
        }

        dst_line += dst_stride;
        mask_line += mask_stride;
    }

    _mm_empty ();
}
 
/* Fast path: a8r8g8b8 source, solid mask, OVER a8r8g8b8 destination.
 * The solid mask's alpha is replicated into every channel and applied
 * via in_over.
 *
 * Cleanup: removed the local '__m64 srca' which was assigned
 * MC (4x00ff) but never read (dead code).
 */
static void
mmx_composite_over_8888_n_8888 (pixman_implementation_t *imp,
                                pixman_op_t              op,
                                pixman_image_t *         src_image,
                                pixman_image_t *         mask_image,
                                pixman_image_t *         dst_image,
                                int32_t                  src_x,
                                int32_t                  src_y,
                                int32_t                  mask_x,
                                int32_t                  mask_y,
                                int32_t                  dest_x,
                                int32_t                  dest_y,
                                int32_t                  width,
                                int32_t                  height)
{
    uint32_t *dst_line, *dst;
    uint32_t *src_line, *src;
    uint32_t mask;
    __m64 vmask;
    int dst_stride, src_stride;
    int32_t w;

    CHECKPOINT ();

    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);

    mask = _pixman_image_get_solid (mask_image, dst_image->bits.format);
    /* replicate the mask's alpha byte into all four channels */
    mask &= 0xff000000;
    mask = mask | mask >> 8 | mask >> 16 | mask >> 24;
    vmask = load8888 (mask);

    while (height--)
    {
        dst = dst_line;
        dst_line += dst_stride;
        src = src_line;
        src_line += src_stride;
        w = width;

        /* leading pixels until dst is 8-byte aligned */
        while (w && (unsigned long)dst & 7)
        {
            __m64 s = load8888 (*src);
            __m64 d = load8888 (*dst);

            *dst = store8888 (in_over (s, expand_alpha (s), vmask, d));

            w--;
            dst++;
            src++;
        }

        /* two pixels per aligned __m64 */
        while (w >= 2)
        {
            __m64 vs = *(__m64 *)src;
            __m64 vd = *(__m64 *)dst;
            __m64 vsrc0 = expand8888 (vs, 0);
            __m64 vsrc1 = expand8888 (vs, 1);

            *(__m64 *)dst = pack8888 (
                in_over (vsrc0, expand_alpha (vsrc0), vmask, expand8888 (vd, 0)),
                in_over (vsrc1, expand_alpha (vsrc1), vmask, expand8888 (vd, 1)));

            w -= 2;
            dst += 2;
            src += 2;
        }

        /* trailing pixel */
        while (w)
        {
            __m64 s = load8888 (*src);
            __m64 d = load8888 (*dst);

            *dst = store8888 (in_over (s, expand_alpha (s), vmask, d));

            w--;
            dst++;
            src++;
        }
    }

    _mm_empty ();
}
 
/* Fast path: x8r8g8b8 source (alpha ignored, treated as opaque), solid
 * mask, OVER a8r8g8b8 destination.  The middle loop is unrolled to 16
 * pixels (eight __m64 loads/stores) per iteration.
 */
static void
mmx_composite_over_x888_n_8888 (pixman_implementation_t *imp,
                                pixman_op_t              op,
                                pixman_image_t *         src_image,
                                pixman_image_t *         mask_image,
                                pixman_image_t *         dst_image,
                                int32_t                  src_x,
                                int32_t                  src_y,
                                int32_t                  mask_x,
                                int32_t                  mask_y,
                                int32_t                  dest_x,
                                int32_t                  dest_y,
                                int32_t                  width,
                                int32_t                  height)
{
    uint32_t *dst_line, *dst;
    uint32_t *src_line, *src;
    uint32_t mask;
    __m64 vmask;
    int dst_stride, src_stride;
    int32_t w;
    __m64 srca;

    CHECKPOINT ();

    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
    mask = _pixman_image_get_solid (mask_image, dst_image->bits.format);

    /* replicate the mask's alpha byte into all four channels */
    mask &= 0xff000000;
    mask = mask | mask >> 8 | mask >> 16 | mask >> 24;
    vmask = load8888 (mask);
    srca = MC (4x00ff);   /* source alpha is constant 0xff for x888 */

    while (height--)
    {
        dst = dst_line;
        dst_line += dst_stride;
        src = src_line;
        src_line += src_stride;
        w = width;

        /* leading pixels until dst is 8-byte aligned */
        while (w && (unsigned long)dst & 7)
        {
            __m64 s = load8888 (*src | 0xff000000);
            __m64 d = load8888 (*dst);

            *dst = store8888 (in_over (s, srca, vmask, d));

            w--;
            dst++;
            src++;
        }

        /* 16 pixels per iteration, 2 per __m64 */
        while (w >= 16)
        {
            __m64 vd0 = *(__m64 *)(dst + 0);
            __m64 vd1 = *(__m64 *)(dst + 2);
            __m64 vd2 = *(__m64 *)(dst + 4);
            __m64 vd3 = *(__m64 *)(dst + 6);
            __m64 vd4 = *(__m64 *)(dst + 8);
            __m64 vd5 = *(__m64 *)(dst + 10);
            __m64 vd6 = *(__m64 *)(dst + 12);
            __m64 vd7 = *(__m64 *)(dst + 14);

            __m64 vs0 = *(__m64 *)(src + 0);
            __m64 vs1 = *(__m64 *)(src + 2);
            __m64 vs2 = *(__m64 *)(src + 4);
            __m64 vs3 = *(__m64 *)(src + 6);
            __m64 vs4 = *(__m64 *)(src + 8);
            __m64 vs5 = *(__m64 *)(src + 10);
            __m64 vs6 = *(__m64 *)(src + 12);
            __m64 vs7 = *(__m64 *)(src + 14);

            vd0 = pack8888 (
                in_over (expandx888 (vs0, 0), srca, vmask, expand8888 (vd0, 0)),
                in_over (expandx888 (vs0, 1), srca, vmask, expand8888 (vd0, 1)));

            vd1 = pack8888 (
                in_over (expandx888 (vs1, 0), srca, vmask, expand8888 (vd1, 0)),
                in_over (expandx888 (vs1, 1), srca, vmask, expand8888 (vd1, 1)));

            vd2 = pack8888 (
                in_over (expandx888 (vs2, 0), srca, vmask, expand8888 (vd2, 0)),
                in_over (expandx888 (vs2, 1), srca, vmask, expand8888 (vd2, 1)));

            vd3 = pack8888 (
                in_over (expandx888 (vs3, 0), srca, vmask, expand8888 (vd3, 0)),
                in_over (expandx888 (vs3, 1), srca, vmask, expand8888 (vd3, 1)));

            vd4 = pack8888 (
                in_over (expandx888 (vs4, 0), srca, vmask, expand8888 (vd4, 0)),
                in_over (expandx888 (vs4, 1), srca, vmask, expand8888 (vd4, 1)));

            vd5 = pack8888 (
                in_over (expandx888 (vs5, 0), srca, vmask, expand8888 (vd5, 0)),
                in_over (expandx888 (vs5, 1), srca, vmask, expand8888 (vd5, 1)));

            vd6 = pack8888 (
                in_over (expandx888 (vs6, 0), srca, vmask, expand8888 (vd6, 0)),
                in_over (expandx888 (vs6, 1), srca, vmask, expand8888 (vd6, 1)));

            vd7 = pack8888 (
                in_over (expandx888 (vs7, 0), srca, vmask, expand8888 (vd7, 0)),
                in_over (expandx888 (vs7, 1), srca, vmask, expand8888 (vd7, 1)));

            *(__m64 *)(dst + 0) = vd0;
            *(__m64 *)(dst + 2) = vd1;
            *(__m64 *)(dst + 4) = vd2;
            *(__m64 *)(dst + 6) = vd3;
            *(__m64 *)(dst + 8) = vd4;
            *(__m64 *)(dst + 10) = vd5;
            *(__m64 *)(dst + 12) = vd6;
            *(__m64 *)(dst + 14) = vd7;

            w -= 16;
            dst += 16;
            src += 16;
        }

        /* trailing pixels */
        while (w)
        {
            __m64 s = load8888 (*src | 0xff000000);
            __m64 d = load8888 (*dst);

            *dst = store8888 (in_over (s, srca, vmask, d));

            w--;
            dst++;
            src++;
        }
    }

    _mm_empty ();
}
 
/* Fast path: a8r8g8b8 source OVER a8r8g8b8 destination, no mask.
 * Scalar per-pixel loop with shortcuts for opaque and zero source
 * pixels; only the blend itself uses MMX.
 */
static void
mmx_composite_over_8888_8888 (pixman_implementation_t *imp,
                              pixman_op_t              op,
                              pixman_image_t *         src_image,
                              pixman_image_t *         mask_image,
                              pixman_image_t *         dst_image,
                              int32_t                  src_x,
                              int32_t                  src_y,
                              int32_t                  mask_x,
                              int32_t                  mask_y,
                              int32_t                  dest_x,
                              int32_t                  dest_y,
                              int32_t                  width,
                              int32_t                  height)
{
    uint32_t *dst_line, *dst;
    uint32_t *src_line, *src;
    uint32_t s;
    int dst_stride, src_stride;
    uint8_t a;
    int32_t w;

    CHECKPOINT ();

    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);

    while (height--)
    {
        dst = dst_line;
        dst_line += dst_stride;
        src = src_line;
        src_line += src_stride;
        w = width;

        while (w--)
        {
            s = *src++;
            a = s >> 24;

            if (a == 0xff)
            {
                *dst = s;               /* opaque: plain copy */
            }
            else if (s)
            {
                __m64 ms, sa;
                ms = load8888 (s);
                sa = expand_alpha (ms);
                *dst = store8888 (over (ms, sa, load8888 (*dst)));
            }

            dst++;
        }
    }
    _mm_empty ();
}
 
/* Fast path: a8r8g8b8 source OVER r5g6b5 destination, no mask.
 * Four 565 pixels per aligned __m64 in the middle loop.
 */
static void
mmx_composite_over_8888_0565 (pixman_implementation_t *imp,
                              pixman_op_t              op,
                              pixman_image_t *         src_image,
                              pixman_image_t *         mask_image,
                              pixman_image_t *         dst_image,
                              int32_t                  src_x,
                              int32_t                  src_y,
                              int32_t                  mask_x,
                              int32_t                  mask_y,
                              int32_t                  dest_x,
                              int32_t                  dest_y,
                              int32_t                  width,
                              int32_t                  height)
{
    uint16_t *dst_line, *dst;
    uint32_t *src_line, *src;
    int dst_stride, src_stride;
    int32_t w;

    CHECKPOINT ();

    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1);
    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);

#if 0
    /* FIXME */
    assert (src_image->drawable == mask_image->drawable);
#endif

    while (height--)
    {
        dst = dst_line;
        dst_line += dst_stride;
        src = src_line;
        src_line += src_stride;
        w = width;

        CHECKPOINT ();

        /* leading pixels until dst is 8-byte aligned */
        while (w && (unsigned long)dst & 7)
        {
            __m64 vsrc = load8888 (*src);
            uint64_t d = *dst;
            __m64 vdest = expand565 (to_m64 (d), 0);

            vdest = pack_565 (
                over (vsrc, expand_alpha (vsrc), vdest), vdest, 0);

            *dst = to_uint64 (vdest);

            w--;
            dst++;
            src++;
        }

        CHECKPOINT ();

        /* four 565 pixels per aligned __m64 */
        while (w >= 4)
        {
            __m64 vsrc0, vsrc1, vsrc2, vsrc3;
            __m64 vdest;

            vsrc0 = load8888 (*(src + 0));
            vsrc1 = load8888 (*(src + 1));
            vsrc2 = load8888 (*(src + 2));
            vsrc3 = load8888 (*(src + 3));

            vdest = *(__m64 *)dst;

            vdest = pack_565 (over (vsrc0, expand_alpha (vsrc0), expand565 (vdest, 0)), vdest, 0);
            vdest = pack_565 (over (vsrc1, expand_alpha (vsrc1), expand565 (vdest, 1)), vdest, 1);
            vdest = pack_565 (over (vsrc2, expand_alpha (vsrc2), expand565 (vdest, 2)), vdest, 2);
            vdest = pack_565 (over (vsrc3, expand_alpha (vsrc3), expand565 (vdest, 3)), vdest, 3);

            *(__m64 *)dst = vdest;

            w -= 4;
            dst += 4;
            src += 4;
        }

        CHECKPOINT ();

        /* trailing pixels */
        while (w)
        {
            __m64 vsrc = load8888 (*src);
            uint64_t d = *dst;
            __m64 vdest = expand565 (to_m64 (d), 0);

            vdest = pack_565 (over (vsrc, expand_alpha (vsrc), vdest), vdest, 0);

            *dst = to_uint64 (vdest);

            w--;
            dst++;
            src++;
        }
    }

    _mm_empty ();
}
 
/* Fast path: solid source with an a8 mask, OVER a8r8g8b8 destination.
 * When the source is opaque and both mask bytes of a pair are 0xff, the
 * pair is stored directly as the prebuilt 'srcsrc' double pixel.
 */
static void
mmx_composite_over_n_8_8888 (pixman_implementation_t *imp,
                             pixman_op_t              op,
                             pixman_image_t *         src_image,
                             pixman_image_t *         mask_image,
                             pixman_image_t *         dst_image,
                             int32_t                  src_x,
                             int32_t                  src_y,
                             int32_t                  mask_x,
                             int32_t                  mask_y,
                             int32_t                  dest_x,
                             int32_t                  dest_y,
                             int32_t                  width,
                             int32_t                  height)
{
    uint32_t src, srca;
    uint32_t *dst_line, *dst;
    uint8_t *mask_line, *mask;
    int dst_stride, mask_stride;
    int32_t w;
    __m64 vsrc, vsrca;
    uint64_t srcsrc;

    CHECKPOINT ();

    src = _pixman_image_get_solid (src_image, dst_image->bits.format);

    srca = src >> 24;
    if (src == 0)
        return;

    /* the solid pixel duplicated into both halves of a 64-bit word */
    srcsrc = (uint64_t)src << 32 | src;

    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
    PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);

    vsrc = load8888 (src);
    vsrca = expand_alpha (vsrc);

    while (height--)
    {
        dst = dst_line;
        dst_line += dst_stride;
        mask = mask_line;
        mask_line += mask_stride;
        w = width;

        CHECKPOINT ();

        /* leading pixels until dst is 8-byte aligned */
        while (w && (unsigned long)dst & 7)
        {
            uint64_t m = *mask;

            if (m)
            {
                __m64 vdest = in_over (vsrc, vsrca,
                                       expand_alpha_rev (to_m64 (m)),
                                       load8888 (*dst));

                *dst = store8888 (vdest);
            }

            w--;
            mask++;
            dst++;
        }

        CHECKPOINT ();

        /* two pixels per aligned __m64 */
        while (w >= 2)
        {
            uint64_t m0, m1;

            m0 = *mask;
            m1 = *(mask + 1);

            /* opaque source, both masks full: store the pair directly */
            if (srca == 0xff && (m0 & m1) == 0xff)
            {
                *(uint64_t *)dst = srcsrc;
            }
            else if (m0 | m1)
            {
                __m64 vdest;
                __m64 dest0, dest1;

                vdest = *(__m64 *)dst;

                dest0 = in_over (vsrc, vsrca, expand_alpha_rev (to_m64 (m0)),
                                 expand8888 (vdest, 0));
                dest1 = in_over (vsrc, vsrca, expand_alpha_rev (to_m64 (m1)),
                                 expand8888 (vdest, 1));

                *(__m64 *)dst = pack8888 (dest0, dest1);
            }

            mask += 2;
            dst += 2;
            w -= 2;
        }

        CHECKPOINT ();

        /* trailing pixel */
        while (w)
        {
            uint64_t m = *mask;

            if (m)
            {
                __m64 vdest = load8888 (*dst);

                vdest = in_over (
                    vsrc, vsrca, expand_alpha_rev (to_m64 (m)), vdest);
                *dst = store8888 (vdest);
            }

            w--;
            mask++;
            dst++;
        }
    }

    _mm_empty ();
}
 
/* Solid fill of a 8/16/32 bpp rectangle with the value in 'xor'.
 *
 * For 8 and 16 bpp the value is replicated across a full 32-bit word,
 * then across 64 bits.  Each row is written with a scalar alignment
 * ladder (byte, 16-bit, 32-bit) up to 8-byte alignment, a 64-byte MMX
 * store loop, and a mirrored scalar tail.
 *
 * Returns FALSE (caller must fall back) for unsupported depths.
 */
pixman_bool_t
pixman_fill_mmx (uint32_t *bits,
                 int       stride,
                 int       bpp,
                 int       x,
                 int       y,
                 int       width,
                 int       height,
                 uint32_t xor)
{
    uint64_t fill;
    __m64 vfill;
    uint32_t byte_width;
    uint8_t *byte_line;

#ifdef __GNUC__
    /* Seven extra MMX registers pre-loaded with the fill value so the
     * inner loop can issue eight independent movq stores. */
    __m64 v1, v2, v3, v4, v5, v6, v7;
#endif

    if (bpp != 16 && bpp != 32 && bpp != 8)
	return FALSE;

    if (bpp == 8)
    {
	/* 'stride' arrives in uint32_t units; convert to pixel units,
	 * then to bytes, and replicate the byte across the word. */
	stride = stride * (int) sizeof (uint32_t) / 1;
	byte_line = (uint8_t *)(((uint8_t *)bits) + stride * y + x);
	byte_width = width;
	stride *= 1;
        xor = (xor & 0xff) * 0x01010101;
    }
    else if (bpp == 16)
    {
	stride = stride * (int) sizeof (uint32_t) / 2;
	byte_line = (uint8_t *)(((uint16_t *)bits) + stride * y + x);
	byte_width = 2 * width;
	stride *= 2;
        xor = (xor & 0xffff) * 0x00010001;
    }
    else
    {
	stride = stride * (int) sizeof (uint32_t) / 4;
	byte_line = (uint8_t *)(((uint32_t *)bits) + stride * y + x);
	byte_width = 4 * width;
	stride *= 4;
    }

    fill = ((uint64_t)xor << 32) | xor;
    vfill = to_m64 (fill);

#ifdef __GNUC__
    /* Broadcast vfill into mm registers v1..v7 for the unrolled loop. */
    __asm__ (
        "movq		%7,	%0\n"
        "movq		%7,	%1\n"
        "movq		%7,	%2\n"
        "movq		%7,	%3\n"
        "movq		%7,	%4\n"
        "movq		%7,	%5\n"
        "movq		%7,	%6\n"
	: "=&y" (v1), "=&y" (v2), "=&y" (v3),
	  "=&y" (v4), "=&y" (v5), "=&y" (v6), "=y" (v7)
	: "y" (vfill));
#endif

    while (height--)
    {
	int w;
	uint8_t *d = byte_line;

	byte_line += stride;
	w = byte_width;

	/* Alignment ladder: byte, then 16-bit, then 32-bit stores until
	 * d is 8-byte aligned (or the row runs out). */
	while (w >= 1 && ((unsigned long)d & 1))
	{
	    *(uint8_t *)d = (xor & 0xff);
	    w--;
	    d++;
	}

	while (w >= 2 && ((unsigned long)d & 3))
	{
	    *(uint16_t *)d = xor;
	    w -= 2;
	    d += 2;
	}

	while (w >= 4 && ((unsigned long)d & 7))
	{
	    *(uint32_t *)d = xor;

	    w -= 4;
	    d += 4;
	}

	/* 64 bytes per iteration via eight MMX stores. */
	while (w >= 64)
	{
#ifdef __GNUC__
	    __asm__ (
	        "movq	%1,	  (%0)\n"
	        "movq	%2,	 8(%0)\n"
	        "movq	%3,	16(%0)\n"
	        "movq	%4,	24(%0)\n"
	        "movq	%5,	32(%0)\n"
	        "movq	%6,	40(%0)\n"
	        "movq	%7,	48(%0)\n"
	        "movq	%8,	56(%0)\n"
		:
		: "r" (d),
		  "y" (vfill), "y" (v1), "y" (v2), "y" (v3),
		  "y" (v4), "y" (v5), "y" (v6), "y" (v7)
		: "memory");
#else
	    *(__m64*) (d +  0) = vfill;
	    *(__m64*) (d +  8) = vfill;
	    *(__m64*) (d + 16) = vfill;
	    *(__m64*) (d + 24) = vfill;
	    *(__m64*) (d + 32) = vfill;
	    *(__m64*) (d + 40) = vfill;
	    *(__m64*) (d + 48) = vfill;
	    *(__m64*) (d + 56) = vfill;
#endif
	    w -= 64;
	    d += 64;
	}

	/* Tail: mirror of the alignment ladder for the leftover bytes. */
	while (w >= 4)
	{
	    *(uint32_t *)d = xor;

	    w -= 4;
	    d += 4;
	}
	while (w >= 2)
	{
	    *(uint16_t *)d = xor;
	    w -= 2;
	    d += 2;
	}
	while (w >= 1)
	{
	    *(uint8_t *)d = (xor & 0xff);
	    w--;
	    d++;
	}

    }

    _mm_empty ();
    return TRUE;
}
 
/* SRC compositor: solid source, a8 mask, 32-bit 8888 destination.
 *
 * Per pixel: dest = src IN mask.  Unlike OVER, a zero mask writes zero
 * to the destination (SRC replaces), and a fully transparent source
 * clears the whole rectangle via pixman_fill_mmx.
 */
static void
mmx_composite_src_n_8_8888 (pixman_implementation_t *imp,
                            pixman_op_t              op,
                            pixman_image_t *         src_image,
                            pixman_image_t *         mask_image,
                            pixman_image_t *         dst_image,
                            int32_t                  src_x,
                            int32_t                  src_y,
                            int32_t                  mask_x,
                            int32_t                  mask_y,
                            int32_t                  dest_x,
                            int32_t                  dest_y,
                            int32_t                  width,
                            int32_t                  height)
{
    uint32_t src, srca;
    uint32_t *dst_line, *dst;
    uint8_t *mask_line, *mask;
    int dst_stride, mask_stride;
    int32_t w;
    __m64 vsrc, vsrca;
    uint64_t srcsrc;              /* solid color replicated into 64 bits */

    CHECKPOINT ();

    src = _pixman_image_get_solid (src_image, dst_image->bits.format);

    srca = src >> 24;
    if (src == 0)
    {
	/* SRC with a transparent source simply clears the rectangle. */
	pixman_fill_mmx (dst_image->bits.bits, dst_image->bits.rowstride,
			 PIXMAN_FORMAT_BPP (dst_image->bits.format),
	                 dest_x, dest_y, width, height, 0);
	return;
    }

    srcsrc = (uint64_t)src << 32 | src;

    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
    PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);

    vsrc = load8888 (src);
    vsrca = expand_alpha (vsrc);

    while (height--)
    {
	dst = dst_line;
	dst_line += dst_stride;
	mask = mask_line;
	mask_line += mask_stride;
	w = width;

	CHECKPOINT ();

	/* Scalar head until dst is 8-byte aligned. */
	while (w && (unsigned long)dst & 7)
	{
	    uint64_t m = *mask;

	    if (m)
	    {
		__m64 vdest = in (vsrc, expand_alpha_rev (to_m64 (m)));

		*dst = store8888 (vdest);
	    }
	    else
	    {
		*dst = 0;
	    }

	    w--;
	    mask++;
	    dst++;
	}

	CHECKPOINT ();

	/* Two pixels per iteration. */
	while (w >= 2)
	{
	    uint64_t m0, m1;
	    m0 = *mask;
	    m1 = *(mask + 1);

	    if (srca == 0xff && (m0 & m1) == 0xff)
	    {
		*(uint64_t *)dst = srcsrc;
	    }
	    else if (m0 | m1)
	    {
		__m64 vdest;
		__m64 dest0, dest1;

		vdest = *(__m64 *)dst;

		dest0 = in (vsrc, expand_alpha_rev (to_m64 (m0)));
		dest1 = in (vsrc, expand_alpha_rev (to_m64 (m1)));

		*(__m64 *)dst = pack8888 (dest0, dest1);
	    }
	    else
	    {
		/* Zero coverage under SRC clears both pixels. */
		*(uint64_t *)dst = 0;
	    }

	    mask += 2;
	    dst += 2;
	    w -= 2;
	}

	CHECKPOINT ();

	/* Scalar tail. */
	while (w)
	{
	    uint64_t m = *mask;

	    if (m)
	    {
		__m64 vdest = load8888 (*dst);

		vdest = in (vsrc, expand_alpha_rev (to_m64 (m)));
		*dst = store8888 (vdest);
	    }
	    else
	    {
		*dst = 0;
	    }

	    w--;
	    mask++;
	    dst++;
	}
    }

    _mm_empty ();
}
 
/* OVER compositor: solid source, a8 mask, r5g6b5/b5g6r5 destination.
 *
 * Destination pixels are expanded from 565 to 8888 for the blend and
 * packed back.  The row loop uses a scalar head to 8-byte alignment, a
 * four-pixel MMX body, and a scalar tail.
 */
static void
mmx_composite_over_n_8_0565 (pixman_implementation_t *imp,
                             pixman_op_t              op,
                             pixman_image_t *         src_image,
                             pixman_image_t *         mask_image,
                             pixman_image_t *         dst_image,
                             int32_t                  src_x,
                             int32_t                  src_y,
                             int32_t                  mask_x,
                             int32_t                  mask_y,
                             int32_t                  dest_x,
                             int32_t                  dest_y,
                             int32_t                  width,
                             int32_t                  height)
{
    uint32_t src, srca;
    uint16_t *dst_line, *dst;
    uint8_t *mask_line, *mask;
    int dst_stride, mask_stride;
    int32_t w;
    __m64 vsrc, vsrca, tmp;
    /* srcsrcsrcsrc: the 565-packed solid color replicated 4x for the
     * fully-opaque fast case. */
    uint64_t srcsrcsrcsrc, src16;

    CHECKPOINT ();

    src = _pixman_image_get_solid (src_image, dst_image->bits.format);

    srca = src >> 24;
    /* Transparent solid source: OVER is a no-op. */
    if (src == 0)
	return;

    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1);
    PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);

    vsrc = load8888 (src);
    vsrca = expand_alpha (vsrc);

    tmp = pack_565 (vsrc, _mm_setzero_si64 (), 0);
    src16 = to_uint64 (tmp);

    srcsrcsrcsrc =
	(uint64_t)src16 << 48 | (uint64_t)src16 << 32 |
	(uint64_t)src16 << 16 | (uint64_t)src16;

    while (height--)
    {
	dst = dst_line;
	dst_line += dst_stride;
	mask = mask_line;
	mask_line += mask_stride;
	w = width;

	CHECKPOINT ();

	/* Scalar head until dst is 8-byte aligned. */
	while (w && (unsigned long)dst & 7)
	{
	    uint64_t m = *mask;

	    if (m)
	    {
		uint64_t d = *dst;
		__m64 vd = to_m64 (d);
		__m64 vdest = in_over (
		    vsrc, vsrca, expand_alpha_rev (to_m64 (m)), expand565 (vd, 0));

		vd = pack_565 (vdest, _mm_setzero_si64 (), 0);
		*dst = to_uint64 (vd);
	    }

	    w--;
	    mask++;
	    dst++;
	}

	CHECKPOINT ();

	/* Four 565 pixels per 64-bit iteration. */
	while (w >= 4)
	{
	    uint64_t m0, m1, m2, m3;
	    m0 = *mask;
	    m1 = *(mask + 1);
	    m2 = *(mask + 2);
	    m3 = *(mask + 3);

	    if (srca == 0xff && (m0 & m1 & m2 & m3) == 0xff)
	    {
		/* Opaque source, fully opaque mask quad: plain store. */
		*(uint64_t *)dst = srcsrcsrcsrc;
	    }
	    else if (m0 | m1 | m2 | m3)
	    {
		__m64 vdest;
		__m64 vm0, vm1, vm2, vm3;

		vdest = *(__m64 *)dst;

		/* Blend each 565 lane in place within vdest. */
		vm0 = to_m64 (m0);
		vdest = pack_565 (in_over (vsrc, vsrca, expand_alpha_rev (vm0),
					   expand565 (vdest, 0)), vdest, 0);
	    vm1 = to_m64 (m1);
	    vdest = pack_565 (in_over (vsrc, vsrca, expand_alpha_rev (vm1),
				       expand565 (vdest, 1)), vdest, 1);
	    vm2 = to_m64 (m2);
	    vdest = pack_565 (in_over (vsrc, vsrca, expand_alpha_rev (vm2),
				       expand565 (vdest, 2)), vdest, 2);
	    vm3 = to_m64 (m3);
	    vdest = pack_565 (in_over (vsrc, vsrca, expand_alpha_rev (vm3),
				       expand565 (vdest, 3)), vdest, 3);

		*(__m64 *)dst = vdest;
	    }

	    w -= 4;
	    mask += 4;
	    dst += 4;
	}

	CHECKPOINT ();

	/* Scalar tail. */
	while (w)
	{
	    uint64_t m = *mask;

	    if (m)
	    {
		uint64_t d = *dst;
		__m64 vd = to_m64 (d);
		__m64 vdest = in_over (vsrc, vsrca, expand_alpha_rev (to_m64 (m)),
				       expand565 (vd, 0));
		vd = pack_565 (vdest, _mm_setzero_si64 (), 0);
		*dst = to_uint64 (vd);
	    }

	    w--;
	    mask++;
	    dst++;
	}
    }

    _mm_empty ();
}
 
/* OVER compositor for "pixbuf" sources (non-premultiplied, channels in
 * reversed order) onto a 565 destination.
 *
 * over_rev_non_pre premultiplies and reverses the source on the fly;
 * a fully opaque quad skips the blend and only color-swaps.
 */
static void
mmx_composite_over_pixbuf_0565 (pixman_implementation_t *imp,
                                pixman_op_t              op,
                                pixman_image_t *         src_image,
                                pixman_image_t *         mask_image,
                                pixman_image_t *         dst_image,
                                int32_t                  src_x,
                                int32_t                  src_y,
                                int32_t                  mask_x,
                                int32_t                  mask_y,
                                int32_t                  dest_x,
                                int32_t                  dest_y,
                                int32_t                  width,
                                int32_t                  height)
{
    uint16_t    *dst_line, *dst;
    uint32_t    *src_line, *src;
    int dst_stride, src_stride;
    int32_t w;

    CHECKPOINT ();

    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1);
    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);

#if 0
    /* FIXME */
    assert (src_image->drawable == mask_image->drawable);
#endif

    while (height--)
    {
	dst = dst_line;
	dst_line += dst_stride;
	src = src_line;
	src_line += src_stride;
	w = width;

	CHECKPOINT ();

	/* Scalar head until dst is 8-byte aligned. */
	while (w && (unsigned long)dst & 7)
	{
	    __m64 vsrc = load8888 (*src);
	    uint64_t d = *dst;
	    __m64 vdest = expand565 (to_m64 (d), 0);

	    vdest = pack_565 (over_rev_non_pre (vsrc, vdest), vdest, 0);

	    *dst = to_uint64 (vdest);

	    w--;
	    dst++;
	    src++;
	}

	CHECKPOINT ();

	/* Four pixels per iteration. */
	while (w >= 4)
	{
	    uint32_t s0, s1, s2, s3;
	    unsigned char a0, a1, a2, a3;

	    s0 = *src;
	    s1 = *(src + 1);
	    s2 = *(src + 2);
	    s3 = *(src + 3);

	    a0 = (s0 >> 24);
	    a1 = (s1 >> 24);
	    a2 = (s2 >> 24);
	    a3 = (s3 >> 24);

	    if ((a0 & a1 & a2 & a3) == 0xFF)
	    {
		/* All four source pixels opaque: no blend needed, just
		 * reverse the channel order and pack to 565. */
		__m64 vdest;
		vdest = pack_565 (invert_colors (load8888 (s0)), _mm_setzero_si64 (), 0);
		vdest = pack_565 (invert_colors (load8888 (s1)), vdest, 1);
		vdest = pack_565 (invert_colors (load8888 (s2)), vdest, 2);
		vdest = pack_565 (invert_colors (load8888 (s3)), vdest, 3);

		*(__m64 *)dst = vdest;
	    }
	    else if (s0 | s1 | s2 | s3)
	    {
		__m64 vdest = *(__m64 *)dst;

		vdest = pack_565 (over_rev_non_pre (load8888 (s0), expand565 (vdest, 0)), vdest, 0);
	    vdest = pack_565 (over_rev_non_pre (load8888 (s1), expand565 (vdest, 1)), vdest, 1);
	    vdest = pack_565 (over_rev_non_pre (load8888 (s2), expand565 (vdest, 2)), vdest, 2);
	    vdest = pack_565 (over_rev_non_pre (load8888 (s3), expand565 (vdest, 3)), vdest, 3);

		*(__m64 *)dst = vdest;
	    }
	    /* s0..s3 all zero: OVER with nothing to add is a no-op. */

	    w -= 4;
	    dst += 4;
	    src += 4;
	}

	CHECKPOINT ();

	/* Scalar tail. */
	while (w)
	{
	    __m64 vsrc = load8888 (*src);
	    uint64_t d = *dst;
	    __m64 vdest = expand565 (to_m64 (d), 0);

	    vdest = pack_565 (over_rev_non_pre (vsrc, vdest), vdest, 0);

	    *dst = to_uint64 (vdest);

	    w--;
	    dst++;
	    src++;
	}
    }

    _mm_empty ();
}
 
/* OVER compositor for "pixbuf" sources (non-premultiplied, channels in
 * reversed order) onto an 8888 destination.
 *
 * Same scheme as the 0565 variant but two pixels per MMX iteration and
 * no 565 pack/expand.
 */
static void
mmx_composite_over_pixbuf_8888 (pixman_implementation_t *imp,
                                pixman_op_t              op,
                                pixman_image_t *         src_image,
                                pixman_image_t *         mask_image,
                                pixman_image_t *         dst_image,
                                int32_t                  src_x,
                                int32_t                  src_y,
                                int32_t                  mask_x,
                                int32_t                  mask_y,
                                int32_t                  dest_x,
                                int32_t                  dest_y,
                                int32_t                  width,
                                int32_t                  height)
{
    uint32_t    *dst_line, *dst;
    uint32_t    *src_line, *src;
    int dst_stride, src_stride;
    int32_t w;

    CHECKPOINT ();

    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);

#if 0
    /* FIXME */
    assert (src_image->drawable == mask_image->drawable);
#endif

    while (height--)
    {
	dst = dst_line;
	dst_line += dst_stride;
	src = src_line;
	src_line += src_stride;
	w = width;

	/* Scalar head until dst is 8-byte aligned. */
	while (w && (unsigned long)dst & 7)
	{
	    __m64 s = load8888 (*src);
	    __m64 d = load8888 (*dst);

	    *dst = store8888 (over_rev_non_pre (s, d));

	    w--;
	    dst++;
	    src++;
	}

	/* Two pixels per iteration. */
	while (w >= 2)
	{
	    uint64_t s0, s1;
	    unsigned char a0, a1;
	    __m64 d0, d1;

	    s0 = *src;
	    s1 = *(src + 1);

	    a0 = (s0 >> 24);
	    a1 = (s1 >> 24);

	    if ((a0 & a1) == 0xFF)
	    {
		/* Both source pixels opaque: only the channel swap is
		 * required. */
		d0 = invert_colors (load8888 (s0));
		d1 = invert_colors (load8888 (s1));

		*(__m64 *)dst = pack8888 (d0, d1);
	    }
	    else if (s0 | s1)
	    {
		__m64 vdest = *(__m64 *)dst;

		d0 = over_rev_non_pre (load8888 (s0), expand8888 (vdest, 0));
		d1 = over_rev_non_pre (load8888 (s1), expand8888 (vdest, 1));

		*(__m64 *)dst = pack8888 (d0, d1);
	    }
	    /* Both zero: no-op under OVER. */

	    w -= 2;
	    dst += 2;
	    src += 2;
	}

	/* Scalar tail. */
	while (w)
	{
	    __m64 s = load8888 (*src);
	    __m64 d = load8888 (*dst);

	    *dst = store8888 (over_rev_non_pre (s, d));

	    w--;
	    dst++;
	    src++;
	}
    }

    _mm_empty ();
}
 
/* Component-alpha OVER: solid source, a8r8g8b8 per-channel mask, 565
 * destination.  Each mask pixel carries independent per-channel
 * coverage, so the mask is loaded as a full 8888 value rather than a
 * single alpha byte.
 */
static void
mmx_composite_over_n_8888_0565_ca (pixman_implementation_t *imp,
                                   pixman_op_t              op,
                                   pixman_image_t *         src_image,
                                   pixman_image_t *         mask_image,
                                   pixman_image_t *         dst_image,
                                   int32_t                  src_x,
                                   int32_t                  src_y,
                                   int32_t                  mask_x,
                                   int32_t                  mask_y,
                                   int32_t                  dest_x,
                                   int32_t                  dest_y,
                                   int32_t                  width,
                                   int32_t                  height)
{
    uint32_t src, srca;
    uint16_t    *dst_line;
    uint32_t    *mask_line;
    int dst_stride, mask_stride;
    __m64 vsrc, vsrca;

    CHECKPOINT ();

    src = _pixman_image_get_solid (src_image, dst_image->bits.format);

    srca = src >> 24;
    /* Transparent solid source: OVER is a no-op. */
    if (src == 0)
	return;

    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1);
    PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint32_t, mask_stride, mask_line, 1);

    vsrc = load8888 (src);
    vsrca = expand_alpha (vsrc);

    while (height--)
    {
	int twidth = width;
	uint32_t *p = (uint32_t *)mask_line;
	uint16_t *q = (uint16_t *)dst_line;

	/* Scalar head until the destination is 8-byte aligned. */
	while (twidth && ((unsigned long)q & 7))
	{
	    uint32_t m = *(uint32_t *)p;

	    if (m)
	    {
		uint64_t d = *q;
		__m64 vdest = expand565 (to_m64 (d), 0);
		vdest = pack_565 (in_over (vsrc, vsrca, load8888 (m), vdest), vdest, 0);
		*q = to_uint64 (vdest);
	    }

	    twidth--;
	    p++;
	    q++;
	}

	/* Four 565 pixels per iteration, each blended with its own
	 * per-channel mask. */
	while (twidth >= 4)
	{
	    uint32_t m0, m1, m2, m3;

	    m0 = *p;
	    m1 = *(p + 1);
	    m2 = *(p + 2);
	    m3 = *(p + 3);

	    if ((m0 | m1 | m2 | m3))
	    {
		__m64 vdest = *(__m64 *)q;

		vdest = pack_565 (in_over (vsrc, vsrca, load8888 (m0), expand565 (vdest, 0)), vdest, 0);
		vdest = pack_565 (in_over (vsrc, vsrca, load8888 (m1), expand565 (vdest, 1)), vdest, 1);
		vdest = pack_565 (in_over (vsrc, vsrca, load8888 (m2), expand565 (vdest, 2)), vdest, 2);
		vdest = pack_565 (in_over (vsrc, vsrca, load8888 (m3), expand565 (vdest, 3)), vdest, 3);

		*(__m64 *)q = vdest;
	    }
	    twidth -= 4;
	    p += 4;
	    q += 4;
	}

	/* Scalar tail. */
	while (twidth)
	{
	    uint32_t m;

	    m = *(uint32_t *)p;
	    if (m)
	    {
		uint64_t d = *q;
		__m64 vdest = expand565 (to_m64 (d), 0);
		vdest = pack_565 (in_over (vsrc, vsrca, load8888 (m), vdest), vdest, 0);
		*q = to_uint64 (vdest);
	    }

	    twidth--;
	    p++;
	    q++;
	}

	mask_line += mask_stride;
	dst_line += dst_stride;
    }

    _mm_empty ();
}
 
/* IN compositor: solid source, a8 mask, a8 destination.
 *
 * Per pixel: dest = src.alpha * mask * dest (all values treated as
 * normalized [0,1] coverage).  Four mask/dest bytes are processed per
 * MMX iteration when both pointers are 32-bit aligned; otherwise the
 * scalar MUL_UN8 path is used.
 *
 * Fixes over the previous version:
 *  - the alignment gate tested the pixman_image_t struct pointers
 *    (dst_image / src_image) instead of the pixel pointers that are
 *    actually dereferenced as uint32_t, so it gated the SIMD path on
 *    irrelevant addresses (src_image was not even the image whose
 *    pixels are read).  It now tests dst and mask, which keep a fixed
 *    alignment across the row because both advance by 4.
 *  - removed the dead "m = 0;" assignment and its unused variable.
 */
static void
mmx_composite_in_n_8_8 (pixman_implementation_t *imp,
                        pixman_op_t              op,
                        pixman_image_t *         src_image,
                        pixman_image_t *         mask_image,
                        pixman_image_t *         dst_image,
                        int32_t                  src_x,
                        int32_t                  src_y,
                        int32_t                  mask_x,
                        int32_t                  mask_y,
                        int32_t                  dest_x,
                        int32_t                  dest_y,
                        int32_t                  width,
                        int32_t                  height)
{
    uint8_t *dst_line, *dst;
    uint8_t *mask_line, *mask;
    int dst_stride, mask_stride;
    int32_t w;
    uint32_t src;
    uint8_t sa;
    __m64 vsrc, vsrca;

    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 1);
    PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);

    src = _pixman_image_get_solid (src_image, dst_image->bits.format);

    sa = src >> 24;

    vsrc = load8888 (src);
    vsrca = expand_alpha (vsrc);

    while (height--)
    {
	dst = dst_line;
	dst_line += dst_stride;
	mask = mask_line;
	mask_line += mask_stride;
	w = width;

	/* Test the data pointers that are read/written as uint32_t. */
	if ((((unsigned long)dst & 3) == 0) &&
	    (((unsigned long)mask & 3) == 0))
	{
	    /* Four a8 pixels per iteration. */
	    while (w >= 4)
	    {
		__m64 vmask;
		__m64 vdest;

		vmask = load8888 (*(uint32_t *)mask);
		vdest = load8888 (*(uint32_t *)dst);

		*(uint32_t *)dst = store8888 (in (in (vsrca, vmask), vdest));

		dst += 4;
		mask += 4;
		w -= 4;
	    }
	}

	/* Scalar path / tail: same arithmetic via MUL_UN8. */
	while (w--)
	{
	    uint16_t tmp;
	    uint8_t a;
	    uint32_t m, d;

	    a = *mask++;
	    d = *dst;

	    m = MUL_UN8 (sa, a, tmp);
	    d = MUL_UN8 (m, d, tmp);

	    *dst++ = d;
	}
    }

    _mm_empty ();
}
 
/* IN compositor: a8 source, no mask, a8 destination.
 *
 * Per pixel: dest = src * dest (normalized multiply).  Four bytes are
 * handled per MMX iteration when both row pointers are 32-bit aligned.
 *
 * Fix: the alignment gate used to test the pixman_image_t struct
 * pointers (dst_image / src_image) rather than the pixel pointers that
 * are actually dereferenced as uint32_t.  It now tests dst and src,
 * whose alignment is invariant across the row (both advance by 4).
 */
static void
mmx_composite_in_8_8 (pixman_implementation_t *imp,
                      pixman_op_t              op,
                      pixman_image_t *         src_image,
                      pixman_image_t *         mask_image,
                      pixman_image_t *         dst_image,
                      int32_t                  src_x,
                      int32_t                  src_y,
                      int32_t                  mask_x,
                      int32_t                  mask_y,
                      int32_t                  dest_x,
                      int32_t                  dest_y,
                      int32_t                  width,
                      int32_t                  height)
{
    uint8_t     *dst_line, *dst;
    uint8_t     *src_line, *src;
    int src_stride, dst_stride;
    int32_t w;

    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 1);
    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint8_t, src_stride, src_line, 1);

    while (height--)
    {
	dst = dst_line;
	dst_line += dst_stride;
	src = src_line;
	src_line += src_stride;
	w = width;

	/* Test the data pointers that are read/written as uint32_t. */
	if ((((unsigned long)dst & 3) == 0) &&
	    (((unsigned long)src & 3) == 0))
	{
	    /* Four a8 pixels per iteration. */
	    while (w >= 4)
	    {
		uint32_t *s = (uint32_t *)src;
		uint32_t *d = (uint32_t *)dst;

		*d = store8888 (in (load8888 (*s), load8888 (*d)));

		w -= 4;
		dst += 4;
		src += 4;
	    }
	}

	/* Scalar path / tail. */
	while (w--)
	{
	    uint8_t s, d;
	    uint16_t tmp;

	    s = *src;
	    d = *dst;

	    *dst = MUL_UN8 (s, d, tmp);

	    src++;
	    dst++;
	}
    }

    _mm_empty ();
}
 
/* ADD compositor: solid source, a8 mask, a8 destination.
 *
 * Per pixel: dest = saturate (src.alpha * mask + dest).  Four bytes are
 * handled per MMX iteration when both row pointers are 32-bit aligned.
 *
 * Fix: the alignment gate used to test the pixman_image_t struct
 * pointers (mask_image / dst_image) rather than the pixel pointers that
 * are actually dereferenced as uint32_t.  It now tests mask and dst,
 * whose alignment is invariant across the row (both advance by 4).
 */
static void
mmx_composite_add_n_8_8 (pixman_implementation_t *imp,
                         pixman_op_t              op,
                         pixman_image_t *         src_image,
                         pixman_image_t *         mask_image,
                         pixman_image_t *         dst_image,
                         int32_t                  src_x,
                         int32_t                  src_y,
                         int32_t                  mask_x,
                         int32_t                  mask_y,
                         int32_t                  dest_x,
                         int32_t                  dest_y,
                         int32_t                  width,
                         int32_t                  height)
{
    uint8_t     *dst_line, *dst;
    uint8_t     *mask_line, *mask;
    int dst_stride, mask_stride;
    int32_t w;
    uint32_t src;
    uint8_t sa;
    __m64 vsrc, vsrca;

    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 1);
    PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);

    src = _pixman_image_get_solid (src_image, dst_image->bits.format);

    sa = src >> 24;

    /* Adding a transparent solid source changes nothing. */
    if (src == 0)
	return;

    vsrc = load8888 (src);
    vsrca = expand_alpha (vsrc);

    while (height--)
    {
	dst = dst_line;
	dst_line += dst_stride;
	mask = mask_line;
	mask_line += mask_stride;
	w = width;

	/* Test the data pointers that are read/written as uint32_t. */
	if ((((unsigned long)mask & 3) == 0) &&
	    (((unsigned long)dst & 3) == 0))
	{
	    /* Four a8 pixels per iteration: (srca IN mask) saturating-
	     * added to dest. */
	    while (w >= 4)
	    {
		__m64 vmask = load8888 (*(uint32_t *)mask);
		__m64 vdest = load8888 (*(uint32_t *)dst);

		*(uint32_t *)dst = store8888 (_mm_adds_pu8 (in (vsrca, vmask), vdest));

		w -= 4;
		dst += 4;
		mask += 4;
	    }
	}

	/* Scalar path / tail: MUL_UN8 + saturating ADD_UN8. */
	while (w--)
	{
	    uint16_t tmp;
	    uint16_t a;
	    uint32_t m, d;
	    uint32_t r;

	    a = *mask++;
	    d = *dst;

	    m = MUL_UN8 (sa, a, tmp);
	    r = ADD_UN8 (m, d, tmp);

	    *dst++ = r;
	}
    }

    _mm_empty ();
}
 
/* ADD compositor: a8 source, no mask, a8 destination.
 *
 * Per pixel: dest = saturate (dest + src).  Each row is processed as a
 * scalar prologue (until dest is 8-byte aligned), an eight-pixel MMX
 * saturating-add loop, and a scalar epilogue.
 */
static void
mmx_composite_add_8_8 (pixman_implementation_t *imp,
                       pixman_op_t              op,
                       pixman_image_t *         src_image,
                       pixman_image_t *         mask_image,
                       pixman_image_t *         dst_image,
                       int32_t                  src_x,
                       int32_t                  src_y,
                       int32_t                  mask_x,
                       int32_t                  mask_y,
                       int32_t                  dest_x,
                       int32_t                  dest_y,
                       int32_t                  width,
                       int32_t                  height)
{
    uint8_t *dst_line;
    uint8_t *src_line;
    int dst_stride, src_stride;

    CHECKPOINT ();

    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint8_t, src_stride, src_line, 1);
    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 1);

    while (height--)
    {
	uint8_t *dp = dst_line;
	uint8_t *sp = src_line;
	int32_t count = width;

	dst_line += dst_stride;
	src_line += src_stride;

	/* Scalar prologue until dp is 8-byte aligned.  The sum of two
	 * bytes is at most 510, so bit 8 of the 16-bit sum tells us
	 * whether to clamp at 0xff. */
	while (count && ((unsigned long)dp & 7))
	{
	    uint16_t sum = (uint16_t)*dp + *sp;

	    *dp = (sum & 0x100) ? 0xff : (uint8_t)sum;

	    dp++;
	    sp++;
	    count--;
	}

	/* Eight pixels at a time with the MMX saturating byte adder. */
	for (; count >= 8; count -= 8, dp += 8, sp += 8)
	    *(__m64 *)dp = _mm_adds_pu8 (*(__m64 *)sp, *(__m64 *)dp);

	/* Scalar epilogue for the remaining 0..7 pixels. */
	while (count)
	{
	    uint16_t sum = (uint16_t)*dp + *sp;

	    *dp = (sum & 0x100) ? 0xff : (uint8_t)sum;

	    dp++;
	    sp++;
	    count--;
	}
    }

    _mm_empty ();
}
 
/* ADD compositor: 8888 source, no mask, 8888 destination.
 *
 * Per pixel: dest = per-channel saturating add of src and dest.  Rows
 * are processed as a one-pixel head to 8-byte alignment, a two-pixel
 * MMX body, and at most one trailing pixel.
 */
static void
mmx_composite_add_8888_8888 (pixman_implementation_t *imp,
                             pixman_op_t              op,
                             pixman_image_t *         src_image,
                             pixman_image_t *         mask_image,
                             pixman_image_t *         dst_image,
                             int32_t                  src_x,
                             int32_t                  src_y,
                             int32_t                  mask_x,
                             int32_t                  mask_y,
                             int32_t                  dest_x,
                             int32_t                  dest_y,
                             int32_t                  width,
                             int32_t                  height)
{
    uint32_t *dst_line;
    uint32_t *src_line;
    int dst_stride, src_stride;

    CHECKPOINT ();

    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);

    while (height--)
    {
	uint32_t *dp = dst_line;
	uint32_t *sp = src_line;
	int32_t count = width;

	dst_line += dst_stride;
	src_line += src_stride;

	/* One pixel at a time until the destination is 8-byte aligned. */
	while (count && (unsigned long)dp & 7)
	{
	    *dp = _mm_cvtsi64_si32 (
	        _mm_adds_pu8 (_mm_cvtsi32_si64 (*sp), _mm_cvtsi32_si64 (*dp)));

	    dp++;
	    sp++;
	    count--;
	}

	/* Two pixels (64 bits) per iteration. */
	while (count >= 2)
	{
	    __m64 sum = _mm_adds_pu8 (*(__m64 *)sp, *(__m64 *)dp);

	    *(uint64_t *)dp = to_uint64 (sum);

	    dp += 2;
	    sp += 2;
	    count -= 2;
	}

	/* At most one trailing pixel. */
	if (count)
	{
	    *dp = _mm_cvtsi64_si32 (
	        _mm_adds_pu8 (_mm_cvtsi32_si64 (*sp), _mm_cvtsi32_si64 (*dp)));
	}
    }

    _mm_empty ();
}
 
/* Straight rectangle copy between two buffers of equal bpp (16 or 32).
 *
 * Rows are copied with a scalar 16/32-bit alignment ladder, a 64-byte
 * MMX loop (eight movq loads + eight movq stores), then a mirrored
 * scalar tail.  Returns FALSE for unsupported depth combinations so
 * the caller can fall back to a generic blitter.
 */
static pixman_bool_t
pixman_blt_mmx (uint32_t *src_bits,
                uint32_t *dst_bits,
                int       src_stride,
                int       dst_stride,
                int       src_bpp,
                int       dst_bpp,
                int       src_x,
                int       src_y,
                int       dst_x,
                int       dst_y,
                int       width,
                int       height)
{
    uint8_t *   src_bytes;
    uint8_t *   dst_bytes;
    int byte_width;

    if (src_bpp != dst_bpp)
	return FALSE;

    if (src_bpp == 16)
    {
	/* Strides arrive in uint32_t units; convert to pixels, then to
	 * bytes. */
	src_stride = src_stride * (int) sizeof (uint32_t) / 2;
	dst_stride = dst_stride * (int) sizeof (uint32_t) / 2;
	src_bytes = (uint8_t *)(((uint16_t *)src_bits) + src_stride * (src_y) + (src_x));
	dst_bytes = (uint8_t *)(((uint16_t *)dst_bits) + dst_stride * (dst_y) + (dst_x));
	byte_width = 2 * width;
	src_stride *= 2;
	dst_stride *= 2;
    }
    else if (src_bpp == 32)
    {
	src_stride = src_stride * (int) sizeof (uint32_t) / 4;
	dst_stride = dst_stride * (int) sizeof (uint32_t) / 4;
	src_bytes = (uint8_t *)(((uint32_t *)src_bits) + src_stride * (src_y) + (src_x));
	dst_bytes = (uint8_t *)(((uint32_t *)dst_bits) + dst_stride * (dst_y) + (dst_x));
	byte_width = 4 * width;
	src_stride *= 4;
	dst_stride *= 4;
    }
    else
    {
	return FALSE;
    }

    while (height--)
    {
	int w;
	uint8_t *s = src_bytes;
	uint8_t *d = dst_bytes;
	src_bytes += src_stride;
	dst_bytes += dst_stride;
	w = byte_width;

	/* Alignment ladder on the destination pointer. */
	while (w >= 2 && ((unsigned long)d & 3))
	{
	    *(uint16_t *)d = *(uint16_t *)s;
	    w -= 2;
	    s += 2;
	    d += 2;
	}

	while (w >= 4 && ((unsigned long)d & 7))
	{
	    *(uint32_t *)d = *(uint32_t *)s;

	    w -= 4;
	    s += 4;
	    d += 4;
	}

	/* 64 bytes per iteration through the eight MMX registers. */
	while (w >= 64)
	{
#if defined (__GNUC__) || (defined(__SUNPRO_C) && (__SUNPRO_C >= 0x590))
	    __asm__ (
	        "movq	  (%1),	  %%mm0\n"
	        "movq	 8(%1),	  %%mm1\n"
	        "movq	16(%1),	  %%mm2\n"
	        "movq	24(%1),	  %%mm3\n"
	        "movq	32(%1),	  %%mm4\n"
	        "movq	40(%1),	  %%mm5\n"
	        "movq	48(%1),	  %%mm6\n"
	        "movq	56(%1),	  %%mm7\n"

	        "movq	%%mm0,	  (%0)\n"
	        "movq	%%mm1,	 8(%0)\n"
	        "movq	%%mm2,	16(%0)\n"
	        "movq	%%mm3,	24(%0)\n"
	        "movq	%%mm4,	32(%0)\n"
	        "movq	%%mm5,	40(%0)\n"
	        "movq	%%mm6,	48(%0)\n"
	        "movq	%%mm7,	56(%0)\n"
		:
		: "r" (d), "r" (s)
		: "memory",
		  "%mm0", "%mm1", "%mm2", "%mm3",
		  "%mm4", "%mm5", "%mm6", "%mm7");
#else
	    __m64 v0 = *(__m64 *)(s + 0);
	    __m64 v1 = *(__m64 *)(s + 8);
	    __m64 v2 = *(__m64 *)(s + 16);
	    __m64 v3 = *(__m64 *)(s + 24);
	    __m64 v4 = *(__m64 *)(s + 32);
	    __m64 v5 = *(__m64 *)(s + 40);
	    __m64 v6 = *(__m64 *)(s + 48);
	    __m64 v7 = *(__m64 *)(s + 56);
	    *(__m64 *)(d + 0)  = v0;
	    *(__m64 *)(d + 8)  = v1;
	    *(__m64 *)(d + 16) = v2;
	    *(__m64 *)(d + 24) = v3;
	    *(__m64 *)(d + 32) = v4;
	    *(__m64 *)(d + 40) = v5;
	    *(__m64 *)(d + 48) = v6;
	    *(__m64 *)(d + 56) = v7;
#endif

	    w -= 64;
	    s += 64;
	    d += 64;
	}
	/* Tail: mirror of the alignment ladder. */
	while (w >= 4)
	{
	    *(uint32_t *)d = *(uint32_t *)s;

	    w -= 4;
	    s += 4;
	    d += 4;
	}
	if (w >= 2)
	{
	    *(uint16_t *)d = *(uint16_t *)s;
	    w -= 2;
	    s += 2;
	    d += 2;
	}
    }

    _mm_empty ();

    return TRUE;
}
 
/* SRC-operator copy between two bits images.  All fast-path table
 * entries that route here pair formats of identical depth, so the
 * underlying blt supports them; its boolean result carries no extra
 * information and is deliberately ignored. */
static void
mmx_composite_copy_area (pixman_implementation_t *imp,
                         pixman_op_t              op,
                         pixman_image_t *         src_image,
                         pixman_image_t *         mask_image,
                         pixman_image_t *         dst_image,
                         int32_t                  src_x,
                         int32_t                  src_y,
                         int32_t                  mask_x,
                         int32_t                  mask_y,
                         int32_t                  dest_x,
                         int32_t                  dest_y,
                         int32_t                  width,
                         int32_t                  height)
{
    bits_image_t *src_bits = &src_image->bits;
    bits_image_t *dst_bits = &dst_image->bits;

    (void) pixman_blt_mmx (src_bits->bits,
                           dst_bits->bits,
                           src_bits->rowstride,
                           dst_bits->rowstride,
                           PIXMAN_FORMAT_BPP (src_bits->format),
                           PIXMAN_FORMAT_BPP (dst_bits->format),
                           src_x, src_y, dest_x, dest_y, width, height);
}
 
#if 0
/* Disabled OVER compositor: x888 source (alpha forced opaque), a8 mask,
 * 8888 destination.  Kept for reference; per the note next to the
 * disabled fast-path entries below, it was not measurably faster than
 * the generic code. */
static void
mmx_composite_over_x888_8_8888 (pixman_implementation_t *imp,
                                pixman_op_t              op,
                                pixman_image_t *         src_image,
                                pixman_image_t *         mask_image,
                                pixman_image_t *         dst_image,
                                int32_t                  src_x,
                                int32_t                  src_y,
                                int32_t                  mask_x,
                                int32_t                  mask_y,
                                int32_t                  dest_x,
                                int32_t                  dest_y,
                                int32_t                  width,
                                int32_t                  height)
{
    uint32_t  *src, *src_line;
    uint32_t  *dst, *dst_line;
    uint8_t  *mask, *mask_line;
    int src_stride, mask_stride, dst_stride;
    int32_t w;

    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
    PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);
    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);

    while (height--)
    {
	src = src_line;
	src_line += src_stride;
	dst = dst_line;
	dst_line += dst_stride;
	mask = mask_line;
	mask_line += mask_stride;

	w = width;

	while (w--)
	{
	    uint64_t m = *mask;

	    if (m)
	    {
		/* The source's x-channel is replaced by full alpha. */
		__m64 s = load8888 (*src | 0xff000000);

		if (m == 0xff)
		{
		    *dst = store8888 (s);
		}
		else
		{
		    __m64 sa = expand_alpha (s);
		    __m64 vm = expand_alpha_rev (to_m64 (m));
		    __m64 vdest = in_over (s, sa, vm, load8888 (*dst));

		    *dst = store8888 (vdest);
		}
	    }

	    mask++;
	    dst++;
	    src++;
	}
    }

    _mm_empty ();
}
#endif
 
/* Dispatch table mapping (operator, source format, mask format, dest
 * format) to the MMX compositors above.  Scanned in order by the
 * fast-path framework; the PIXMAN_OP_NONE entry terminates the table. */
static const pixman_fast_path_t mmx_fast_paths[] =
{
    /* OVER: solid source through an a8 or per-channel (CA) mask. */
    PIXMAN_STD_FAST_PATH    (OVER, solid,    a8,       r5g6b5,   mmx_composite_over_n_8_0565       ),
    PIXMAN_STD_FAST_PATH    (OVER, solid,    a8,       b5g6r5,   mmx_composite_over_n_8_0565       ),
    PIXMAN_STD_FAST_PATH    (OVER, solid,    a8,       a8r8g8b8, mmx_composite_over_n_8_8888       ),
    PIXMAN_STD_FAST_PATH    (OVER, solid,    a8,       x8r8g8b8, mmx_composite_over_n_8_8888       ),
    PIXMAN_STD_FAST_PATH    (OVER, solid,    a8,       a8b8g8r8, mmx_composite_over_n_8_8888       ),
    PIXMAN_STD_FAST_PATH    (OVER, solid,    a8,       x8b8g8r8, mmx_composite_over_n_8_8888       ),
    PIXMAN_STD_FAST_PATH_CA (OVER, solid,    a8r8g8b8, a8r8g8b8, mmx_composite_over_n_8888_8888_ca ),
    PIXMAN_STD_FAST_PATH_CA (OVER, solid,    a8r8g8b8, x8r8g8b8, mmx_composite_over_n_8888_8888_ca ),
    PIXMAN_STD_FAST_PATH_CA (OVER, solid,    a8r8g8b8, r5g6b5,   mmx_composite_over_n_8888_0565_ca ),
    PIXMAN_STD_FAST_PATH_CA (OVER, solid,    a8b8g8r8, a8b8g8r8, mmx_composite_over_n_8888_8888_ca ),
    PIXMAN_STD_FAST_PATH_CA (OVER, solid,    a8b8g8r8, x8b8g8r8, mmx_composite_over_n_8888_8888_ca ),
    PIXMAN_STD_FAST_PATH_CA (OVER, solid,    a8b8g8r8, b5g6r5,   mmx_composite_over_n_8888_0565_ca ),
    /* OVER: non-premultiplied pixbuf sources. */
    PIXMAN_STD_FAST_PATH    (OVER, pixbuf,   pixbuf,   a8r8g8b8, mmx_composite_over_pixbuf_8888    ),
    PIXMAN_STD_FAST_PATH    (OVER, pixbuf,   pixbuf,   x8r8g8b8, mmx_composite_over_pixbuf_8888    ),
    PIXMAN_STD_FAST_PATH    (OVER, pixbuf,   pixbuf,   r5g6b5,   mmx_composite_over_pixbuf_0565    ),
    PIXMAN_STD_FAST_PATH    (OVER, rpixbuf,  rpixbuf,  a8b8g8r8, mmx_composite_over_pixbuf_8888    ),
    PIXMAN_STD_FAST_PATH    (OVER, rpixbuf,  rpixbuf,  x8b8g8r8, mmx_composite_over_pixbuf_8888    ),
    PIXMAN_STD_FAST_PATH    (OVER, rpixbuf,  rpixbuf,  b5g6r5,   mmx_composite_over_pixbuf_0565    ),
    /* OVER: image source with a solid mask. */
    PIXMAN_STD_FAST_PATH    (OVER, x8r8g8b8, solid,    a8r8g8b8, mmx_composite_over_x888_n_8888    ),
    PIXMAN_STD_FAST_PATH    (OVER, x8r8g8b8, solid,    x8r8g8b8, mmx_composite_over_x888_n_8888    ),
    PIXMAN_STD_FAST_PATH    (OVER, x8b8g8r8, solid,    a8b8g8r8, mmx_composite_over_x888_n_8888    ),
    PIXMAN_STD_FAST_PATH    (OVER, x8b8g8r8, solid,    x8b8g8r8, mmx_composite_over_x888_n_8888    ),
    PIXMAN_STD_FAST_PATH    (OVER, a8r8g8b8, solid,    a8r8g8b8, mmx_composite_over_8888_n_8888    ),
    PIXMAN_STD_FAST_PATH    (OVER, a8r8g8b8, solid,    x8r8g8b8, mmx_composite_over_8888_n_8888    ),
    PIXMAN_STD_FAST_PATH    (OVER, a8b8g8r8, solid,    a8b8g8r8, mmx_composite_over_8888_n_8888    ),
    PIXMAN_STD_FAST_PATH    (OVER, a8b8g8r8, solid,    x8b8g8r8, mmx_composite_over_8888_n_8888    ),
#if 0
    /* FIXME: This code is commented out since it's apparently
     * not actually faster than the generic code.
     */
    /* NOTE(review): "x8b8r8g8" below looks like a typo for "x8b8g8r8";
     * the fourth entry's a8r8g8b8 dest also looks suspect for a BGR
     * source — confirm before ever re-enabling this section. */
    PIXMAN_STD_FAST_PATH    (OVER, x8r8g8b8, a8,       x8r8g8b8, mmx_composite_over_x888_8_8888    ),
    PIXMAN_STD_FAST_PATH    (OVER, x8r8g8b8, a8,       a8r8g8b8, mmx_composite_over_x888_8_8888    ),
    PIXMAN_STD_FAST_PATH    (OVER, x8b8r8g8, a8,       x8b8g8r8, mmx_composite_over_x888_8_8888    ),
    PIXMAN_STD_FAST_PATH    (OVER, x8b8r8g8, a8,       a8r8g8b8, mmx_composite_over_x888_8_8888    ),
#endif
    /* OVER: solid source, no mask. */
    PIXMAN_STD_FAST_PATH    (OVER, solid,    null,     a8r8g8b8, mmx_composite_over_n_8888         ),
    PIXMAN_STD_FAST_PATH    (OVER, solid,    null,     x8r8g8b8, mmx_composite_over_n_8888         ),
    PIXMAN_STD_FAST_PATH    (OVER, solid,    null,     r5g6b5,   mmx_composite_over_n_0565         ),
    /* OVER of an x888 source is a plain copy (source alpha is opaque). */
    PIXMAN_STD_FAST_PATH    (OVER, x8r8g8b8, null,     x8r8g8b8, mmx_composite_copy_area           ),
    PIXMAN_STD_FAST_PATH    (OVER, x8b8g8r8, null,     x8b8g8r8, mmx_composite_copy_area           ),

    PIXMAN_STD_FAST_PATH    (OVER, a8r8g8b8, null,     a8r8g8b8, mmx_composite_over_8888_8888      ),
    PIXMAN_STD_FAST_PATH    (OVER, a8r8g8b8, null,     x8r8g8b8, mmx_composite_over_8888_8888      ),
    PIXMAN_STD_FAST_PATH    (OVER, a8r8g8b8, null,     r5g6b5,   mmx_composite_over_8888_0565      ),
    PIXMAN_STD_FAST_PATH    (OVER, a8b8g8r8, null,     a8b8g8r8, mmx_composite_over_8888_8888      ),
    PIXMAN_STD_FAST_PATH    (OVER, a8b8g8r8, null,     x8b8g8r8, mmx_composite_over_8888_8888      ),
    PIXMAN_STD_FAST_PATH    (OVER, a8b8g8r8, null,     b5g6r5,   mmx_composite_over_8888_0565      ),

    /* ADD paths. */
    PIXMAN_STD_FAST_PATH    (ADD,  a8r8g8b8, null,     a8r8g8b8, mmx_composite_add_8888_8888       ),
    PIXMAN_STD_FAST_PATH    (ADD,  a8b8g8r8, null,     a8b8g8r8, mmx_composite_add_8888_8888       ),
    PIXMAN_STD_FAST_PATH    (ADD,  a8,       null,     a8,       mmx_composite_add_8_8             ),
    PIXMAN_STD_FAST_PATH    (ADD,  solid,    a8,       a8,       mmx_composite_add_n_8_8           ),

    /* SRC paths: masked solid fills and straight copies. */
    PIXMAN_STD_FAST_PATH    (SRC,  solid,    a8,       a8r8g8b8, mmx_composite_src_n_8_8888        ),
    PIXMAN_STD_FAST_PATH    (SRC,  solid,    a8,       x8r8g8b8, mmx_composite_src_n_8_8888        ),
    PIXMAN_STD_FAST_PATH    (SRC,  solid,    a8,       a8b8g8r8, mmx_composite_src_n_8_8888        ),
    PIXMAN_STD_FAST_PATH    (SRC,  solid,    a8,       x8b8g8r8, mmx_composite_src_n_8_8888        ),
    PIXMAN_STD_FAST_PATH    (SRC,  a8r8g8b8, null,     a8r8g8b8, mmx_composite_copy_area           ),
    PIXMAN_STD_FAST_PATH    (SRC,  a8b8g8r8, null,     a8b8g8r8, mmx_composite_copy_area           ),
    PIXMAN_STD_FAST_PATH    (SRC,  a8r8g8b8, null,     x8r8g8b8, mmx_composite_copy_area           ),
    PIXMAN_STD_FAST_PATH    (SRC,  a8b8g8r8, null,     x8b8g8r8, mmx_composite_copy_area           ),
    PIXMAN_STD_FAST_PATH    (SRC,  x8r8g8b8, null,     x8r8g8b8, mmx_composite_copy_area           ),
    PIXMAN_STD_FAST_PATH    (SRC,  x8b8g8r8, null,     x8b8g8r8, mmx_composite_copy_area           ),
    PIXMAN_STD_FAST_PATH    (SRC,  r5g6b5,   null,     r5g6b5,   mmx_composite_copy_area           ),
    PIXMAN_STD_FAST_PATH    (SRC,  b5g6r5,   null,     b5g6r5,   mmx_composite_copy_area           ),

    /* IN paths. */
    PIXMAN_STD_FAST_PATH    (IN,   a8,       null,     a8,       mmx_composite_in_8_8              ),
    PIXMAN_STD_FAST_PATH    (IN,   solid,    a8,       a8,       mmx_composite_in_n_8_8            ),

    { PIXMAN_OP_NONE },	/* sentinel */
};
 
/* Implementation-level blt entry point: try the MMX blitter first and
 * fall back to the delegate implementation when it cannot handle the
 * requested depth combination. */
static pixman_bool_t
mmx_blt (pixman_implementation_t *imp,
         uint32_t *               src_bits,
         uint32_t *               dst_bits,
         int                      src_stride,
         int                      dst_stride,
         int                      src_bpp,
         int                      dst_bpp,
         int                      src_x,
         int                      src_y,
         int                      dst_x,
         int                      dst_y,
         int                      width,
         int                      height)
{
    if (pixman_blt_mmx (src_bits, dst_bits, src_stride, dst_stride,
                        src_bpp, dst_bpp, src_x, src_y, dst_x, dst_y,
                        width, height))
    {
	return TRUE;
    }

    /* Unsupported by the MMX path: let the delegate handle it. */
    return _pixman_implementation_blt (
	imp->delegate,
	src_bits, dst_bits, src_stride, dst_stride, src_bpp, dst_bpp,
	src_x, src_y, dst_x, dst_y, width, height);
}
 
/* Implementation-level fill entry point: try the MMX filler first and
 * fall back to the delegate for depths it does not support. */
static pixman_bool_t
mmx_fill (pixman_implementation_t *imp,
          uint32_t *               bits,
          int                      stride,
          int                      bpp,
          int                      x,
          int                      y,
          int                      width,
          int                      height,
          uint32_t xor)
{
    if (pixman_fill_mmx (bits, stride, bpp, x, y, width, height, xor))
	return TRUE;

    /* Unsupported bpp: delegate downward. */
    return _pixman_implementation_fill (
	imp->delegate, bits, stride, bpp, x, y, width, height, xor);
}
 
/* Construct the MMX implementation object.  It delegates to the
 * fast-path implementation created here, registers the fast-path table
 * above, and overrides the unified (combine_32) and component-alpha
 * (combine_32_ca) combiners plus blt/fill with MMX versions. */
pixman_implementation_t *
_pixman_implementation_create_mmx (void)
{
    pixman_implementation_t *general = _pixman_implementation_create_fast_path ();
    pixman_implementation_t *imp = _pixman_implementation_create (general, mmx_fast_paths);

    /* Unified-alpha combiners. */
    imp->combine_32[PIXMAN_OP_OVER] = mmx_combine_over_u;
    imp->combine_32[PIXMAN_OP_OVER_REVERSE] = mmx_combine_over_reverse_u;
    imp->combine_32[PIXMAN_OP_IN] = mmx_combine_in_u;
    imp->combine_32[PIXMAN_OP_IN_REVERSE] = mmx_combine_in_reverse_u;
    imp->combine_32[PIXMAN_OP_OUT] = mmx_combine_out_u;
    imp->combine_32[PIXMAN_OP_OUT_REVERSE] = mmx_combine_out_reverse_u;
    imp->combine_32[PIXMAN_OP_ATOP] = mmx_combine_atop_u;
    imp->combine_32[PIXMAN_OP_ATOP_REVERSE] = mmx_combine_atop_reverse_u;
    imp->combine_32[PIXMAN_OP_XOR] = mmx_combine_xor_u;
    imp->combine_32[PIXMAN_OP_ADD] = mmx_combine_add_u;
    imp->combine_32[PIXMAN_OP_SATURATE] = mmx_combine_saturate_u;

    /* Component-alpha combiners. */
    imp->combine_32_ca[PIXMAN_OP_SRC] = mmx_combine_src_ca;
    imp->combine_32_ca[PIXMAN_OP_OVER] = mmx_combine_over_ca;
    imp->combine_32_ca[PIXMAN_OP_OVER_REVERSE] = mmx_combine_over_reverse_ca;
    imp->combine_32_ca[PIXMAN_OP_IN] = mmx_combine_in_ca;
    imp->combine_32_ca[PIXMAN_OP_IN_REVERSE] = mmx_combine_in_reverse_ca;
    imp->combine_32_ca[PIXMAN_OP_OUT] = mmx_combine_out_ca;
    imp->combine_32_ca[PIXMAN_OP_OUT_REVERSE] = mmx_combine_out_reverse_ca;
    imp->combine_32_ca[PIXMAN_OP_ATOP] = mmx_combine_atop_ca;
    imp->combine_32_ca[PIXMAN_OP_ATOP_REVERSE] = mmx_combine_atop_reverse_ca;
    imp->combine_32_ca[PIXMAN_OP_XOR] = mmx_combine_xor_ca;
    imp->combine_32_ca[PIXMAN_OP_ADD] = mmx_combine_add_ca;

    imp->blt = mmx_blt;
    imp->fill = mmx_fill;

    return imp;
}
 
#endif /* USE_MMX */
/programs/develop/libraries/pixman/pixman-private.h
0,0 → 1,873
 
#ifndef PIXMAN_PRIVATE_H
#define PIXMAN_PRIVATE_H
 
#define PIXMAN_DISABLE_DEPRECATED
#define PIXMAN_USE_INTERNAL_API
 
#include "pixman.h"
#include <time.h>
#include <assert.h>
#include <stdio.h>
#include <string.h>
 
#include "pixman-compiler.h"
 
/*
* Images
*/
/* Forward typedefs for the internal image type hierarchy.  Every image
 * variant begins with an image_common_t; source_image_t, bits_image_t
 * and the gradient/solid types extend it. */
typedef struct image_common image_common_t;
typedef struct source_image source_image_t;
typedef struct solid_fill solid_fill_t;
typedef struct gradient gradient_t;
typedef struct linear_gradient linear_gradient_t;
typedef struct horizontal_gradient horizontal_gradient_t;
typedef struct vertical_gradient vertical_gradient_t;
typedef struct conical_gradient conical_gradient_t;
typedef struct radial_gradient radial_gradient_t;
typedef struct bits_image bits_image_t;
typedef struct circle circle_t;
 
/* Fetch one scanline of 'width' pixels starting at (x, y) into
 * 'buffer'; 'mask' may restrict which pixels need to be fetched. */
typedef void (*fetch_scanline_t) (pixman_image_t *image,
                                  int             x,
                                  int             y,
                                  int             width,
                                  uint32_t       *buffer,
                                  const uint32_t *mask);

/* Fetch a single pixel at (x, y) in 32-bit form. */
typedef uint32_t (*fetch_pixel_32_t) (bits_image_t *image,
				      int           x,
                                      int           y);

/* Fetch a single pixel at (x, y) in 64-bit (wide-channel) form. */
typedef uint64_t (*fetch_pixel_64_t) (bits_image_t *image,
				      int           x,
                                      int           y);

/* Store 'width' pixel values back into the image starting at (x, y). */
typedef void (*store_scanline_t) (bits_image_t *  image,
                                  int             x,
                                  int             y,
                                  int             width,
                                  const uint32_t *values);
 
typedef enum
{
BITS,
LINEAR,
CONICAL,
RADIAL,
SOLID
} image_type_t;
 
typedef enum
{
SOURCE_IMAGE_CLASS_UNKNOWN,
SOURCE_IMAGE_CLASS_HORIZONTAL,
} source_image_class_t;
 
typedef source_image_class_t (*classify_func_t) (pixman_image_t *image,
int x,
int y,
int width,
int height);
typedef void (*property_changed_func_t) (pixman_image_t *image);
 
/* State shared by every image type (the first member of each image
 * struct, so any pixman_image_t can be viewed as an image_common_t).
 */
struct image_common
{
    image_type_t                type;             /* BITS, LINEAR, CONICAL, RADIAL or SOLID */
    int32_t                     ref_count;        /* reference count for pixman_image_ref/unref */
    pixman_region32_t           clip_region;
    int32_t                     alpha_count;      /* How many times this image is being used as an alpha map */
    pixman_bool_t               have_clip_region; /* FALSE if there is no clip */
    pixman_bool_t               client_clip;      /* Whether the source clip was
                                                     set by a client */
    pixman_bool_t               clip_sources;     /* Whether the clip applies when
						   * the image is used as a source
						   */
    pixman_bool_t               dirty;            /* set when properties change; cleared on validate */
    pixman_transform_t *        transform;        /* NULL means identity */
    pixman_repeat_t             repeat;
    pixman_filter_t             filter;
    pixman_fixed_t *            filter_params;
    int                         n_filter_params;
    bits_image_t *              alpha_map;        /* optional separate alpha channel */
    int                         alpha_origin_x;
    int                         alpha_origin_y;
    pixman_bool_t               component_alpha;
    classify_func_t             classify;
    property_changed_func_t     property_changed; /* installs the fetchers below when properties change */
    fetch_scanline_t            get_scanline_32;
    fetch_scanline_t            get_scanline_64;

    pixman_image_destroy_func_t destroy_func;     /* user callback invoked at destruction */
    void *                      destroy_data;

    uint32_t                    flags;            /* FAST_PATH_* bits describing this image */
    pixman_format_code_t        extended_format_code;
};
 
struct source_image
{
image_common_t common;
};
 
struct solid_fill
{
source_image_t common;
pixman_color_t color;
 
uint32_t color_32;
uint64_t color_64;
};
 
struct gradient
{
source_image_t common;
int n_stops;
pixman_gradient_stop_t *stops;
int stop_range;
};
 
struct linear_gradient
{
gradient_t common;
pixman_point_fixed_t p1;
pixman_point_fixed_t p2;
};
 
struct circle
{
pixman_fixed_t x;
pixman_fixed_t y;
pixman_fixed_t radius;
};
 
struct radial_gradient
{
gradient_t common;
 
circle_t c1;
circle_t c2;
 
circle_t delta;
double a;
double inva;
double mindr;
};
 
struct conical_gradient
{
gradient_t common;
pixman_point_fixed_t center;
double angle;
};
 
/* An image backed by an actual pixel buffer. */
struct bits_image
{
    image_common_t             common;
    pixman_format_code_t       format;      /* pixel format of the buffer */
    const pixman_indexed_t *   indexed;     /* palette for indexed formats */
    int                        width;
    int                        height;
    uint32_t *                 bits;        /* pixel data */
    uint32_t *                 free_me;     /* presumably the pointer to free on destruction
					     * when pixman owns the buffer — confirm in
					     * pixman-bits-image.c */
    int                        rowstride;   /* in number of uint32_t's */

    /* Narrow (32 bpp intermediate) accessors */
    fetch_scanline_t           fetch_scanline_32;
    fetch_pixel_32_t           fetch_pixel_32;
    store_scanline_t           store_scanline_32;

    /* Wide (64 bpp intermediate) accessors */
    fetch_scanline_t           fetch_scanline_64;
    fetch_pixel_64_t           fetch_pixel_64;
    store_scanline_t           store_scanline_64;

    /* Used for indirect access to the bits */
    pixman_read_memory_func_t  read_func;
    pixman_write_memory_func_t write_func;
};
 
union pixman_image
{
image_type_t type;
image_common_t common;
bits_image_t bits;
source_image_t source;
gradient_t gradient;
linear_gradient_t linear;
conical_gradient_t conical;
radial_gradient_t radial;
solid_fill_t solid;
};
 
void
_pixman_bits_image_setup_accessors (bits_image_t *image);
 
void
_pixman_image_get_scanline_generic_64 (pixman_image_t *image,
int x,
int y,
int width,
uint32_t * buffer,
const uint32_t *mask);
 
source_image_class_t
_pixman_image_classify (pixman_image_t *image,
int x,
int y,
int width,
int height);
 
void
_pixman_image_get_scanline_32 (pixman_image_t *image,
int x,
int y,
int width,
uint32_t * buffer,
const uint32_t *mask);
 
/* Even though the type of buffer is uint32_t *, the function actually expects
 * a uint64_t *buffer.
 */
void
_pixman_image_get_scanline_64 (pixman_image_t *image,
int x,
int y,
int width,
uint32_t * buffer,
const uint32_t *unused);
 
void
_pixman_image_store_scanline_32 (bits_image_t * image,
int x,
int y,
int width,
const uint32_t *buffer);
 
/* Even though the type of buffer is uint32_t *, the function
* actually expects a uint64_t *buffer.
*/
void
_pixman_image_store_scanline_64 (bits_image_t * image,
int x,
int y,
int width,
const uint32_t *buffer);
 
pixman_image_t *
_pixman_image_allocate (void);
 
pixman_bool_t
_pixman_init_gradient (gradient_t * gradient,
const pixman_gradient_stop_t *stops,
int n_stops);
void
_pixman_image_reset_clip_region (pixman_image_t *image);
 
void
_pixman_image_validate (pixman_image_t *image);
 
uint32_t
_pixman_image_get_solid (pixman_image_t * image,
pixman_format_code_t format);
 
#define PIXMAN_IMAGE_GET_LINE(image, x, y, type, out_stride, line, mul) \
do \
{ \
uint32_t *__bits__; \
int __stride__; \
\
__bits__ = image->bits.bits; \
__stride__ = image->bits.rowstride; \
(out_stride) = \
__stride__ * (int) sizeof (uint32_t) / (int) sizeof (type); \
(line) = \
((type *) __bits__) + (out_stride) * (y) + (mul) * (x); \
} while (0)
 
/*
* Gradient walker
*/
typedef struct
{
uint32_t left_ag;
uint32_t left_rb;
uint32_t right_ag;
uint32_t right_rb;
int32_t left_x;
int32_t right_x;
int32_t stepper;
 
pixman_gradient_stop_t *stops;
int num_stops;
unsigned int spread;
 
int need_reset;
} pixman_gradient_walker_t;
 
void
_pixman_gradient_walker_init (pixman_gradient_walker_t *walker,
gradient_t * gradient,
unsigned int spread);
 
void
_pixman_gradient_walker_reset (pixman_gradient_walker_t *walker,
pixman_fixed_32_32_t pos);
 
uint32_t
_pixman_gradient_walker_pixel (pixman_gradient_walker_t *walker,
pixman_fixed_32_32_t x);
 
/*
* Edges
*/
 
#define MAX_ALPHA(n) ((1 << (n)) - 1)
#define N_Y_FRAC(n) ((n) == 1 ? 1 : (1 << ((n) / 2)) - 1)
#define N_X_FRAC(n) ((n) == 1 ? 1 : (1 << ((n) / 2)) + 1)
 
#define STEP_Y_SMALL(n) (pixman_fixed_1 / N_Y_FRAC (n))
#define STEP_Y_BIG(n) (pixman_fixed_1 - (N_Y_FRAC (n) - 1) * STEP_Y_SMALL (n))
 
#define Y_FRAC_FIRST(n) (STEP_Y_BIG (n) / 2)
#define Y_FRAC_LAST(n) (Y_FRAC_FIRST (n) + (N_Y_FRAC (n) - 1) * STEP_Y_SMALL (n))
 
#define STEP_X_SMALL(n) (pixman_fixed_1 / N_X_FRAC (n))
#define STEP_X_BIG(n) (pixman_fixed_1 - (N_X_FRAC (n) - 1) * STEP_X_SMALL (n))
 
#define X_FRAC_FIRST(n) (STEP_X_BIG (n) / 2)
#define X_FRAC_LAST(n) (X_FRAC_FIRST (n) + (N_X_FRAC (n) - 1) * STEP_X_SMALL (n))
 
#define RENDER_SAMPLES_X(x, n) \
((n) == 1? 0 : (pixman_fixed_frac (x) + \
X_FRAC_FIRST (n)) / STEP_X_SMALL (n))
 
void
pixman_rasterize_edges_accessors (pixman_image_t *image,
pixman_edge_t * l,
pixman_edge_t * r,
pixman_fixed_t t,
pixman_fixed_t b);
 
/*
* Implementations
*/
typedef struct pixman_implementation_t pixman_implementation_t;
 
typedef void (*pixman_combine_32_func_t) (pixman_implementation_t *imp,
pixman_op_t op,
uint32_t * dest,
const uint32_t * src,
const uint32_t * mask,
int width);
 
typedef void (*pixman_combine_64_func_t) (pixman_implementation_t *imp,
pixman_op_t op,
uint64_t * dest,
const uint64_t * src,
const uint64_t * mask,
int width);
 
typedef void (*pixman_composite_func_t) (pixman_implementation_t *imp,
pixman_op_t op,
pixman_image_t * src,
pixman_image_t * mask,
pixman_image_t * dest,
int32_t src_x,
int32_t src_y,
int32_t mask_x,
int32_t mask_y,
int32_t dest_x,
int32_t dest_y,
int32_t width,
int32_t height);
typedef pixman_bool_t (*pixman_blt_func_t) (pixman_implementation_t *imp,
uint32_t * src_bits,
uint32_t * dst_bits,
int src_stride,
int dst_stride,
int src_bpp,
int dst_bpp,
int src_x,
int src_y,
int dst_x,
int dst_y,
int width,
int height);
typedef pixman_bool_t (*pixman_fill_func_t) (pixman_implementation_t *imp,
uint32_t * bits,
int stride,
int bpp,
int x,
int y,
int width,
int height,
uint32_t xor);
 
void _pixman_setup_combiner_functions_32 (pixman_implementation_t *imp);
void _pixman_setup_combiner_functions_64 (pixman_implementation_t *imp);
 
typedef struct
{
pixman_op_t op;
pixman_format_code_t src_format;
uint32_t src_flags;
pixman_format_code_t mask_format;
uint32_t mask_flags;
pixman_format_code_t dest_format;
uint32_t dest_flags;
pixman_composite_func_t func;
} pixman_fast_path_t;
 
struct pixman_implementation_t
{
pixman_implementation_t * toplevel;
pixman_implementation_t * delegate;
const pixman_fast_path_t * fast_paths;
 
pixman_blt_func_t blt;
pixman_fill_func_t fill;
 
pixman_combine_32_func_t combine_32[PIXMAN_N_OPERATORS];
pixman_combine_32_func_t combine_32_ca[PIXMAN_N_OPERATORS];
pixman_combine_64_func_t combine_64[PIXMAN_N_OPERATORS];
pixman_combine_64_func_t combine_64_ca[PIXMAN_N_OPERATORS];
};
 
pixman_implementation_t *
_pixman_implementation_create (pixman_implementation_t *delegate,
const pixman_fast_path_t *fast_paths);
 
void
_pixman_implementation_combine_32 (pixman_implementation_t *imp,
pixman_op_t op,
uint32_t * dest,
const uint32_t * src,
const uint32_t * mask,
int width);
void
_pixman_implementation_combine_64 (pixman_implementation_t *imp,
pixman_op_t op,
uint64_t * dest,
const uint64_t * src,
const uint64_t * mask,
int width);
void
_pixman_implementation_combine_32_ca (pixman_implementation_t *imp,
pixman_op_t op,
uint32_t * dest,
const uint32_t * src,
const uint32_t * mask,
int width);
void
_pixman_implementation_combine_64_ca (pixman_implementation_t *imp,
pixman_op_t op,
uint64_t * dest,
const uint64_t * src,
const uint64_t * mask,
int width);
 
pixman_bool_t
_pixman_implementation_blt (pixman_implementation_t *imp,
uint32_t * src_bits,
uint32_t * dst_bits,
int src_stride,
int dst_stride,
int src_bpp,
int dst_bpp,
int src_x,
int src_y,
int dst_x,
int dst_y,
int width,
int height);
 
pixman_bool_t
_pixman_implementation_fill (pixman_implementation_t *imp,
uint32_t * bits,
int stride,
int bpp,
int x,
int y,
int width,
int height,
uint32_t xor);
 
/* Specific implementations */
pixman_implementation_t *
_pixman_implementation_create_general (void);
 
pixman_implementation_t *
_pixman_implementation_create_fast_path (void);
 
#ifdef USE_MMX
pixman_implementation_t *
_pixman_implementation_create_mmx (void);
#endif
 
#ifdef USE_SSE2
pixman_implementation_t *
_pixman_implementation_create_sse2 (void);
#endif
 
#ifdef USE_ARM_SIMD
pixman_implementation_t *
_pixman_implementation_create_arm_simd (void);
#endif
 
#ifdef USE_ARM_NEON
pixman_implementation_t *
_pixman_implementation_create_arm_neon (void);
#endif
 
#ifdef USE_VMX
pixman_implementation_t *
_pixman_implementation_create_vmx (void);
#endif
 
pixman_implementation_t *
_pixman_choose_implementation (void);
 
 
 
/*
* Utilities
*/
 
/* These "formats" all have depth 0, so they
* will never clash with any real ones
*/
#define PIXMAN_null PIXMAN_FORMAT (0, 0, 0, 0, 0, 0)
#define PIXMAN_solid PIXMAN_FORMAT (0, 1, 0, 0, 0, 0)
#define PIXMAN_pixbuf PIXMAN_FORMAT (0, 2, 0, 0, 0, 0)
#define PIXMAN_rpixbuf PIXMAN_FORMAT (0, 3, 0, 0, 0, 0)
#define PIXMAN_unknown PIXMAN_FORMAT (0, 4, 0, 0, 0, 0)
#define PIXMAN_any PIXMAN_FORMAT (0, 5, 0, 0, 0, 0)
 
#define PIXMAN_OP_any (PIXMAN_N_OPERATORS + 1)
 
/* Per-image property bits.  _pixman_image_validate computes these into
 * image->common.flags, and composite fast paths declare which bits they
 * require of the source, mask and destination.
 *
 * NOTE(review): bits 7 and 8 are listed out of numerical order below;
 * the values are distinct, so this is cosmetic only.
 */
#define FAST_PATH_ID_TRANSFORM			(1 << 0)
#define FAST_PATH_NO_ALPHA_MAP			(1 << 1)
#define FAST_PATH_NO_CONVOLUTION_FILTER		(1 << 2)
#define FAST_PATH_NO_PAD_REPEAT			(1 << 3)
#define FAST_PATH_NO_REFLECT_REPEAT		(1 << 4)
#define FAST_PATH_NO_ACCESSORS			(1 << 5)
#define FAST_PATH_NARROW_FORMAT			(1 << 6)
#define FAST_PATH_COMPONENT_ALPHA		(1 << 8)
#define FAST_PATH_SAMPLES_OPAQUE		(1 << 7)
#define FAST_PATH_UNIFIED_ALPHA			(1 << 9)
#define FAST_PATH_SCALE_TRANSFORM		(1 << 10)
#define FAST_PATH_NEAREST_FILTER		(1 << 11)
#define FAST_PATH_HAS_TRANSFORM			(1 << 12)
#define FAST_PATH_IS_OPAQUE			(1 << 13)
#define FAST_PATH_NEEDS_WORKAROUND		(1 << 14)
#define FAST_PATH_NO_NONE_REPEAT		(1 << 15)
#define FAST_PATH_SAMPLES_COVER_CLIP		(1 << 16)
#define FAST_PATH_X_UNIT_POSITIVE		(1 << 17)
#define FAST_PATH_AFFINE_TRANSFORM		(1 << 18)
#define FAST_PATH_Y_UNIT_ZERO			(1 << 19)
#define FAST_PATH_BILINEAR_FILTER		(1 << 20)
#define FAST_PATH_NO_NORMAL_REPEAT		(1 << 21)
 
#define FAST_PATH_PAD_REPEAT \
(FAST_PATH_NO_NONE_REPEAT | \
FAST_PATH_NO_NORMAL_REPEAT | \
FAST_PATH_NO_REFLECT_REPEAT)
 
#define FAST_PATH_NORMAL_REPEAT \
(FAST_PATH_NO_NONE_REPEAT | \
FAST_PATH_NO_PAD_REPEAT | \
FAST_PATH_NO_REFLECT_REPEAT)
 
#define FAST_PATH_NONE_REPEAT \
(FAST_PATH_NO_NORMAL_REPEAT | \
FAST_PATH_NO_PAD_REPEAT | \
FAST_PATH_NO_REFLECT_REPEAT)
 
#define FAST_PATH_REFLECT_REPEAT \
(FAST_PATH_NO_NONE_REPEAT | \
FAST_PATH_NO_NORMAL_REPEAT | \
FAST_PATH_NO_PAD_REPEAT)
 
#define FAST_PATH_STANDARD_FLAGS \
(FAST_PATH_NO_CONVOLUTION_FILTER | \
FAST_PATH_NO_ACCESSORS | \
FAST_PATH_NO_ALPHA_MAP | \
FAST_PATH_NARROW_FORMAT)
 
#define FAST_PATH_STD_DEST_FLAGS \
(FAST_PATH_NO_ACCESSORS | \
FAST_PATH_NO_ALPHA_MAP | \
FAST_PATH_NARROW_FORMAT)
 
#define SOURCE_FLAGS(format) \
(FAST_PATH_STANDARD_FLAGS | \
((PIXMAN_ ## format == PIXMAN_solid) ? \
0 : (FAST_PATH_SAMPLES_COVER_CLIP | FAST_PATH_ID_TRANSFORM)))
 
#define MASK_FLAGS(format, extra) \
((PIXMAN_ ## format == PIXMAN_null) ? 0 : (SOURCE_FLAGS (format) | extra))
 
#define FAST_PATH(op, src, src_flags, mask, mask_flags, dest, dest_flags, func) \
PIXMAN_OP_ ## op, \
PIXMAN_ ## src, \
src_flags, \
PIXMAN_ ## mask, \
mask_flags, \
PIXMAN_ ## dest, \
dest_flags, \
func
 
#define PIXMAN_STD_FAST_PATH(op, src, mask, dest, func) \
{ FAST_PATH ( \
op, \
src, SOURCE_FLAGS (src), \
mask, MASK_FLAGS (mask, FAST_PATH_UNIFIED_ALPHA), \
dest, FAST_PATH_STD_DEST_FLAGS, \
func) }
 
#define PIXMAN_STD_FAST_PATH_CA(op, src, mask, dest, func) \
{ FAST_PATH ( \
op, \
src, SOURCE_FLAGS (src), \
mask, MASK_FLAGS (mask, FAST_PATH_COMPONENT_ALPHA), \
dest, FAST_PATH_STD_DEST_FLAGS, \
func) }
 
/* Memory allocation helpers */
void *
pixman_malloc_ab (unsigned int n, unsigned int b);
 
void *
pixman_malloc_abc (unsigned int a, unsigned int b, unsigned int c);
 
pixman_bool_t
pixman_multiply_overflows_int (unsigned int a, unsigned int b);
 
pixman_bool_t
pixman_addition_overflows_int (unsigned int a, unsigned int b);
 
/* Compositing utilities */
void
pixman_expand (uint64_t * dst,
const uint32_t * src,
pixman_format_code_t format,
int width);
 
void
pixman_contract (uint32_t * dst,
const uint64_t *src,
int width);
 
 
/* Region Helpers */
pixman_bool_t
pixman_region32_copy_from_region16 (pixman_region32_t *dst,
pixman_region16_t *src);
 
pixman_bool_t
pixman_region16_copy_from_region32 (pixman_region16_t *dst,
pixman_region32_t *src);
 
 
/* Misc macros */
 
#ifndef FALSE
# define FALSE 0
#endif
 
#ifndef TRUE
# define TRUE 1
#endif
 
/* Parenthesize the macro arguments so expressions with lower-precedence
 * operators expand correctly (e.g. MIN(a & b, c) previously parsed as
 * a & (b < c) ? ... ).  The arguments are still evaluated twice, so
 * avoid side effects in them.
 */
#ifndef MIN
#  define MIN(a, b) ((a) < (b) ? (a) : (b))
#endif

#ifndef MAX
#  define MAX(a, b) ((a) > (b) ? (a) : (b))
#endif
 
/* Integer division that rounds towards -infinity (C's `/` truncates
 * towards zero).  Both macros assume b != 0 and evaluate their
 * arguments multiple times.
 */
#define DIV(a, b)					   \
    ((((a) < 0) == ((b) < 0)) ? (a) / (b) :                \
     ((a) - (b) + 1 - (((b) < 0) << 1)) / (b))

/* Modulus that produces the remainder wrt. DIV, i.e. always in [0, b)
 * for positive b, regardless of the sign of a.
 */
#define MOD(a, b) ((a) < 0 ? ((b) - ((-(a) - 1) % (b))) - 1 : (a) % (b))

/* Clamp v into [low, high]. */
#define CLIP(v, low, high) ((v) < (low) ? (low) : ((v) > (high) ? (high) : (v)))
 
/* Conversion between 8888 and 0565 pixel formats */

/* Truncate an x8r8g8b8 pixel to r5g6b5 by dropping the low bits of
 * each channel (no rounding, alpha discarded).
 */
#define CONVERT_8888_TO_0565(s)    \
    ((((s) >> 3) & 0x001f) |       \
     (((s) >> 5) & 0x07e0) |       \
     (((s) >> 8) & 0xf800))

/* Expand r5g6b5 to x8r8g8b8, replicating the channels' high bits into
 * the low bits so that 0x1f maps to 0xff (alpha left at 0).
 */
#define CONVERT_0565_TO_0888(s)                                     \
    (((((s) << 3) & 0xf8) | (((s) >> 2) & 0x7)) |                   \
     ((((s) << 5) & 0xfc00) | (((s) >> 1) & 0x300)) |               \
     ((((s) << 8) & 0xf80000) | (((s) << 3) & 0x70000)))

/* Same as above, but with alpha forced to fully opaque. */
#define CONVERT_0565_TO_8888(s) (CONVERT_0565_TO_0888(s) | 0xff000000)

/* Trivial versions that are useful in macros */
#define CONVERT_8888_TO_8888(s) (s)
#define CONVERT_0565_TO_0565(s) (s)

/* True if any channel is deeper than 8 bits, i.e. the format needs the
 * 64-bit wide compositing pipeline.
 */
#define PIXMAN_FORMAT_IS_WIDE(f)	\
    (PIXMAN_FORMAT_A (f) > 8 ||		\
     PIXMAN_FORMAT_R (f) > 8 ||		\
     PIXMAN_FORMAT_G (f) > 8 ||		\
     PIXMAN_FORMAT_B (f) > 8)
 
#ifdef WORDS_BIGENDIAN
# define SCREEN_SHIFT_LEFT(x,n) ((x) << (n))
# define SCREEN_SHIFT_RIGHT(x,n) ((x) >> (n))
#else
# define SCREEN_SHIFT_LEFT(x,n) ((x) >> (n))
# define SCREEN_SHIFT_RIGHT(x,n) ((x) << (n))
#endif
 
/*
* Various debugging code
*/
 
#undef DEBUG
 
#define COMPILE_TIME_ASSERT(x) \
do { typedef int compile_time_assertion [(x)?1:-1]; } while (0)
 
/* Turn on debugging depending on what type of release this is
*/
#if (((PIXMAN_VERSION_MICRO % 2) == 0) && ((PIXMAN_VERSION_MINOR % 2) == 1))
 
/* Debugging gets turned on for development releases because these
* are the things that end up in bleeding edge distributions such
* as Rawhide etc.
*
* For performance reasons we don't turn it on for stable releases or
* random git checkouts. (Random git checkouts are often used for
* performance work).
*/
 
# define DEBUG
 
#endif
 
#ifdef DEBUG
 
void
_pixman_log_error (const char *function, const char *message);
 
#define return_if_fail(expr) \
do \
{ \
if (!(expr)) \
{ \
_pixman_log_error (FUNC, "The expression " # expr " was false"); \
return; \
} \
} \
while (0)
 
#define return_val_if_fail(expr, retval) \
do \
{ \
if (!(expr)) \
{ \
_pixman_log_error (FUNC, "The expression " # expr " was false"); \
return (retval); \
} \
} \
while (0)
 
#define critical_if_fail(expr) \
do \
{ \
if (!(expr)) \
_pixman_log_error (FUNC, "The expression " # expr " was false"); \
} \
while (0)
 
 
#else
 
/* Release builds: error logging is compiled out, the checks themselves
 * are kept (without the diagnostic message).
 *
 * Fix: the definition of _pixman_log_error previously ended with a
 * stray line-continuation backslash, which silently spliced the
 * following source line into the macro body.
 */
#define _pixman_log_error(f, m) do { } while (0)

#define return_if_fail(expr)						\
    do									\
    {									\
	if (!(expr))							\
	    return;							\
    }									\
    while (0)

#define return_val_if_fail(expr, retval)				\
    do									\
    {									\
	if (!(expr))							\
	    return (retval);						\
    }									\
    while (0)

#define critical_if_fail(expr)						\
    do									\
    {									\
    }									\
    while (0)
#endif
 
/*
* Timers
*/
 
#ifdef PIXMAN_TIMERS
 
/* Read the CPU time-stamp counter for profiling timestamps.
 *
 * NOTE(review): the "=A" constraint binds the 64-bit result to the
 * edx:eax register pair, which is only correct on 32-bit x86; on
 * x86-64, rdtsc leaves the high half in rdx and "=A" would not capture
 * it.  Presumably this build only targets 32-bit x86 — confirm.
 */
static inline uint64_t
oil_profile_stamp_rdtsc (void)
{
    uint64_t ts;

    __asm__ __volatile__ ("rdtsc\n" : "=A" (ts));
    return ts;
}
 
#define OIL_STAMP oil_profile_stamp_rdtsc
 
typedef struct pixman_timer_t pixman_timer_t;
 
struct pixman_timer_t
{
int initialized;
const char * name;
uint64_t n_times;
uint64_t total;
pixman_timer_t *next;
};
 
extern int timer_defined;
 
void pixman_timer_register (pixman_timer_t *timer);
 
#define TIMER_BEGIN(tname) \
{ \
static pixman_timer_t timer ## tname; \
uint64_t begin ## tname; \
\
if (!timer ## tname.initialized) \
{ \
timer ## tname.initialized = 1; \
timer ## tname.name = # tname; \
pixman_timer_register (&timer ## tname); \
} \
\
timer ## tname.n_times++; \
begin ## tname = OIL_STAMP ();
 
#define TIMER_END(tname) \
timer ## tname.total += OIL_STAMP () - begin ## tname; \
}
 
#endif /* PIXMAN_TIMERS */
 
#endif /* PIXMAN_PRIVATE_H */
/programs/develop/libraries/pixman/pixman-radial-gradient.c
0,0 → 1,448
/* -*- Mode: c; c-basic-offset: 4; tab-width: 8; indent-tabs-mode: t; -*- */
/*
*
* Copyright © 2000 Keith Packard, member of The XFree86 Project, Inc.
* Copyright © 2000 SuSE, Inc.
* 2005 Lars Knoll & Zack Rusin, Trolltech
* Copyright © 2007 Red Hat, Inc.
*
*
* Permission to use, copy, modify, distribute, and sell this software and its
* documentation for any purpose is hereby granted without fee, provided that
* the above copyright notice appear in all copies and that both that
* copyright notice and this permission notice appear in supporting
* documentation, and that the name of Keith Packard not be used in
* advertising or publicity pertaining to distribution of the software without
* specific, written prior permission. Keith Packard makes no
* representations about the suitability of this software for any purpose. It
* is provided "as is" without express or implied warranty.
*
* THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS
* SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS, IN NO EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY
* SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN
* AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING
* OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS
* SOFTWARE.
*/
 
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif
#include <stdlib.h>
#include <math.h>
#include "pixman-private.h"
 
/*
 * Fixed-point dot product of (x1, y1, z1) and (x2, y2, z2).
 *
 * The computation is exact as long as every input fits in a
 * pixman_fixed_16_16_t: each product then fits in 48.16 and the sum
 * cannot overflow the accumulator.
 */
static inline pixman_fixed_32_32_t
dot (pixman_fixed_48_16_t x1,
     pixman_fixed_48_16_t y1,
     pixman_fixed_48_16_t z1,
     pixman_fixed_48_16_t x2,
     pixman_fixed_48_16_t y2,
     pixman_fixed_48_16_t z2)
{
    pixman_fixed_48_16_t sum;

    sum = x1 * x2;
    sum += y1 * y2;
    sum += z1 * z2;

    return sum;
}
 
/*
 * Floating-point dot product of (ax, ay, az) and (bx, by, bz).
 *
 * No attempt is made to bound the rounding error; a compensated dot
 * product would be more accurate in pathological cases but much less
 * obvious.  Callers (radial_compute_color) tolerate this.
 */
static inline double
fdot (double ax,
      double ay,
      double az,
      double bx,
      double by,
      double bz)
{
    double acc;

    acc = ax * bx;
    acc += ay * by;
    acc += az * bz;

    return acc;
}
 
/*
 * Solve the quadratic At^2 - 2Bt + C = 0 for the gradient parameter t
 * and return the gradient color at the preferred solution, or 0
 * (transparent black) when no valid t exists.
 *
 * a:      the A coefficient (constant per gradient)
 * b, c:   the B and C coefficients for this pixel
 * inva:   pixman_fixed_1 / a, precomputed (only read when a != 0)
 * dr:     r2 - r1 (fixed point)
 * mindr:  -pixman_fixed_1 * r1; "t*dr > mindr" tests that the
 *         interpolated radius r1 + t*dr is positive
 * repeat: PIXMAN_REPEAT_NONE restricts t to [0, pixman_fixed_1]
 */
static uint32_t
radial_compute_color (double                    a,
		      double                    b,
		      double                    c,
		      double                    inva,
		      double                    dr,
		      double                    mindr,
		      pixman_gradient_walker_t *walker,
		      pixman_repeat_t           repeat)
{
    /*
     * In this function error propagation can lead to bad results:
     *  - det can have an unbound error (if b*b-a*c is very small),
     *    potentially making it the opposite sign of what it should have been
     *    (thus clearing a pixel that would have been colored or vice-versa)
     *    or propagating the error to sqrtdet;
     *    if det has the wrong sign or b is very small, this can lead to bad
     *    results
     *
     *  - the algorithm used to compute the solutions of the quadratic
     *    equation is not numerically stable (but saves one division compared
     *    to the numerically stable one);
     *    this can be a problem if a*c is much smaller than b*b
     *
     *  - the above problems are worse if a is small (as inva becomes bigger)
     */
    double det;

    if (a == 0)
    {
	/* Degenerate linear case: Bt = C/2, so t = C / (2B), scaled to
	 * fixed point. */
	double t;

	if (b == 0)
	    return 0;

	t = pixman_fixed_1 / 2 * c / b;
	if (repeat == PIXMAN_REPEAT_NONE)
	{
	    if (0 <= t && t <= pixman_fixed_1)
		return _pixman_gradient_walker_pixel (walker, t);
	}
	else
	{
	    if (t * dr > mindr)
		return _pixman_gradient_walker_pixel (walker, t);
	}

	return 0;
    }

    /* det = B^2 - A*C, computed as a dot product to mirror the exact
     * fixed-point formulation used elsewhere. */
    det = fdot (b, a, 0, b, -c, 0);
    if (det >= 0)
    {
	double sqrtdet, t0, t1;

	sqrtdet = sqrt (det);
	/* t0 is the larger root (preferred); fall back to t1 only when
	 * t0 is out of range / has a negative radius. */
	t0 = (b + sqrtdet) * inva;
	t1 = (b - sqrtdet) * inva;

	if (repeat == PIXMAN_REPEAT_NONE)
	{
	    if (0 <= t0 && t0 <= pixman_fixed_1)
		return _pixman_gradient_walker_pixel (walker, t0);
	    else if (0 <= t1 && t1 <= pixman_fixed_1)
		return _pixman_gradient_walker_pixel (walker, t1);
	}
	else
	{
	    if (t0 * dr > mindr)
		return _pixman_gradient_walker_pixel (walker, t0);
	    else if (t1 * dr > mindr)
		return _pixman_gradient_walker_pixel (walker, t1);
	}
    }

    return 0;
}
 
/* Fetch one scanline of `width` a8r8g8b8 pixels of the radial gradient
 * into `buffer`, starting at device coordinate (x, y).  Pixels whose
 * corresponding `mask` entry is zero are skipped (left untouched in the
 * affine branch, and in the projective branch as well).
 */
static void
radial_gradient_get_scanline_32 (pixman_image_t *image,
                                 int             x,
                                 int             y,
                                 int             width,
                                 uint32_t *      buffer,
                                 const uint32_t *mask)
{
    /*
     * Implementation of radial gradients following the PDF specification.
     * See section 8.7.4.5.4 Type 3 (Radial) Shadings of the PDF Reference
     * Manual (PDF 32000-1:2008 at the time of this writing).
     *
     * In the radial gradient problem we are given two circles (c₁,r₁) and
     * (c₂,r₂) that define the gradient itself.
     *
     * Mathematically the gradient can be defined as the family of circles
     *
     *     ((1-t)·c₁ + t·(c₂), (1-t)·r₁ + t·r₂)
     *
     * excluding those circles whose radius would be < 0. When a point
     * belongs to more than one circle, the one with a bigger t is the only
     * one that contributes to its color. When a point does not belong
     * to any of the circles, it is transparent black, i.e. RGBA (0, 0, 0, 0).
     * Further limitations on the range of values for t are imposed when
     * the gradient is not repeated, namely t must belong to [0,1].
     *
     * The graphical result is the same as drawing the valid (radius > 0)
     * circles with increasing t in [-inf, +inf] (or in [0,1] if the gradient
     * is not repeated) using SOURCE operator composition.
     *
     * It looks like a cone pointing towards the viewer if the ending circle
     * is smaller than the starting one, a cone pointing inside the page if
     * the starting circle is the smaller one and like a cylinder if they
     * have the same radius.
     *
     * What we actually do is, given the point whose color we are interested
     * in, compute the t values for that point, solving for t in:
     *
     *     length((1-t)·c₁ + t·(c₂) - p) = (1-t)·r₁ + t·r₂
     *
     * Let's rewrite it in a simpler way, by defining some auxiliary
     * variables:
     *
     *     cd = c₂ - c₁
     *     pd = p - c₁
     *     dr = r₂ - r₁
     *     length(t·cd - pd) = r₁ + t·dr
     *
     * which actually means
     *
     *     hypot(t·cdx - pdx, t·cdy - pdy) = r₁ + t·dr
     *
     * or
     *
     *     ⎷((t·cdx - pdx)² + (t·cdy - pdy)²) = r₁ + t·dr.
     *
     * If we impose (as stated earlier) that r₁ + t·dr >= 0, it becomes:
     *
     *     (t·cdx - pdx)² + (t·cdy - pdy)² = (r₁ + t·dr)²
     *
     * where we can actually expand the squares and solve for t:
     *
     *     t²cdx² - 2t·cdx·pdx + pdx² + t²cdy² - 2t·cdy·pdy + pdy² =
     *       = r₁² + 2·r₁·t·dr + t²·dr²
     *
     *     (cdx² + cdy² - dr²)t² - 2(cdx·pdx + cdy·pdy + r₁·dr)t +
     *         (pdx² + pdy² - r₁²) = 0
     *
     *     A = cdx² + cdy² - dr²
     *     B = pdx·cdx + pdy·cdy + r₁·dr
     *     C = pdx² + pdy² - r₁²
     *     At² - 2Bt + C = 0
     *
     * The solutions (unless the equation degenerates because of A = 0) are:
     *
     *     t = (B ± ⎷(B² - A·C)) / A
     *
     * The solution we are going to prefer is the bigger one, unless the
     * radius associated to it is negative (or it falls outside the valid t
     * range).
     *
     * Additional observations (useful for optimizations):
     * A does not depend on p
     *
     * A < 0 <=> one of the two circles completely contains the other one
     *   <=> for every p, the radii associated with the two t solutions
     *       have opposite sign
     */

    gradient_t *gradient = (gradient_t *)image;
    source_image_t *source = (source_image_t *)image;
    radial_gradient_t *radial = (radial_gradient_t *)image;
    uint32_t *end = buffer + width;
    pixman_gradient_walker_t walker;
    pixman_vector_t v, unit;

    /* reference point is the center of the pixel */
    v.vector[0] = pixman_int_to_fixed (x) + pixman_fixed_1 / 2;
    v.vector[1] = pixman_int_to_fixed (y) + pixman_fixed_1 / 2;
    v.vector[2] = pixman_fixed_1;

    _pixman_gradient_walker_init (&walker, gradient, source->common.repeat);

    if (source->common.transform)
    {
	if (!pixman_transform_point_3d (source->common.transform, &v))
	    return;
	/* `unit` is the per-pixel step: the first column of the matrix,
	 * i.e. the image of the x unit vector. */
	unit.vector[0] = source->common.transform->matrix[0][0];
	unit.vector[1] = source->common.transform->matrix[1][0];
	unit.vector[2] = source->common.transform->matrix[2][0];
    }
    else
    {
	unit.vector[0] = pixman_fixed_1;
	unit.vector[1] = 0;
	unit.vector[2] = 0;
    }

    if (unit.vector[2] == 0 && v.vector[2] == pixman_fixed_1)
    {
	/*
	 * Affine case: w stays 1, so B, C and det can be stepped
	 * incrementally along the scanline.
	 *
	 * Given:
	 *
	 * t = (B ± ⎷(B² - A·C)) / A
	 *
	 * where
	 *
	 * A = cdx² + cdy² - dr²
	 * B = pdx·cdx + pdy·cdy + r₁·dr
	 * C = pdx² + pdy² - r₁²
	 * det = B² - A·C
	 *
	 * Since we have an affine transformation, we know that (pdx, pdy)
	 * increase linearly with each pixel,
	 *
	 * pdx = pdx₀ + n·ux,
	 * pdy = pdy₀ + n·uy,
	 *
	 * we can then express B, C and det through multiple differentiation.
	 */
	pixman_fixed_32_32_t b, db, c, dc, ddc;

	/* warning: this computation may overflow */
	v.vector[0] -= radial->c1.x;
	v.vector[1] -= radial->c1.y;

	/*
	 * B and C are computed and updated exactly.
	 * If fdot was used instead of dot, in the worst case it would
	 * lose 11 bits of precision in each of the multiplication and
	 * summing up would zero out all the bit that were preserved,
	 * thus making the result 0 instead of the correct one.
	 * This would mean a worst case of unbound relative error or
	 * about 2^10 absolute error
	 */
	b = dot (v.vector[0], v.vector[1], radial->c1.radius,
		 radial->delta.x, radial->delta.y, radial->delta.radius);
	/* db: first difference of B per pixel step */
	db = dot (unit.vector[0], unit.vector[1], 0,
		  radial->delta.x, radial->delta.y, 0);

	c = dot (v.vector[0], v.vector[1],
		 -((pixman_fixed_48_16_t) radial->c1.radius),
		 v.vector[0], v.vector[1], radial->c1.radius);
	/* dc/ddc: first and (constant) second difference of C — C is
	 * quadratic in the pixel index. */
	dc = dot (2 * (pixman_fixed_48_16_t) v.vector[0] + unit.vector[0],
		  2 * (pixman_fixed_48_16_t) v.vector[1] + unit.vector[1],
		  0,
		  unit.vector[0], unit.vector[1], 0);
	ddc = 2 * dot (unit.vector[0], unit.vector[1], 0,
		       unit.vector[0], unit.vector[1], 0);

	while (buffer < end)
	{
	    if (!mask || *mask++)
	    {
		*buffer = radial_compute_color (radial->a, b, c,
						radial->inva,
						radial->delta.radius,
						radial->mindr,
						&walker,
						source->common.repeat);
	    }

	    b += db;
	    c += dc;
	    dc += ddc;
	    ++buffer;
	}
    }
    else
    {
	/* projective */
	/* Warning:
	 * error propagation guarantees are much looser than in the affine case
	 */
	while (buffer < end)
	{
	    if (!mask || *mask++)
	    {
		if (v.vector[2] != 0)
		{
		    double pdx, pdy, invv2, b, c;

		    /* Perspective divide, kept in fixed-point scale. */
		    invv2 = 1. * pixman_fixed_1 / v.vector[2];

		    pdx = v.vector[0] * invv2 - radial->c1.x;
		    /*    / pixman_fixed_1 */

		    pdy = v.vector[1] * invv2 - radial->c1.y;
		    /*    / pixman_fixed_1 */

		    b = fdot (pdx, pdy, radial->c1.radius,
			      radial->delta.x, radial->delta.y,
			      radial->delta.radius);
		    /*  / pixman_fixed_1 / pixman_fixed_1 */

		    c = fdot (pdx, pdy, -radial->c1.radius,
			      pdx, pdy, radial->c1.radius);
		    /*  / pixman_fixed_1 / pixman_fixed_1 */

		    *buffer = radial_compute_color (radial->a, b, c,
						    radial->inva,
						    radial->delta.radius,
						    radial->mindr,
						    &walker,
						    source->common.repeat);
		}
		else
		{
		    /* Point at infinity: transparent black. */
		    *buffer = 0;
		}
	    }
	    ++buffer;

	    v.vector[0] += unit.vector[0];
	    v.vector[1] += unit.vector[1];
	    v.vector[2] += unit.vector[2];
	}
    }
}
 
/* Install the scanline fetchers for radial gradient images; invoked via
 * image->common.property_changed when the image is (re)validated.
 */
static void
radial_gradient_property_changed (pixman_image_t *image)
{
    image->common.get_scanline_32 = radial_gradient_get_scanline_32;
    /* The 64 bpp path goes through the generic wrapper, which widens
     * the 32-bit scanline produced above. */
    image->common.get_scanline_64 = _pixman_image_get_scanline_generic_64;
}
 
/* Create a radial gradient image interpolating from the circle
 * (inner, inner_radius) at t = 0 to (outer, outer_radius) at t = 1,
 * with the given color stops.  Returns NULL on allocation failure.
 * The stops are copied (see _pixman_init_gradient), so the caller
 * keeps ownership of `stops`.
 */
PIXMAN_EXPORT pixman_image_t *
pixman_image_create_radial_gradient (pixman_point_fixed_t *        inner,
                                     pixman_point_fixed_t *        outer,
                                     pixman_fixed_t                inner_radius,
                                     pixman_fixed_t                outer_radius,
                                     const pixman_gradient_stop_t *stops,
                                     int                           n_stops)
{
    pixman_image_t *image;
    radial_gradient_t *radial;

    image = _pixman_image_allocate ();

    if (!image)
	return NULL;

    radial = &image->radial;

    if (!_pixman_init_gradient (&radial->common, stops, n_stops))
    {
	free (image);
	return NULL;
    }

    image->type = RADIAL;

    radial->c1.x = inner->x;
    radial->c1.y = inner->y;
    radial->c1.radius = inner_radius;
    radial->c2.x = outer->x;
    radial->c2.y = outer->y;
    radial->c2.radius = outer_radius;

    /* warning: these computations may overflow */
    radial->delta.x = radial->c2.x - radial->c1.x;
    radial->delta.y = radial->c2.y - radial->c1.y;
    radial->delta.radius = radial->c2.radius - radial->c1.radius;

    /* A coefficient of the gradient quadratic; computed exactly, then
     * cast to double -> every bit of the double representation is
     * correct (53 bits) */
    radial->a = dot (radial->delta.x, radial->delta.y, -radial->delta.radius,
		     radial->delta.x, radial->delta.y, radial->delta.radius);
    /* inva is left unset when a == 0; radial_compute_color never reads
     * it in that case (it takes the degenerate linear branch). */
    if (radial->a != 0)
	radial->inva = 1. * pixman_fixed_1 / radial->a;

    /* mindr = -r1 in fixed-point scale; used to test that the
     * interpolated radius r1 + t*dr stays positive. */
    radial->mindr = -1. * pixman_fixed_1 * radial->c1.radius;

    image->common.property_changed = radial_gradient_property_changed;

    return image;
}
 
/programs/develop/libraries/pixman/pixman-region.c
0,0 → 1,2769
/*
* Copyright 1987, 1988, 1989, 1998 The Open Group
*
* Permission to use, copy, modify, distribute, and sell this software and its
* documentation for any purpose is hereby granted without fee, provided that
* the above copyright notice appear in all copies and that both that
* copyright notice and this permission notice appear in supporting
* documentation.
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* OPEN GROUP BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
* AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* Except as contained in this notice, the name of The Open Group shall not be
* used in advertising or otherwise to promote the sale, use or other dealings
* in this Software without prior written authorization from The Open Group.
*
* Copyright 1987, 1988, 1989 by
* Digital Equipment Corporation, Maynard, Massachusetts.
*
* All Rights Reserved
*
* Permission to use, copy, modify, and distribute this software and its
* documentation for any purpose and without fee is hereby granted,
* provided that the above copyright notice appear in all copies and that
* both that copyright notice and this permission notice appear in
* supporting documentation, and that the name of Digital not be
* used in advertising or publicity pertaining to distribution of the
* software without specific, written prior permission.
*
* DIGITAL DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
* ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL
* DIGITAL BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
* ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
* WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION,
* ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS
* SOFTWARE.
*
* Copyright © 1998 Keith Packard
*
* Permission to use, copy, modify, distribute, and sell this software and its
* documentation for any purpose is hereby granted without fee, provided that
* the above copyright notice appear in all copies and that both that
* copyright notice and this permission notice appear in supporting
* documentation, and that the name of Keith Packard not be used in
* advertising or publicity pertaining to distribution of the software without
* specific, written prior permission. Keith Packard makes no
* representations about the suitability of this software for any purpose. It
* is provided "as is" without express or implied warranty.
*
* KEITH PACKARD DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
* INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
* EVENT SHALL KEITH PACKARD BE LIABLE FOR ANY SPECIAL, INDIRECT OR
* CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
* DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
 
#include <stdlib.h>
#include <limits.h>
#include <string.h>
#include <stdio.h>
#include "pixman-private.h"
 
/* A region is "nil" (empty) when it has a data header with zero boxes. */
#define PIXREGION_NIL(reg) ((reg)->data && !(reg)->data->numRects)
/* not a region (marked broken by an allocation failure) */
#define PIXREGION_NAR(reg) ((reg)->data == pixman_broken_data)
/* Number of boxes; a NULL data pointer means the single box in `extents`. */
#define PIXREGION_NUMRECTS(reg) ((reg)->data ? (reg)->data->numRects : 1)
/* Allocated capacity of the box array (0 for the static singletons). */
#define PIXREGION_SIZE(reg) ((reg)->data ? (reg)->data->size : 0)
/* First box: the array immediately follows the data header, or is the
 * inline `extents` box when there is no data. */
#define PIXREGION_RECTS(reg) \
    ((reg)->data ? (box_type_t *)((reg)->data + 1) \
     : &(reg)->extents)
#define PIXREGION_BOXPTR(reg) ((box_type_t *)((reg)->data + 1))
#define PIXREGION_BOX(reg, i) (&PIXREGION_BOXPTR (reg)[i])
/* One past the last box / the last box itself. */
#define PIXREGION_TOP(reg) PIXREGION_BOX (reg, (reg)->data->numRects)
#define PIXREGION_END(reg) PIXREGION_BOX (reg, (reg)->data->numRects - 1)

/* A good rectangle has positive width and height; an inverted one
 * (x1 > x2 or y1 > y2) is a caller error, while an exactly zero-area
 * rectangle is merely empty. */
#define GOOD_RECT(rect) ((rect)->x1 < (rect)->x2 && (rect)->y1 < (rect)->y2)
#define BAD_RECT(rect) ((rect)->x1 > (rect)->x2 || (rect)->y1 > (rect)->y2)

#ifdef DEBUG

/* Debug builds verify region invariants and log any malformation. */
#define GOOD(reg) \
    do \
    { \
	if (!PREFIX (_selfcheck (reg))) \
	    _pixman_log_error (FUNC, "Malformed region " # reg); \
    } while (0)

#else

#define GOOD(reg)

#endif

/* Shared immutable singletons: the empty box, the empty-region data,
 * and the sentinel data that marks a region as broken. */
static const box_type_t PREFIX (_empty_box_) = { 0, 0, 0, 0 };
static const region_data_type_t PREFIX (_empty_data_) = { 0, 0 };
static const region_data_type_t PREFIX (_broken_data_) = { 0, 0 };

static box_type_t *pixman_region_empty_box =
    (box_type_t *)&PREFIX (_empty_box_);
static region_data_type_t *pixman_region_empty_data =
    (region_data_type_t *)&PREFIX (_empty_data_);
static region_data_type_t *pixman_broken_data =
    (region_data_type_t *)&PREFIX (_broken_data_);

static pixman_bool_t
pixman_break (region_type_t *region);
 
/*
* The functions in this file implement the Region abstraction used extensively
* throughout the X11 sample server. A Region is simply a set of disjoint
* (non-overlapping) rectangles, plus an "extent" rectangle which is the
* smallest single rectangle that contains all the non-overlapping rectangles.
*
* A Region is implemented as a "y-x-banded" array of rectangles. This array
* imposes two degrees of order. First, all rectangles are sorted by top side
* y coordinate first (y1), and then by left side x coordinate (x1).
*
* Furthermore, the rectangles are grouped into "bands". Each rectangle in a
* band has the same top y coordinate (y1), and each has the same bottom y
* coordinate (y2). Thus all rectangles in a band differ only in their left
* and right side (x1 and x2). Bands are implicit in the array of rectangles:
* there is no separate list of band start pointers.
*
* The y-x band representation does not minimize rectangles. In particular,
* if a rectangle vertically crosses a band (the rectangle has scanlines in
* the y1 to y2 area spanned by the band), then the rectangle may be broken
* down into two or more smaller rectangles stacked one atop the other.
*
* ----------- -----------
* | | | | band 0
* | | -------- ----------- --------
* | | | | in y-x banded | | | | band 1
* | | | | form is | | | |
* ----------- | | ----------- --------
* | | | | band 2
* -------- --------
*
* An added constraint on the rectangles is that they must cover as much
* horizontal area as possible: no two rectangles within a band are allowed
* to touch.
*
* Whenever possible, bands will be merged together to cover a greater vertical
* distance (and thus reduce the number of rectangles). Two bands can be merged
* only if the bottom of one touches the top of the other and they have
* rectangles in the same places (of the same width, of course).
*
* Adam de Boor wrote most of the original region code. Joel McCormack
* substantially modified or rewrote most of the core arithmetic routines, and
* added pixman_region_validate in order to support several speed improvements
* to pixman_region_validate_tree. Bob Scheifler changed the representation
* to be more compact when empty or a single rectangle, and did a bunch of
* gratuitous reformatting. Carl Worth did further gratuitous reformatting
* while re-merging the server and client region code into libpixregion.
* Soren Sandmann did even more gratuitous reformatting.
*/
 
/* true iff two Boxes overlap */
#define EXTENTCHECK(r1, r2) \
(!( ((r1)->x2 <= (r2)->x1) || \
((r1)->x1 >= (r2)->x2) || \
((r1)->y2 <= (r2)->y1) || \
((r1)->y1 >= (r2)->y2) ) )
 
/* true iff (x,y) is in Box */
#define INBOX(r, x, y) \
( ((r)->x2 > x) && \
((r)->x1 <= x) && \
((r)->y2 > y) && \
((r)->y1 <= y) )
 
/* true iff Box r1 contains Box r2 */
#define SUBSUMES(r1, r2) \
( ((r1)->x1 <= (r2)->x1) && \
((r1)->x2 >= (r2)->x2) && \
((r1)->y1 <= (r2)->y1) && \
((r1)->y2 >= (r2)->y2) )
 
/*
 * Return the number of bytes needed to hold a region_data_type_t header
 * followed by n boxes, or 0 when the computation would overflow.
 */
static size_t
PIXREGION_SZOF (size_t n)
{
    size_t size;

    /* Guard before multiplying so the product below can never wrap.
     * (The original computed the product first; unsigned wraparound is
     * defined but needlessly subtle.) */
    if (n > UINT32_MAX / sizeof(box_type_t))
	return 0;

    size = n * sizeof(box_type_t);

    if (sizeof(region_data_type_t) > UINT32_MAX - size)
	return 0;

    return size + sizeof(region_data_type_t);
}
 
/*
 * Allocate storage for a region data header plus n boxes.  Returns NULL
 * when the size computation overflows or malloc fails.
 */
static void *
alloc_data (size_t n)
{
    size_t bytes = PIXREGION_SZOF (n);

    return bytes ? malloc (bytes) : NULL;
}
 
/* Free a region's box storage; the static singletons have size == 0 and
 * must never be freed. */
#define FREE_DATA(reg) if ((reg)->data && (reg)->data->size) free ((reg)->data)

/* Ensure room for n more boxes, jumping to `bail` on failure. */
#define RECTALLOC_BAIL(region, n, bail) \
    do \
    { \
	if (!(region)->data || \
	    (((region)->data->numRects + (n)) > (region)->data->size)) \
	{ \
	    if (!pixman_rect_alloc (region, n)) \
		goto bail; \
	} \
    } while (0)

/* Ensure room for n more boxes, returning FALSE on failure. */
#define RECTALLOC(region, n) \
    do \
    { \
	if (!(region)->data || \
	    (((region)->data->numRects + (n)) > (region)->data->size)) \
	{ \
	    if (!pixman_rect_alloc (region, n)) { \
		return FALSE; \
	    } \
	} \
    } while (0)

/* Store a box at *next_rect and advance the cursor.  Capacity must
 * already have been reserved. */
#define ADDRECT(next_rect, nx1, ny1, nx2, ny2) \
    do \
    { \
	next_rect->x1 = nx1; \
	next_rect->y1 = ny1; \
	next_rect->x2 = nx2; \
	next_rect->y2 = ny2; \
	next_rect++; \
    } \
    while (0)

/* Append one box, growing the array (and refreshing next_rect, which a
 * realloc may have invalidated) when full. */
#define NEWRECT(region, next_rect, nx1, ny1, nx2, ny2) \
    do \
    { \
	if (!(region)->data || \
	    ((region)->data->numRects == (region)->data->size)) \
	{ \
	    if (!pixman_rect_alloc (region, 1)) \
		return FALSE; \
	    next_rect = PIXREGION_TOP (region); \
	} \
	ADDRECT (next_rect, nx1, ny1, nx2, ny2); \
	region->data->numRects++; \
	critical_if_fail (region->data->numRects <= region->data->size); \
    } while (0)

/* Shrink an over-allocated box array once an operation is complete.
 * Failure to shrink is harmless, so a NULL realloc is ignored. */
#define DOWNSIZE(reg, numRects) \
    do \
    { \
	if (((numRects) < ((reg)->data->size >> 1)) && \
	    ((reg)->data->size > 50)) \
	{ \
	    region_data_type_t * new_data; \
	    size_t data_size = PIXREGION_SZOF (numRects); \
	    \
	    if (!data_size) \
	    { \
		new_data = NULL; \
	    } \
	    else \
	    { \
		new_data = (region_data_type_t *) \
		    realloc ((reg)->data, data_size); \
	    } \
	    \
	    if (new_data) \
	    { \
		new_data->size = (numRects); \
		(reg)->data = new_data; \
	    } \
	} \
    } while (0)
 
PIXMAN_EXPORT pixman_bool_t
PREFIX (_equal) (region_type_t *reg1, region_type_t *reg2)
{
    /* Two regions are equal iff their extents match and they contain
     * exactly the same sequence of boxes. */
    box_type_t *b1, *b2;
    int n, i;

    if (reg1->extents.x1 != reg2->extents.x1 ||
        reg1->extents.x2 != reg2->extents.x2 ||
        reg1->extents.y1 != reg2->extents.y1 ||
        reg1->extents.y2 != reg2->extents.y2)
    {
        return FALSE;
    }

    n = PIXREGION_NUMRECTS (reg1);
    if (n != PIXREGION_NUMRECTS (reg2))
        return FALSE;

    b1 = PIXREGION_RECTS (reg1);
    b2 = PIXREGION_RECTS (reg2);

    for (i = 0; i < n; i++)
    {
        if (b1[i].x1 != b2[i].x1 || b1[i].x2 != b2[i].x2 ||
            b1[i].y1 != b2[i].y1 || b1[i].y2 != b2[i].y2)
        {
            return FALSE;
        }
    }

    return TRUE;
}
 
/* Debug helper: dump the region's box count, capacity, extents and every
 * box to stderr.  Returns the number of boxes. */
int
PREFIX (_print) (region_type_t *rgn)
{
    box_type_t *boxes = PIXREGION_RECTS (rgn);
    int count = PIXREGION_NUMRECTS (rgn);
    int capacity = PIXREGION_SIZE (rgn);
    int i;

    fprintf (stderr, "num: %d size: %d\n", count, capacity);
    fprintf (stderr, "extents: %d %d %d %d\n",
             rgn->extents.x1, rgn->extents.y1,
             rgn->extents.x2, rgn->extents.y2);

    for (i = 0; i < count; i++)
    {
        fprintf (stderr, "%d %d %d %d \n",
                 boxes[i].x1, boxes[i].y1, boxes[i].x2, boxes[i].y2);
    }

    fprintf (stderr, "\n");

    return count;
}
 
 
/* Initialize a region to empty, sharing the static empty singletons
 * (no allocation; _fini on such a region is a no-op). */
PIXMAN_EXPORT void
PREFIX (_init) (region_type_t *region)
{
    region->extents = *pixman_region_empty_box;
    region->data = pixman_region_empty_data;
}
 
/* Initialize the region to the single rectangle (x, y, width, height).
 * A degenerate rectangle yields the empty region; an inverted one is
 * additionally logged as a caller error. */
PIXMAN_EXPORT void
PREFIX (_init_rect) (region_type_t * region,
                     int x,
                     int y,
                     unsigned int width,
                     unsigned int height)
{
    region->extents.x1 = x;
    region->extents.y1 = y;
    region->extents.x2 = x + width;
    region->extents.y2 = y + height;

    if (GOOD_RECT (&region->extents))
    {
        /* Single-box regions keep the box inline in extents. */
        region->data = NULL;
        return;
    }

    if (BAD_RECT (&region->extents))
        _pixman_log_error (FUNC, "Invalid rectangle passed");

    PREFIX (_init) (region);
}
 
/* Initialize the region to the single box *extents; fall back to the
 * empty region for a degenerate box, logging inverted boxes. */
PIXMAN_EXPORT void
PREFIX (_init_with_extents) (region_type_t *region, box_type_t *extents)
{
    if (GOOD_RECT (extents))
    {
        region->extents = *extents;
        region->data = NULL;
        return;
    }

    if (BAD_RECT (extents))
        _pixman_log_error (FUNC, "Invalid rectangle passed");

    PREFIX (_init) (region);
}
 
/* Release the region's box storage (static singleton data is skipped by
 * FREE_DATA).  The region must be re-initialized before reuse. */
PIXMAN_EXPORT void
PREFIX (_fini) (region_type_t *region)
{
    GOOD (region);
    FREE_DATA (region);
}
 
/* Return the number of boxes in the region. */
PIXMAN_EXPORT int
PREFIX (_n_rects) (region_type_t *region)
{
    return PIXREGION_NUMRECTS (region);
}
 
/* Return a pointer to the region's box array, optionally storing the
 * box count through n_rects (which may be NULL). */
PIXMAN_EXPORT box_type_t *
PREFIX (_rectangles) (region_type_t *region,
                      int *n_rects)
{
    if (n_rects != NULL)
        *n_rects = PIXREGION_NUMRECTS (region);

    return PIXREGION_RECTS (region);
}
 
/* Mark a region as broken after an allocation failure: free its storage,
 * reset it to empty, and tag it with the broken-data sentinel so later
 * operations propagate the failure.  Always returns FALSE. */
static pixman_bool_t
pixman_break (region_type_t *region)
{
    FREE_DATA (region);

    region->extents = *pixman_region_empty_box;
    region->data = pixman_broken_data;

    return FALSE;
}
 
/*
 * Ensure the region has room for n additional boxes, (re)allocating its
 * data block as necessary.  On allocation failure the region is broken
 * (see pixman_break) and FALSE is returned.
 */
static pixman_bool_t
pixman_rect_alloc (region_type_t * region,
                   int n)
{
    region_data_type_t *data;

    if (!region->data)
    {
	/* The single box currently lives inline in `extents`; move it
	 * into a freshly allocated array. */
	n++;
	region->data = alloc_data (n);

	if (!region->data)
	    return pixman_break (region);

	region->data->numRects = 1;
	*PIXREGION_BOXPTR (region) = region->extents;
    }
    else if (!region->data->size)
    {
	/* Static empty-region singleton: replace with a real allocation. */
	region->data = alloc_data (n);

	if (!region->data)
	    return pixman_break (region);

	region->data->numRects = 0;
    }
    else
    {
	size_t data_size;

	if (n == 1)
	{
	    /* Amortized growth for single-box appends. */
	    n = region->data->numRects;
	    if (n > 500) /* XXX pick numbers out of a hat */
		n = 250;
	}

	n += region->data->numRects;
	data_size = PIXREGION_SZOF (n);

	if (!data_size)
	{
	    data = NULL;
	}
	else
	{
	    /* Reuse data_size rather than recomputing PIXREGION_SZOF (n)
	     * a second time (the original called it twice). */
	    data = (region_data_type_t *)
		realloc (region->data, data_size);
	}

	if (!data)
	    return pixman_break (region);

	region->data = data;
    }

    region->data->size = n;

    return TRUE;
}
 
/* Make dst an exact copy of src, reusing dst's existing box storage when
 * it is already large enough.  Returns FALSE (breaking dst) on OOM. */
PIXMAN_EXPORT pixman_bool_t
PREFIX (_copy) (region_type_t *dst, region_type_t *src)
{
    int n;

    GOOD (dst);
    GOOD (src);

    if (dst == src)
        return TRUE;

    dst->extents = src->extents;

    if (!src->data || !src->data->size)
    {
        /* src keeps at most one box inline; its data pointer is either
         * NULL or a shared static singleton, so dst may share it. */
        FREE_DATA (dst);
        dst->data = src->data;
        return TRUE;
    }

    n = src->data->numRects;

    if (!dst->data || dst->data->size < n)
    {
        FREE_DATA (dst);

        dst->data = alloc_data (n);

        if (!dst->data)
            return pixman_break (dst);

        dst->data->size = n;
    }

    dst->data->numRects = n;

    memmove ((char *)PIXREGION_BOXPTR (dst), (char *)PIXREGION_BOXPTR (src),
             n * sizeof(box_type_t));

    return TRUE;
}
 
/*======================================================================
* Generic Region Operator
*====================================================================*/
 
/*-
*-----------------------------------------------------------------------
* pixman_coalesce --
* Attempt to merge the boxes in the current band with those in the
* previous one. We are guaranteed that the current band extends to
* the end of the rects array. Used only by pixman_op.
*
* Results:
* The new index for the previous band.
*
* Side Effects:
* If coalescing takes place:
* - rectangles in the previous band will have their y2 fields
* altered.
* - region->data->numRects will be decreased.
*
*-----------------------------------------------------------------------
*/
static inline int
pixman_coalesce (region_type_t * region,      /* Region to coalesce */
		 int             prev_start,  /* Index of start of previous band */
		 int             cur_start)   /* Index of start of current band */
{
    box_type_t *prev_box;   /* Current box in previous band */
    box_type_t *cur_box;    /* Current box in current band */
    int numRects;           /* Number rectangles in both bands */
    int y2;                 /* Bottom of current band */

    /*
     * Figure out how many rectangles are in the band.
     */
    numRects = cur_start - prev_start;
    critical_if_fail (numRects == region->data->numRects - cur_start);

    if (!numRects) return cur_start;

    /*
     * The bands may only be coalesced if the bottom of the previous
     * matches the top scanline of the current.
     */
    prev_box = PIXREGION_BOX (region, prev_start);
    cur_box = PIXREGION_BOX (region, cur_start);
    if (prev_box->y2 != cur_box->y1) return cur_start;

    /*
     * Make sure the bands have boxes in the same places. This
     * assumes that boxes have been added in such a way that they
     * cover the most area possible. I.e. two boxes in a band must
     * have some horizontal space between them.
     */
    y2 = cur_box->y2;

    do
    {
	if ((prev_box->x1 != cur_box->x1) || (prev_box->x2 != cur_box->x2))
	    return (cur_start);

	prev_box++;
	cur_box++;
	numRects--;
    }
    while (numRects);

    /*
     * The bands may be merged, so set the bottom y of each box
     * in the previous band to the bottom y of the current band.
     * The current band's boxes are then dropped from the region
     * by shrinking numRects.
     */
    numRects = cur_start - prev_start;
    region->data->numRects -= numRects;

    do
    {
	prev_box--;
	prev_box->y2 = y2;
	numRects--;
    }
    while (numRects);

    return prev_start;
}

/* Quicky macro to avoid trivial reject procedure calls to pixman_coalesce.
 * Coalescing is only attempted when the current band has the same number
 * of boxes as the previous band. */
#define COALESCE(new_reg, prev_band, cur_band) \
    do \
    { \
	if (cur_band - prev_band == new_reg->data->numRects - cur_band) \
	    prev_band = pixman_coalesce (new_reg, prev_band, cur_band); \
	else \
	    prev_band = cur_band; \
    } while (0)
 
/*-
*-----------------------------------------------------------------------
* pixman_region_append_non_o --
* Handle a non-overlapping band for the union and subtract operations.
* Just adds the (top/bottom-clipped) rectangles into the region.
* Doesn't have to check for subsumption or anything.
*
* Results:
* None.
*
* Side Effects:
* region->data->numRects is incremented and the rectangles overwritten
* with the rectangles we're passed.
*
*-----------------------------------------------------------------------
*/
/* Non-overlapping band handler for union and subtract: append every box
 * in [r, r_end), clipped vertically to [y1, y2), to the region.  No
 * subsumption checks are needed.  Returns FALSE on allocation failure. */
static inline pixman_bool_t
pixman_region_append_non_o (region_type_t * region,
                            box_type_t *    r,
                            box_type_t *    r_end,
                            int             y1,
                            int             y2)
{
    box_type_t *dest;
    int count = r_end - r;

    critical_if_fail (y1 < y2);
    critical_if_fail (count != 0);

    /* Reserve space for the whole batch up front. */
    RECTALLOC (region, count);
    dest = PIXREGION_TOP (region);
    region->data->numRects += count;

    for (; r != r_end; r++)
    {
        critical_if_fail (r->x1 < r->x2);
        ADDRECT (dest, r->x1, y1, r->x2, y2);
    }

    return TRUE;
}
 
/* Set ry1 to the band's top and advance r_band_end past every box that
 * shares that top coordinate (the boxes of one band are contiguous). */
#define FIND_BAND(r, r_band_end, r_end, ry1) \
    do \
    { \
	ry1 = r->y1; \
	r_band_end = r + 1; \
	while ((r_band_end != r_end) && (r_band_end->y1 == ry1)) { \
	    r_band_end++; \
	} \
    } while (0)

/* Bulk-append the boxes in [r, r_end) to new_reg, jumping to `bail` on
 * allocation failure.  Used for the tail of pixman_op. */
#define APPEND_REGIONS(new_reg, r, r_end) \
    do \
    { \
	int new_rects; \
	if ((new_rects = r_end - r)) { \
	    RECTALLOC_BAIL (new_reg, new_rects, bail); \
	    memmove ((char *)PIXREGION_TOP (new_reg), (char *)r, \
		     new_rects * sizeof(box_type_t)); \
	    new_reg->data->numRects += new_rects; \
	} \
    } while (0)
 
/*-
*-----------------------------------------------------------------------
* pixman_op --
* Apply an operation to two regions. Called by pixman_region_union, pixman_region_inverse,
* pixman_region_subtract, pixman_region_intersect.... Both regions MUST have at least one
* rectangle, and cannot be the same object.
*
* Results:
* TRUE if successful.
*
* Side Effects:
* The new region is overwritten.
* overlap set to TRUE if overlap_func ever returns TRUE.
*
* Notes:
* The idea behind this function is to view the two regions as sets.
* Together they cover a rectangle of area that this function divides
* into horizontal bands where points are covered only by one region
* or by both. For the first case, the non_overlap_func is called with
* each the band and the band's upper and lower extents. For the
* second, the overlap_func is called to process the entire band. It
* is responsible for clipping the rectangles in the band, though
* this function provides the boundaries.
* At the end of each band, the new region is coalesced, if possible,
* to reduce the number of rectangles in the region.
*
*-----------------------------------------------------------------------
*/
 
/* Callback invoked by pixman_op for each band where both regions have
 * boxes; it clips the boxes of both bands to [y1, y2) and emits the
 * operation's result into `region`.  Returns FALSE on OOM. */
typedef pixman_bool_t (*overlap_proc_ptr) (region_type_t *region,
					   box_type_t *   r1,
					   box_type_t *   r1_end,
					   box_type_t *   r2,
					   box_type_t *   r2_end,
					   int            y1,
					   int            y2,
					   int *          overlap);
 
static pixman_bool_t
pixman_op (region_type_t *  new_reg,      /* Place to store result */
	   region_type_t *  reg1,         /* First region in operation */
	   region_type_t *  reg2,         /* 2d region in operation */
	   overlap_proc_ptr overlap_func, /* Function to call for over-
					   * lapping bands */
	   int              append_non1,  /* Append non-overlapping bands
					   * in region 1 ?
					   */
	   int              append_non2,  /* Append non-overlapping bands
					   * in region 2 ?
					   */
	   int *            overlap)
{
    box_type_t *r1;                 /* Pointer into first region */
    box_type_t *r2;                 /* Pointer into 2d region */
    box_type_t *r1_end;             /* End of 1st region */
    box_type_t *r2_end;             /* End of 2d region */
    int ybot;                       /* Bottom of intersection */
    int ytop;                       /* Top of intersection */
    region_data_type_t *old_data;   /* Old data for new_reg */
    int prev_band;                  /* Index of start of
				     * previous band in new_reg */
    int cur_band;                   /* Index of start of current
				     * band in new_reg */
    box_type_t * r1_band_end;       /* End of current band in r1 */
    box_type_t * r2_band_end;       /* End of current band in r2 */
    int top;                        /* Top of non-overlapping band */
    int bot;                        /* Bottom of non-overlapping band*/
    int r1y1;                       /* Temps for r1->y1 and r2->y1 */
    int r2y1;
    int new_size;
    int numRects;

    /*
     * Break any region computed from a broken region
     */
    if (PIXREGION_NAR (reg1) || PIXREGION_NAR (reg2))
	return pixman_break (new_reg);

    /*
     * Initialization:
     *	set r1, r2, r1_end and r2_end appropriately, save the rectangles
     * of the destination region until the end in case it's one of
     * the two source regions, then mark the "new" region empty, allocating
     * another array of rectangles for it to use.
     */

    r1 = PIXREGION_RECTS (reg1);
    new_size = PIXREGION_NUMRECTS (reg1);
    r1_end = r1 + new_size;

    numRects = PIXREGION_NUMRECTS (reg2);
    r2 = PIXREGION_RECTS (reg2);
    r2_end = r2 + numRects;

    critical_if_fail (r1 != r1_end);
    critical_if_fail (r2 != r2_end);

    old_data = (region_data_type_t *)NULL;

    /* new_reg may alias one of the inputs; in that case keep the old box
     * array alive (old_data) until the operation is finished, since r1/r2
     * still point into it. */
    if (((new_reg == reg1) && (new_size > 1)) ||
        ((new_reg == reg2) && (numRects > 1)))
    {
	old_data = new_reg->data;
	new_reg->data = pixman_region_empty_data;
    }

    /* guess at new size */
    if (numRects > new_size)
	new_size = numRects;

    new_size <<= 1;

    if (!new_reg->data)
	new_reg->data = pixman_region_empty_data;
    else if (new_reg->data->size)
	new_reg->data->numRects = 0;

    if (new_size > new_reg->data->size)
    {
	if (!pixman_rect_alloc (new_reg, new_size))
	{
	    if (old_data)
		free (old_data);
	    return FALSE;
	}
    }

    /*
     * Initialize ybot.
     * In the upcoming loop, ybot and ytop serve different functions depending
     * on whether the band being handled is an overlapping or non-overlapping
     * band.
     *  In the case of a non-overlapping band (only one of the regions
     * has points in the band), ybot is the bottom of the most recent
     * intersection and thus clips the top of the rectangles in that band.
     * ytop is the top of the next intersection between the two regions and
     * serves to clip the bottom of the rectangles in the current band.
     *	For an overlapping band (where the two regions intersect), ytop clips
     * the top of the rectangles of both regions and ybot clips the bottoms.
     */

    ybot = MIN (r1->y1, r2->y1);

    /*
     * prev_band serves to mark the start of the previous band so rectangles
     * can be coalesced into larger rectangles. qv. pixman_coalesce, above.
     * In the beginning, there is no previous band, so prev_band == cur_band
     * (cur_band is set later on, of course, but the first band will always
     * start at index 0). prev_band and cur_band must be indices because of
     * the possible expansion, and resultant moving, of the new region's
     * array of rectangles.
     */
    prev_band = 0;

    do
    {
	/*
	 * This algorithm proceeds one source-band (as opposed to a
	 * destination band, which is determined by where the two regions
	 * intersect) at a time. r1_band_end and r2_band_end serve to mark the
	 * rectangle after the last one in the current band for their
	 * respective regions.
	 */
	critical_if_fail (r1 != r1_end);
	critical_if_fail (r2 != r2_end);

	FIND_BAND (r1, r1_band_end, r1_end, r1y1);
	FIND_BAND (r2, r2_band_end, r2_end, r2y1);

	/*
	 * First handle the band that doesn't intersect, if any.
	 *
	 * Note that attention is restricted to one band in the
	 * non-intersecting region at once, so if a region has n
	 * bands between the current position and the next place it overlaps
	 * the other, this entire loop will be passed through n times.
	 */
	if (r1y1 < r2y1)
	{
	    if (append_non1)
	    {
		top = MAX (r1y1, ybot);
		bot = MIN (r1->y2, r2y1);
		if (top != bot)
		{
		    cur_band = new_reg->data->numRects;
		    if (!pixman_region_append_non_o (new_reg, r1, r1_band_end, top, bot))
			goto bail;
		    COALESCE (new_reg, prev_band, cur_band);
		}
	    }
	    ytop = r2y1;
	}
	else if (r2y1 < r1y1)
	{
	    if (append_non2)
	    {
		top = MAX (r2y1, ybot);
		bot = MIN (r2->y2, r1y1);

		if (top != bot)
		{
		    cur_band = new_reg->data->numRects;

		    if (!pixman_region_append_non_o (new_reg, r2, r2_band_end, top, bot))
			goto bail;

		    COALESCE (new_reg, prev_band, cur_band);
		}
	    }
	    ytop = r1y1;
	}
	else
	{
	    ytop = r1y1;
	}

	/*
	 * Now see if we've hit an intersecting band. The two bands only
	 * intersect if ybot > ytop
	 */
	ybot = MIN (r1->y2, r2->y2);
	if (ybot > ytop)
	{
	    cur_band = new_reg->data->numRects;

	    if (!(*overlap_func)(new_reg,
	                         r1, r1_band_end,
	                         r2, r2_band_end,
	                         ytop, ybot,
	                         overlap))
	    {
		goto bail;
	    }
	    COALESCE (new_reg, prev_band, cur_band);
	}

	/*
	 * If we've finished with a band (y2 == ybot) we skip forward
	 * in the region to the next band.
	 */
	if (r1->y2 == ybot)
	    r1 = r1_band_end;

	if (r2->y2 == ybot)
	    r2 = r2_band_end;

    }
    while (r1 != r1_end && r2 != r2_end);

    /*
     * Deal with whichever region (if any) still has rectangles left.
     *
     * We only need to worry about banding and coalescing for the very first
     * band left. After that, we can just group all remaining boxes,
     * regardless of how many bands, into one final append to the list.
     */

    if ((r1 != r1_end) && append_non1)
    {
	/* Do first non_overlap1Func call, which may be able to coalesce */
	FIND_BAND (r1, r1_band_end, r1_end, r1y1);

	cur_band = new_reg->data->numRects;

	if (!pixman_region_append_non_o (new_reg,
	                                 r1, r1_band_end,
	                                 MAX (r1y1, ybot), r1->y2))
	{
	    goto bail;
	}

	COALESCE (new_reg, prev_band, cur_band);

	/* Just append the rest of the boxes */
	APPEND_REGIONS (new_reg, r1_band_end, r1_end);
    }
    else if ((r2 != r2_end) && append_non2)
    {
	/* Do first non_overlap2Func call, which may be able to coalesce */
	FIND_BAND (r2, r2_band_end, r2_end, r2y1);

	cur_band = new_reg->data->numRects;

	if (!pixman_region_append_non_o (new_reg,
	                                 r2, r2_band_end,
	                                 MAX (r2y1, ybot), r2->y2))
	{
	    goto bail;
	}

	COALESCE (new_reg, prev_band, cur_band);

	/* Append rest of boxes */
	APPEND_REGIONS (new_reg, r2_band_end, r2_end);
    }

    if (old_data)
	free (old_data);

    /* Normalize the result: empty regions share the static empty data,
     * single-box regions store the box inline, larger ones may shrink. */
    if (!(numRects = new_reg->data->numRects))
    {
	FREE_DATA (new_reg);
	new_reg->data = pixman_region_empty_data;
    }
    else if (numRects == 1)
    {
	new_reg->extents = *PIXREGION_BOXPTR (new_reg);
	FREE_DATA (new_reg);
	new_reg->data = (region_data_type_t *)NULL;
    }
    else
    {
	DOWNSIZE (new_reg, numRects);
    }

    return TRUE;

bail:
    if (old_data)
	free (old_data);

    return pixman_break (new_reg);
}
 
/*-
*-----------------------------------------------------------------------
* pixman_set_extents --
* Reset the extents of a region to what they should be. Called by
* pixman_region_subtract and pixman_region_intersect as they can't
* figure it out along the way or do so easily, as pixman_region_union can.
*
* Results:
* None.
*
* Side Effects:
* The region's 'extents' structure is overwritten.
*
*-----------------------------------------------------------------------
*/
/* Recompute region->extents from the box list.  Inline-box regions
 * (data == NULL) already have correct extents; empty regions collapse to
 * a zero-area box. */
static void
pixman_set_extents (region_type_t *region)
{
    box_type_t *box, *last;

    if (!region->data)
        return;

    if (!region->data->size)
    {
        region->extents.x2 = region->extents.x1;
        region->extents.y2 = region->extents.y1;
        return;
    }

    box  = PIXREGION_BOXPTR (region);
    last = PIXREGION_END (region);

    /* Because of banding, the first box carries the minimum y1 and the
     * last box the maximum y2; x1/x2 start as guesses and are refined by
     * scanning every box. */
    region->extents.x1 = box->x1;
    region->extents.y1 = box->y1;
    region->extents.x2 = last->x2;
    region->extents.y2 = last->y2;

    critical_if_fail (region->extents.y1 < region->extents.y2);

    for (; box <= last; box++)
    {
        if (box->x1 < region->extents.x1)
            region->extents.x1 = box->x1;
        if (box->x2 > region->extents.x2)
            region->extents.x2 = box->x2;
    }

    critical_if_fail (region->extents.x1 < region->extents.x2);
}
 
/*======================================================================
* Region Intersection
*====================================================================*/
/*-
*-----------------------------------------------------------------------
* pixman_region_intersect_o --
* Handle an overlapping band for pixman_region_intersect.
*
* Results:
* TRUE if successful.
*
* Side Effects:
* Rectangles may be added to the region.
*
*-----------------------------------------------------------------------
*/
/*ARGSUSED*/
/* Overlapping-band handler for intersection: emit the horizontal overlap
 * of each pair of boxes from the two bands, clipped to [y1, y2). */
static pixman_bool_t
pixman_region_intersect_o (region_type_t *region,
                           box_type_t *   r1,
                           box_type_t *   r1_end,
                           box_type_t *   r2,
                           box_type_t *   r2_end,
                           int            y1,
                           int            y2,
                           int *          overlap)
{
    box_type_t *next_rect = PIXREGION_TOP (region);

    critical_if_fail (y1 < y2);
    critical_if_fail (r1 != r1_end && r2 != r2_end);

    while (r1 != r1_end && r2 != r2_end)
    {
        int left  = MAX (r1->x1, r2->x1);
        int right = MIN (r1->x2, r2->x2);

        /* Only a non-empty horizontal overlap produces a box. */
        if (left < right)
            NEWRECT (region, next_rect, left, y1, right, y2);

        /* Step past whichever box(es) end first; the survivor may still
         * overlap the other band's next box. */
        if (r1->x2 == right)
            r1++;

        if (r2->x2 == right)
            r2++;
    }

    return TRUE;
}
 
/* Compute new_reg = reg1 ∩ reg2.  Several fast paths avoid the general
 * band-merge: disjoint extents, two single-box regions, one region
 * subsuming the other, and identical operands. */
PIXMAN_EXPORT pixman_bool_t
PREFIX (_intersect) (region_type_t *     new_reg,
                     region_type_t *        reg1,
                     region_type_t *        reg2)
{
    GOOD (reg1);
    GOOD (reg2);
    GOOD (new_reg);

    /* check for trivial reject */
    if (PIXREGION_NIL (reg1) || PIXREGION_NIL (reg2) ||
        !EXTENTCHECK (&reg1->extents, &reg2->extents))
    {
	/* Covers about 20% of all cases */
	FREE_DATA (new_reg);
	new_reg->extents.x2 = new_reg->extents.x1;
	new_reg->extents.y2 = new_reg->extents.y1;
	if (PIXREGION_NAR (reg1) || PIXREGION_NAR (reg2))
	{
	    /* Broken inputs propagate brokenness to the result. */
	    new_reg->data = pixman_broken_data;
	    return FALSE;
	}
	else
	{
	    new_reg->data = pixman_region_empty_data;
	}
    }
    else if (!reg1->data && !reg2->data)
    {
	/* Covers about 80% of cases that aren't trivially rejected:
	 * both regions are single boxes, so the result is just the
	 * intersection of the extents. */
	new_reg->extents.x1 = MAX (reg1->extents.x1, reg2->extents.x1);
	new_reg->extents.y1 = MAX (reg1->extents.y1, reg2->extents.y1);
	new_reg->extents.x2 = MIN (reg1->extents.x2, reg2->extents.x2);
	new_reg->extents.y2 = MIN (reg1->extents.y2, reg2->extents.y2);

	FREE_DATA (new_reg);

	new_reg->data = (region_data_type_t *)NULL;
    }
    else if (!reg2->data && SUBSUMES (&reg2->extents, &reg1->extents))
    {
	/* reg2 is a single box containing all of reg1. */
	return PREFIX (_copy) (new_reg, reg1);
    }
    else if (!reg1->data && SUBSUMES (&reg1->extents, &reg2->extents))
    {
	/* reg1 is a single box containing all of reg2. */
	return PREFIX (_copy) (new_reg, reg2);
    }
    else if (reg1 == reg2)
    {
	return PREFIX (_copy) (new_reg, reg1);
    }
    else
    {
	/* General purpose intersection */
	int overlap; /* result ignored */

	if (!pixman_op (new_reg, reg1, reg2, pixman_region_intersect_o, FALSE, FALSE,
	                &overlap))
	{
	    return FALSE;
	}

	pixman_set_extents (new_reg);
    }

    GOOD (new_reg);
    return(TRUE);
}
 
/* Union-band helper: fold box *r into the in-progress span [x1, x2).
 * Boxes that touch or overlap the span extend it (setting *overlap on a
 * genuine overlap); a box to the right flushes the span and starts a new
 * one.  Uses the caller's locals region/next_rect/x1/x2/y1/y2. */
#define MERGERECT(r) \
    do \
    { \
	if (r->x1 <= x2) \
	{ \
	    /* Merge with current rectangle */ \
	    if (r->x1 < x2) \
		*overlap = TRUE; \
	    \
	    if (x2 < r->x2) \
		x2 = r->x2; \
	} \
	else \
	{ \
	    /* Add current rectangle, start new one */ \
	    NEWRECT (region, next_rect, x1, y1, x2, y2); \
	    x1 = r->x1; \
	    x2 = r->x2; \
	} \
	r++; \
    } while (0)
 
/*======================================================================
* Region Union
*====================================================================*/
 
/*-
*-----------------------------------------------------------------------
* pixman_region_union_o --
* Handle an overlapping band for the union operation. Picks the
* left-most rectangle each time and merges it into the region.
*
* Results:
* TRUE if successful.
*
* Side Effects:
* region is overwritten.
* overlap is set to TRUE if any boxes overlap.
*
*-----------------------------------------------------------------------
*/
/* Overlapping-band handler for union: repeatedly take the left-most
 * remaining box from either band and merge it into the current span via
 * MERGERECT, emitting spans as they close. */
static pixman_bool_t
pixman_region_union_o (region_type_t *region,
		       box_type_t *   r1,
		       box_type_t *   r1_end,
		       box_type_t *   r2,
		       box_type_t *   r2_end,
		       int            y1,
		       int            y2,
		       int *          overlap)
{
    box_type_t *next_rect;
    int x1;     /* left and right side of current union */
    int x2;

    critical_if_fail (y1 < y2);
    critical_if_fail (r1 != r1_end && r2 != r2_end);

    next_rect = PIXREGION_TOP (region);

    /* Start off current rectangle with the left-most box. */
    if (r1->x1 < r2->x1)
    {
	x1 = r1->x1;
	x2 = r1->x2;
	r1++;
    }
    else
    {
	x1 = r2->x1;
	x2 = r2->x2;
	r2++;
    }

    while (r1 != r1_end && r2 != r2_end)
    {
	if (r1->x1 < r2->x1)
	    MERGERECT (r1);
	else
	    MERGERECT (r2);
    }

    /* Finish off whoever (if any) is left */
    if (r1 != r1_end)
    {
	do
	{
	    MERGERECT (r1);
	}
	while (r1 != r1_end);
    }
    else if (r2 != r2_end)
    {
	do
	{
	    MERGERECT (r2);
	}
	while (r2 != r2_end);
    }

    /* Add current rectangle */
    NEWRECT (region, next_rect, x1, y1, x2, y2);

    return TRUE;
}
 
/* Intersect `source` with the rectangle (x, y, width, height), storing
 * the result in `dest`.  Implemented by wrapping the rectangle in a
 * temporary single-box region. */
PIXMAN_EXPORT pixman_bool_t
PREFIX(_intersect_rect) (region_type_t *dest,
                         region_type_t *source,
                         int x, int y,
                         unsigned int width,
                         unsigned int height)
{
    region_type_t rect;

    rect.data = NULL;
    rect.extents.x1 = x;
    rect.extents.y1 = y;
    rect.extents.x2 = x + width;
    rect.extents.y2 = y + height;

    return PREFIX(_intersect) (dest, source, &rect);
}
 
/* Convenience function for performing union of region with a
* single rectangle
*/
PIXMAN_EXPORT pixman_bool_t
PREFIX (_union_rect) (region_type_t *dest,
region_type_t *source,
int x,
int y,
unsigned int width,
unsigned int height)
{
region_type_t region;
 
region.extents.x1 = x;
region.extents.y1 = y;
region.extents.x2 = x + width;
region.extents.y2 = y + height;
 
if (!GOOD_RECT (&region.extents))
{
if (BAD_RECT (&region.extents))
_pixman_log_error (FUNC, "Invalid rectangle passed");
return PREFIX (_copy) (dest, source);
}
 
region.data = NULL;
 
return PREFIX (_union) (dest, source, &region);
}
 
/* Compute new_reg = reg1 ∪ reg2.  Fast paths handle identical operands,
 * an empty (or broken) input, and one single-box region subsuming the
 * other; only the general case runs the band-merge engine. */
PIXMAN_EXPORT pixman_bool_t
PREFIX (_union) (region_type_t *new_reg,
                 region_type_t *reg1,
                 region_type_t *reg2)
{
    int overlap; /* result ignored */

    /* Return TRUE if some overlap
     * between reg1, reg2
     */
    GOOD (reg1);
    GOOD (reg2);
    GOOD (new_reg);

    /* checks all the simple cases */

    /*
     * Region 1 and 2 are the same
     */
    if (reg1 == reg2)
	return PREFIX (_copy) (new_reg, reg1);

    /*
     * Region 1 is empty (a broken region poisons the result)
     */
    if (PIXREGION_NIL (reg1))
    {
	if (PIXREGION_NAR (reg1))
	    return pixman_break (new_reg);

	if (new_reg != reg2)
	    return PREFIX (_copy) (new_reg, reg2);

	return TRUE;
    }

    /*
     * Region 2 is empty
     */
    if (PIXREGION_NIL (reg2))
    {
	if (PIXREGION_NAR (reg2))
	    return pixman_break (new_reg);

	if (new_reg != reg1)
	    return PREFIX (_copy) (new_reg, reg1);

	return TRUE;
    }

    /*
     * Region 1 completely subsumes region 2
     */
    if (!reg1->data && SUBSUMES (&reg1->extents, &reg2->extents))
    {
	if (new_reg != reg1)
	    return PREFIX (_copy) (new_reg, reg1);

	return TRUE;
    }

    /*
     * Region 2 completely subsumes region 1
     */
    if (!reg2->data && SUBSUMES (&reg2->extents, &reg1->extents))
    {
	if (new_reg != reg2)
	    return PREFIX (_copy) (new_reg, reg2);

	return TRUE;
    }

    if (!pixman_op (new_reg, reg1, reg2, pixman_region_union_o, TRUE, TRUE, &overlap))
	return FALSE;

    /* Union extents are just the union of the input extents. */
    new_reg->extents.x1 = MIN (reg1->extents.x1, reg2->extents.x1);
    new_reg->extents.y1 = MIN (reg1->extents.y1, reg2->extents.y1);
    new_reg->extents.x2 = MAX (reg1->extents.x2, reg2->extents.x2);
    new_reg->extents.y2 = MAX (reg1->extents.y2, reg2->extents.y2);

    GOOD (new_reg);

    return TRUE;
}
 
/*======================================================================
* Batch Rectangle Union
*====================================================================*/
 
/* Swap two entries of the `rects` array that must be in scope at the
 * expansion site (used only by quick_sort_rects below).
 */
#define EXCHANGE_RECTS(a, b) \
    {                        \
        box_type_t t;        \
        t = rects[a];        \
        rects[a] = rects[b]; \
        rects[b] = t;        \
    }

/* Sort rects[0..numRects-1] into ascending order, primary key y1,
 * secondary key x1 (quicksort with a middle-element pivot; the right
 * partition is sorted by recursion, the left by looping).
 */
static void
quick_sort_rects (
    box_type_t rects[],
    int numRects)
{
    int y1;               /* pivot's primary key */
    int x1;               /* pivot's secondary key */
    int i, j;
    box_type_t *r;

    /* Always called with numRects > 1 */

    do
    {
        if (numRects == 2)
        {
            /* Base case: swap the pair if out of order. */
            if (rects[0].y1 > rects[1].y1 ||
                (rects[0].y1 == rects[1].y1 && rects[0].x1 > rects[1].x1))
            {
                EXCHANGE_RECTS (0, 1);
            }

            return;
        }

        /* Choose partition element, stick in location 0 */
        EXCHANGE_RECTS (0, numRects >> 1);
        y1 = rects[0].y1;
        x1 = rects[0].x1;

        /* Partition array around the pivot (Hoare-style index crossing) */
        i = 0;
        j = numRects;

        do
        {
            r = &(rects[i]);
            do
            {
                r++;
                i++;
            }
            while (i != numRects && (r->y1 < y1 || (r->y1 == y1 && r->x1 < x1)));

            r = &(rects[j]);
            do
            {
                r--;
                j--;
            }
            while (y1 < r->y1 || (y1 == r->y1 && x1 < r->x1));

            if (i < j)
                EXCHANGE_RECTS (i, j);
        }
        while (i < j);

        /* Move partition element back to middle */
        EXCHANGE_RECTS (0, j);

        /* Recurse on the right partition if it has more than one element */
        if (numRects - j - 1 > 1)
            quick_sort_rects (&rects[j + 1], numRects - j - 1);

        /* Iterate (instead of recursing) on the left partition */
        numRects = j;
    }
    while (numRects > 1);
}
 
/*-
*-----------------------------------------------------------------------
* pixman_region_validate --
*
* Take a ``region'' which is a non-y-x-banded random collection of
* rectangles, and compute a nice region which is the union of all the
* rectangles.
*
* Results:
* TRUE if successful.
*
* Side Effects:
* The passed-in ``region'' may be modified.
 * overlap set to TRUE if any rectangles overlapped,
 * else FALSE;
*
* Strategy:
* Step 1. Sort the rectangles into ascending order with primary key y1
* and secondary key x1.
*
* Step 2. Split the rectangles into the minimum number of proper y-x
* banded regions. This may require horizontally merging
* rectangles, and vertically coalescing bands. With any luck,
 * this step is an identity transformation (a la the Box widget),
* or a coalescing into 1 box (ala Menus).
*
* Step 3. Merge the separate regions down to a single region by calling
* pixman_region_union. Maximize the work each pixman_region_union call does by using
* a binary merge.
*
*-----------------------------------------------------------------------
*/
 
/* Rebuild `badreg` -- whose rects may be in arbitrary order -- into a
 * properly y-x-banded region, following the three-step strategy described
 * in the comment above: sort, scatter into valid sub-regions, binary merge.
 * Returns TRUE on success; on failure `badreg` is marked broken.
 */
static pixman_bool_t
validate (region_type_t * badreg,
          int *           overlap)
{
    /* Descriptor for regions under construction in Step 2. */
    typedef struct
    {
        region_type_t reg;
        int prev_band;
        int cur_band;
    } region_info_t;

    region_info_t stack_regions[64];   /* avoids heap allocation for small inputs */

    int numRects;          /* Original numRects for badreg */
    region_info_t *ri;     /* Array of current regions */
    int num_ri;            /* Number of entries used in ri */
    int size_ri;           /* Number of entries available in ri */
    int i;                 /* Index into rects */
    int j;                 /* Index into ri */
    region_info_t *rit;    /* &ri[j] */
    region_type_t *reg;    /* ri[j].reg */
    box_type_t *box;       /* Current box in rects */
    box_type_t *ri_box;    /* Last box in ri[j].reg */
    region_type_t *hreg;   /* ri[j_half].reg */
    pixman_bool_t ret = TRUE;

    *overlap = FALSE;

    if (!badreg->data)
    {
        /* Single-box region (stored in the extents): always valid. */
        GOOD (badreg);
        return TRUE;
    }

    numRects = badreg->data->numRects;
    if (!numRects)
    {
        /* Empty region: valid unless it is the broken-region sentinel. */
        if (PIXREGION_NAR (badreg))
            return FALSE;
        GOOD (badreg);
        return TRUE;
    }

    if (badreg->extents.x1 < badreg->extents.x2)
    {
        /* Extents were already computed, so the region is taken as already
         * well-formed (callers wanting full validation zero the extents
         * first -- see PREFIX(_init_rects)); just trim the allocation.
         */
        if ((numRects) == 1)
        {
            FREE_DATA (badreg);
            badreg->data = (region_data_type_t *) NULL;
        }
        else
        {
            DOWNSIZE (badreg, numRects);
        }

        GOOD (badreg);

        return TRUE;
    }

    /* Step 1: Sort the rects array into ascending (y1, x1) order */
    quick_sort_rects (PIXREGION_BOXPTR (badreg), numRects);

    /* Step 2: Scatter the sorted array into the minimum number of regions */

    /* Set up the first region to be the first rectangle in badreg */
    /* Note that step 2 code will never overflow the ri[0].reg rects array */
    ri = stack_regions;
    size_ri = sizeof (stack_regions) / sizeof (stack_regions[0]);
    num_ri = 1;
    ri[0].prev_band = 0;
    ri[0].cur_band = 0;
    ri[0].reg = *badreg;
    box = PIXREGION_BOXPTR (&ri[0].reg);
    ri[0].reg.extents = *box;
    ri[0].reg.data->numRects = 1;
    /* badreg no longer owns the data; park it on the empty singletons. */
    badreg->extents = *pixman_region_empty_box;
    badreg->data = pixman_region_empty_data;

    /* Now scatter rectangles into the minimum set of valid regions. If the
     * next rectangle to be added to a region would force an existing rectangle
     * in the region to be split up in order to maintain y-x banding, just
     * forget it. Try the next region. If it doesn't fit cleanly into any
     * region, make a new one.
     */

    for (i = numRects; --i > 0;)
    {
        box++;
        /* Look for a region to append box to */
        for (j = num_ri, rit = ri; --j >= 0; rit++)
        {
            reg = &rit->reg;
            ri_box = PIXREGION_END (reg);

            if (box->y1 == ri_box->y1 && box->y2 == ri_box->y2)
            {
                /* box is in same band as ri_box. Merge or append it */
                if (box->x1 <= ri_box->x2)
                {
                    /* Merge it with ri_box */
                    if (box->x1 < ri_box->x2)
                        *overlap = TRUE;

                    if (box->x2 > ri_box->x2)
                        ri_box->x2 = box->x2;
                }
                else
                {
                    RECTALLOC_BAIL (reg, 1, bail);
                    *PIXREGION_TOP (reg) = *box;
                    reg->data->numRects++;
                }

                goto next_rect; /* So sue me */
            }
            else if (box->y1 >= ri_box->y2)
            {
                /* Put box into new band */
                if (reg->extents.x2 < ri_box->x2)
                    reg->extents.x2 = ri_box->x2;
                if (reg->extents.x1 > box->x1)
                    reg->extents.x1 = box->x1;
                COALESCE (reg, rit->prev_band, rit->cur_band);
                rit->cur_band = reg->data->numRects;
                RECTALLOC_BAIL (reg, 1, bail);
                *PIXREGION_TOP (reg) = *box;
                reg->data->numRects++;

                goto next_rect;
            }
            /* Well, this region was inappropriate. Try the next one. */
        } /* for j */

        /* Uh-oh. No regions were appropriate. Create a new one. */
        if (size_ri == num_ri)
        {
            size_t data_size;

            /* Oops, allocate space for new region information */
            size_ri <<= 1;

            /* Guard the multiplication below against size_t overflow. */
            data_size = size_ri * sizeof(region_info_t);
            if (data_size / size_ri != sizeof(region_info_t))
                goto bail;

            if (ri == stack_regions)
            {
                /* First growth: migrate from the stack array to the heap. */
                rit = malloc (data_size);
                if (!rit)
                    goto bail;
                memcpy (rit, ri, num_ri * sizeof (region_info_t));
            }
            else
            {
                rit = (region_info_t *) realloc (ri, data_size);
                if (!rit)
                    goto bail;
            }
            ri = rit;
            rit = &ri[num_ri];
        }
        num_ri++;
        rit->prev_band = 0;
        rit->cur_band = 0;
        rit->reg.extents = *box;
        rit->reg.data = (region_data_type_t *)NULL;

        /* MUST force allocation */
        if (!pixman_rect_alloc (&rit->reg, (i + num_ri) / num_ri))
            goto bail;

    next_rect: ;
    } /* for i */

    /* Make a final pass over each region in order to COALESCE and set
     * extents.x2 and extents.y2
     */
    for (j = num_ri, rit = ri; --j >= 0; rit++)
    {
        reg = &rit->reg;
        ri_box = PIXREGION_END (reg);
        reg->extents.y2 = ri_box->y2;

        if (reg->extents.x2 < ri_box->x2)
            reg->extents.x2 = ri_box->x2;

        COALESCE (reg, rit->prev_band, rit->cur_band);

        if (reg->data->numRects == 1) /* keep unions happy below */
        {
            FREE_DATA (reg);
            reg->data = (region_data_type_t *)NULL;
        }
    }

    /* Step 3: Union all regions into a single region.
     * Binary merge: each round unions region j with region j + half,
     * halving the region count until one remains.
     */
    while (num_ri > 1)
    {
        int half = num_ri / 2;
        for (j = num_ri & 1; j < (half + (num_ri & 1)); j++)
        {
            reg = &ri[j].reg;
            hreg = &ri[j + half].reg;

            if (!pixman_op (reg, reg, hreg, pixman_region_union_o, TRUE, TRUE, overlap))
                ret = FALSE;

            if (hreg->extents.x1 < reg->extents.x1)
                reg->extents.x1 = hreg->extents.x1;

            if (hreg->extents.y1 < reg->extents.y1)
                reg->extents.y1 = hreg->extents.y1;

            if (hreg->extents.x2 > reg->extents.x2)
                reg->extents.x2 = hreg->extents.x2;

            if (hreg->extents.y2 > reg->extents.y2)
                reg->extents.y2 = hreg->extents.y2;

            FREE_DATA (hreg);
        }

        num_ri -= half;

        if (!ret)
            goto bail;
    }

    *badreg = ri[0].reg;

    if (ri != stack_regions)
        free (ri);

    GOOD (badreg);
    return ret;

bail:
    /* Failure: release every partially built region and break badreg. */
    for (i = 0; i < num_ri; i++)
        FREE_DATA (&ri[i].reg);

    if (ri != stack_regions)
        free (ri);

    return pixman_break (badreg);
}
 
/*======================================================================
* Region Subtraction
*====================================================================*/
 
/*-
 *-----------------------------------------------------------------------
 * pixman_region_subtract_o --
 *	Overlapping band subtraction. x1 is the left-most point not yet
 *	checked. Callback for pixman_op: processes one y-band in which
 *	the minuend boxes [r1, r1_end) and subtrahend boxes [r2, r2_end)
 *	overlap vertically, appending the uncovered parts of the minuend
 *	to `region` via NEWRECT.
 *
 * Results:
 *	TRUE if successful.
 *
 * Side Effects:
 *	region may have rectangles added to it.
 *
 *-----------------------------------------------------------------------
 */
/*ARGSUSED*/
static pixman_bool_t
pixman_region_subtract_o (region_type_t * region,
                          box_type_t *    r1,
                          box_type_t *    r1_end,
                          box_type_t *    r2,
                          box_type_t *    r2_end,
                          int             y1,
                          int             y2,
                          int *           overlap)
{
    box_type_t * next_rect;   /* append position in region's rect array */
    int x1;                   /* left fence: leftmost minuend x not yet emitted */

    x1 = r1->x1;

    critical_if_fail (y1 < y2);
    critical_if_fail (r1 != r1_end && r2 != r2_end);

    next_rect = PIXREGION_TOP (region);

    do
    {
        if (r2->x2 <= x1)
        {
            /*
             * Subtrahend entirely to left of minuend: go to next subtrahend.
             */
            r2++;
        }
        else if (r2->x1 <= x1)
        {
            /*
             * Subtrahend precedes minuend: nuke left edge of minuend.
             */
            x1 = r2->x2;
            if (x1 >= r1->x2)
            {
                /*
                 * Minuend completely covered: advance to next minuend and
                 * reset left fence to edge of new minuend.
                 */
                r1++;
                if (r1 != r1_end)
                    x1 = r1->x1;
            }
            else
            {
                /*
                 * Subtrahend now used up since it doesn't extend beyond
                 * minuend
                 */
                r2++;
            }
        }
        else if (r2->x1 < r1->x2)
        {
            /*
             * Left part of subtrahend covers part of minuend: add uncovered
             * part of minuend to region and skip to next subtrahend.
             */
            critical_if_fail (x1 < r2->x1);
            NEWRECT (region, next_rect, x1, y1, r2->x1, y2);

            x1 = r2->x2;
            if (x1 >= r1->x2)
            {
                /*
                 * Minuend used up: advance to new...
                 */
                r1++;
                if (r1 != r1_end)
                    x1 = r1->x1;
            }
            else
            {
                /*
                 * Subtrahend used up
                 */
                r2++;
            }
        }
        else
        {
            /*
             * Minuend used up: add any remaining piece before advancing.
             */
            if (r1->x2 > x1)
                NEWRECT (region, next_rect, x1, y1, r1->x2, y2);

            r1++;

            if (r1 != r1_end)
                x1 = r1->x1;
        }
    }
    while ((r1 != r1_end) && (r2 != r2_end));

    /*
     * Add remaining minuend rectangles to region.
     */
    while (r1 != r1_end)
    {
        critical_if_fail (x1 < r1->x2);

        NEWRECT (region, next_rect, x1, y1, r1->x2, y2);

        r1++;
        if (r1 != r1_end)
            x1 = r1->x1;
    }
    return TRUE;
}
 
/*-
*-----------------------------------------------------------------------
* pixman_region_subtract --
* Subtract reg_s from reg_m and leave the result in reg_d.
* S stands for subtrahend, M for minuend and D for difference.
*
* Results:
* TRUE if successful.
*
* Side Effects:
* reg_d is overwritten.
*
*-----------------------------------------------------------------------
*/
/* Compute reg_d = reg_m - reg_s (minuend minus subtrahend).
 * reg_d may alias either operand. Returns FALSE only on allocation failure.
 */
PIXMAN_EXPORT pixman_bool_t
PREFIX (_subtract) (region_type_t *reg_d,
                    region_type_t *reg_m,
                    region_type_t *reg_s)
{
    int overlap; /* result ignored */

    GOOD (reg_m);
    GOOD (reg_s);
    GOOD (reg_d);

    /* Trivial rejects: an empty operand, or extents that don't even touch,
     * leave the minuend unchanged (unless the subtrahend is broken).
     */
    if (PIXREGION_NIL (reg_m) || PIXREGION_NIL (reg_s) ||
        !EXTENTCHECK (&reg_m->extents, &reg_s->extents))
    {
        if (PIXREGION_NAR (reg_s))
            return pixman_break (reg_d);

        return PREFIX (_copy) (reg_d, reg_m);
    }

    /* Subtracting a region from itself yields the empty region. */
    if (reg_m == reg_s)
    {
        FREE_DATA (reg_d);
        reg_d->extents.x2 = reg_d->extents.x1;
        reg_d->extents.y2 = reg_d->extents.y1;
        reg_d->data = pixman_region_empty_data;

        return TRUE;
    }

    /* General case: band-merge, keeping the parts of reg_m not covered by
     * reg_s and discarding reg_s-only rectangles.
     */
    if (!pixman_op (reg_d, reg_m, reg_s, pixman_region_subtract_o, TRUE, FALSE, &overlap))
        return FALSE;

    /* Extents are recomputed only now: reg_d may alias a source operand,
     * and pixman_op depends on the source extents being unaltered. This
     * also avoids examining rectangles later nuked by coalescing.
     */
    pixman_set_extents (reg_d);
    GOOD (reg_d);

    return TRUE;
}
 
/*======================================================================
* Region Inversion
*====================================================================*/
 
/*-
*-----------------------------------------------------------------------
* pixman_region_inverse --
* Take a region and a box and return a region that is everything
* in the box but not in the region. The careful reader will note
* that this is the same as subtracting the region from the box...
*
* Results:
* TRUE.
*
* Side Effects:
* new_reg is overwritten.
*
*-----------------------------------------------------------------------
*/
/* Compute new_reg = inv_rect - reg1: everything inside the bounding box
 * that is not covered by reg1. Returns FALSE only on allocation failure.
 */
pixman_bool_t
PIXMAN_EXPORT PREFIX (_inverse) (region_type_t *new_reg,  /* Destination region */
                                 region_type_t *reg1,     /* Region to invert */
                                 box_type_t *   inv_rect) /* Bounding box for inversion */
{
    region_type_t inv_reg; /* throwaway single-box region wrapping inv_rect */
    int overlap;           /* result ignored */

    GOOD (reg1);
    GOOD (new_reg);

    /* Trivial rejects: inverting an empty region, or one entirely outside
     * the bounding box, yields the whole box.
     */
    if (PIXREGION_NIL (reg1) || !EXTENTCHECK (inv_rect, &reg1->extents))
    {
        if (PIXREGION_NAR (reg1))
            return pixman_break (new_reg);

        new_reg->extents = *inv_rect;
        FREE_DATA (new_reg);
        new_reg->data = (region_data_type_t *)NULL;

        return TRUE;
    }

    /* Inversion is just subtraction of reg1 from the box. */
    inv_reg.extents = *inv_rect;
    inv_reg.data = (region_data_type_t *)NULL;

    if (!pixman_op (new_reg, &inv_reg, reg1, pixman_region_subtract_o, TRUE, FALSE, &overlap))
        return FALSE;

    /* Extents are recomputed only now: new_reg may alias a source operand,
     * and pixman_op depends on the source extents being unaltered.
     */
    pixman_set_extents (new_reg);
    GOOD (new_reg);

    return TRUE;
}
 
/*
 * rect_in(region, rect)
 * This routine takes a pointer to a region and a pointer to a box
 * and determines if the box is outside/inside/partly inside the region.
 *
 * The idea is to travel through the list of rectangles trying to cover the
 * passed box with them. Anytime a piece of the rectangle isn't covered
 * by a band of rectangles, part_out is set TRUE. Any time a rectangle in
 * the region covers part of the box, part_in is set TRUE. The process ends
 * when either the box has been completely covered (we reached a band that
 * doesn't overlap the box, part_in is TRUE and part_out is false), the
 * box has been partially covered (part_in == part_out == TRUE -- because of
 * the banding, the first time this is true we know the box is only
 * partially in the region) or is outside the region (we reached a band
 * that doesn't overlap the box at all and part_in is false)
 */

pixman_region_overlap_t
PIXMAN_EXPORT PREFIX (_contains_rectangle) (region_type_t * region,
                                            box_type_t *    prect)
{
    box_type_t * pbox;
    box_type_t * pbox_end;
    int part_in, part_out;
    int numRects;
    int x, y;       /* top-left corner of the not-yet-covered part of prect */

    GOOD (region);

    numRects = PIXREGION_NUMRECTS (region);

    /* useful optimization: empty region or disjoint extents => OUT */
    if (!numRects || !EXTENTCHECK (&region->extents, prect))
        return(PIXMAN_REGION_OUT);

    if (numRects == 1)
    {
        /* We know that it must be PIXMAN_REGION_IN or PIXMAN_REGION_PART
         * (the extents are the single rectangle, and they intersect prect).
         */
        if (SUBSUMES (&region->extents, prect))
            return(PIXMAN_REGION_IN);
        else
            return(PIXMAN_REGION_PART);
    }

    part_out = FALSE;
    part_in = FALSE;

    /* (x,y) starts at upper left of rect, moving to the right and down */
    x = prect->x1;
    y = prect->y1;

    /* can stop when both part_out and part_in are TRUE, or we reach prect->y2 */
    for (pbox = PIXREGION_BOXPTR (region), pbox_end = pbox + numRects;
         pbox != pbox_end;
         pbox++)
    {
        if (pbox->y2 <= y)
            continue; /* getting up to speed or skipping remainder of band */

        if (pbox->y1 > y)
        {
            part_out = TRUE; /* missed part of rectangle above */
            if (part_in || (pbox->y1 >= prect->y2))
                break;
            y = pbox->y1; /* x guaranteed to be == prect->x1 */
        }

        if (pbox->x2 <= x)
            continue; /* not far enough over yet */

        if (pbox->x1 > x)
        {
            part_out = TRUE; /* missed part of rectangle to left */
            if (part_in)
                break;
        }

        if (pbox->x1 < prect->x2)
        {
            part_in = TRUE; /* definitely overlap */
            if (part_out)
                break;
        }

        if (pbox->x2 >= prect->x2)
        {
            y = pbox->y2; /* finished with this band */
            if (y >= prect->y2)
                break;
            x = prect->x1; /* reset x out to left again */
        }
        else
        {
            /*
             * Because boxes in a band are maximal width, if the first box
             * to overlap the rectangle doesn't completely cover it in that
             * band, the rectangle must be partially out, since some of it
             * will be uncovered in that band. part_in will have been set true
             * by now...
             */
            part_out = TRUE;
            break;
        }
    }

    if (part_in)
    {
        /* Fully IN only if the scan actually reached the bottom of prect. */
        if (y < prect->y2)
            return PIXMAN_REGION_PART;
        else
            return PIXMAN_REGION_IN;
    }
    else
    {
        return PIXMAN_REGION_OUT;
    }
}
 
/* PREFIX(_translate) (region, x, y)
* translates in place
*/
 
PIXMAN_EXPORT void
PREFIX (_translate) (region_type_t *region, int x, int y)
{
    overflow_int_t x1, x2, y1, y2;   /* wide copies to detect range overflow */
    int nbox;
    box_type_t * pbox;

    GOOD (region);

    /* Shift the extents, keeping wide copies for the range checks below
     * (the stores truncate to the box coordinate type).
     */
    region->extents.x1 = x1 = region->extents.x1 + x;
    region->extents.y1 = y1 = region->extents.y1 + y;
    region->extents.x2 = x2 = region->extents.x2 + x;
    region->extents.y2 = y2 = region->extents.y2 + y;

    /* Sign-bit trick: the OR is non-negative iff every term is, i.e. the
     * translated extents lie fully inside [PIXMAN_REGION_MIN, PIXMAN_REGION_MAX].
     */
    if (((x1 - PIXMAN_REGION_MIN) | (y1 - PIXMAN_REGION_MIN) | (PIXMAN_REGION_MAX - x2) | (PIXMAN_REGION_MAX - y2)) >= 0)
    {
        /* Common case: no clamping needed; shift every box in place. */
        if (region->data && (nbox = region->data->numRects))
        {
            for (pbox = PIXREGION_BOXPTR (region); nbox--; pbox++)
            {
                pbox->x1 += x;
                pbox->y1 += y;
                pbox->x2 += x;
                pbox->y2 += y;
            }
        }
        return;
    }

    /* Nothing of the region remains inside the representable coordinate
     * range: make it empty.
     */
    if (((x2 - PIXMAN_REGION_MIN) | (y2 - PIXMAN_REGION_MIN) | (PIXMAN_REGION_MAX - x1) | (PIXMAN_REGION_MAX - y1)) <= 0)
    {
        region->extents.x2 = region->extents.x1;
        region->extents.y2 = region->extents.y1;
        FREE_DATA (region);
        region->data = pixman_region_empty_data;
        return;
    }

    /* Partial overflow: clamp the extents to the representable range. */
    if (x1 < PIXMAN_REGION_MIN)
        region->extents.x1 = PIXMAN_REGION_MIN;
    else if (x2 > PIXMAN_REGION_MAX)
        region->extents.x2 = PIXMAN_REGION_MAX;

    if (y1 < PIXMAN_REGION_MIN)
        region->extents.y1 = PIXMAN_REGION_MIN;
    else if (y2 > PIXMAN_REGION_MAX)
        region->extents.y2 = PIXMAN_REGION_MAX;

    /* Shift each box, dropping those pushed entirely out of range and
     * clamping those partially out; survivors are compacted in place.
     */
    if (region->data && (nbox = region->data->numRects))
    {
        box_type_t * pbox_out;   /* write cursor for the compaction */

        for (pbox_out = pbox = PIXREGION_BOXPTR (region); nbox--; pbox++)
        {
            pbox_out->x1 = x1 = pbox->x1 + x;
            pbox_out->y1 = y1 = pbox->y1 + y;
            pbox_out->x2 = x2 = pbox->x2 + x;
            pbox_out->y2 = y2 = pbox->y2 + y;

            if (((x2 - PIXMAN_REGION_MIN) | (y2 - PIXMAN_REGION_MIN) |
                 (PIXMAN_REGION_MAX - x1) | (PIXMAN_REGION_MAX - y1)) <= 0)
            {
                /* Box fell completely out of range: discard it. */
                region->data->numRects--;
                continue;
            }

            if (x1 < PIXMAN_REGION_MIN)
                pbox_out->x1 = PIXMAN_REGION_MIN;
            else if (x2 > PIXMAN_REGION_MAX)
                pbox_out->x2 = PIXMAN_REGION_MAX;

            if (y1 < PIXMAN_REGION_MIN)
                pbox_out->y1 = PIXMAN_REGION_MIN;
            else if (y2 > PIXMAN_REGION_MAX)
                pbox_out->y2 = PIXMAN_REGION_MAX;

            pbox_out++;
        }

        if (pbox_out != pbox)
        {
            /* At least one box was dropped: renormalize the region. */
            if (region->data->numRects == 1)
            {
                region->extents = *PIXREGION_BOXPTR (region);
                FREE_DATA (region);
                region->data = (region_data_type_t *)NULL;
            }
            else
            {
                pixman_set_extents (region);
            }
        }
    }

    GOOD (region);
}
 
/* Reinitialize `region` to cover exactly *box, releasing any rect list. */
PIXMAN_EXPORT void
PREFIX (_reset) (region_type_t *region, box_type_t *box)
{
    GOOD (region);
    critical_if_fail (GOOD_RECT (box));

    region->extents = *box;
    FREE_DATA (region);
    region->data = NULL;
}
 
/* box is "return" value */
/* Return TRUE if (x, y) lies inside `region`. When it does and `box` is
 * non-NULL, *box receives the containing rectangle ("return" value).
 */
PIXMAN_EXPORT int
PREFIX (_contains_point) (region_type_t * region,
                          int x, int y,
                          box_type_t * box)
{
    box_type_t *b, *b_end;
    int n;

    GOOD (region);

    n = PIXREGION_NUMRECTS (region);
    if (n == 0 || !INBOX (&region->extents, x, y))
        return FALSE;

    if (n == 1)
    {
        /* No rect list: the extents themselves are the single rectangle. */
        if (box)
            *box = region->extents;

        return TRUE;
    }

    /* Walk the y-x sorted rect list looking for a box containing (x, y). */
    for (b = PIXREGION_BOXPTR (region), b_end = b + n; b != b_end; b++)
    {
        if (y >= b->y2)
            continue;               /* band is still above the point */

        if (y < b->y1 || x < b->x1)
            break;                  /* passed the point without covering it */

        if (x >= b->x2)
            continue;               /* point is right of this box, same band */

        if (box)
            *box = *b;

        return TRUE;
    }

    return FALSE;
}
 
/* Return TRUE if the region covers at least one pixel. */
PIXMAN_EXPORT int
PREFIX (_not_empty) (region_type_t * region)
{
    GOOD (region);

    return PIXREGION_NIL (region) ? FALSE : TRUE;
}
 
/* Return a pointer to the region's bounding box (owned by the region,
 * valid while the region itself lives).
 */
PIXMAN_EXPORT box_type_t *
PREFIX (_extents) (region_type_t * region)
{
    GOOD (region);

    return &region->extents;
}
 
/*
* Clip a list of scanlines to a region. The caller has allocated the
* space. FSorted is non-zero if the scanline origins are in ascending order.
*
* returns the number of new, clipped scanlines.
*/
 
/* Verify the internal invariants of `reg`: well-ordered extents, boxes in
 * y-x sorted band order with no degenerate boxes, and extents that exactly
 * bound the box list. Returns FALSE if any invariant is violated.
 */
PIXMAN_EXPORT pixman_bool_t
PREFIX (_selfcheck) (region_type_t *reg)
{
    int i, numRects;

    /* Extents must describe a (possibly empty) well-ordered box. */
    if ((reg->extents.x1 > reg->extents.x2) ||
        (reg->extents.y1 > reg->extents.y2))
    {
        return FALSE;
    }

    numRects = PIXREGION_NUMRECTS (reg);
    if (!numRects)
    {
        /* Empty region: degenerate extents, and data is either the shared
         * empty singleton or an allocated header with a nonzero size.
         */
        return ((reg->extents.x1 == reg->extents.x2) &&
                (reg->extents.y1 == reg->extents.y2) &&
                (reg->data->size || (reg->data == pixman_region_empty_data)));
    }
    else if (numRects == 1)
    {
        /* A single box lives in the extents; data must be NULL. */
        return (!reg->data);
    }
    else
    {
        box_type_t * pbox_p, * pbox_n;   /* previous / next box in the scan */
        box_type_t box;                  /* accumulated bounding box */

        pbox_p = PIXREGION_RECTS (reg);
        box = *pbox_p;
        box.y2 = pbox_p[numRects - 1].y2;
        pbox_n = pbox_p + 1;

        for (i = numRects; --i > 0; pbox_p++, pbox_n++)
        {
            /* Every box must be non-degenerate. */
            if ((pbox_n->x1 >= pbox_n->x2) ||
                (pbox_n->y1 >= pbox_n->y2))
            {
                return FALSE;
            }

            /* Accumulate the true x-bounds of the box list. */
            if (pbox_n->x1 < box.x1)
                box.x1 = pbox_n->x1;
            if (pbox_n->x2 > box.x2)
                box.x2 = pbox_n->x2;

            /* Boxes must be sorted by (y1, x1); boxes sharing y1 must share
             * y2 (proper banding) and must not overlap their predecessor.
             */
            if ((pbox_n->y1 < pbox_p->y1) ||
                ((pbox_n->y1 == pbox_p->y1) &&
                 ((pbox_n->x1 < pbox_p->x2) || (pbox_n->y2 != pbox_p->y2))))
            {
                return FALSE;
            }
        }

        /* Stored extents must match the accumulated bounding box. */
        return ((box.x1 == reg->extents.x1) &&
                (box.x2 == reg->extents.x2) &&
                (box.y1 == reg->extents.y1) &&
                (box.y2 == reg->extents.y2));
    }
}
 
/* Initialize `region` from an arbitrary list of `count` boxes: empty or
 * malformed boxes are discarded and the rest are normalized into y-x
 * banded form. Returns FALSE only on allocation failure.
 */
PIXMAN_EXPORT pixman_bool_t
PREFIX (_init_rects) (region_type_t *region,
                      const box_type_t *boxes, int count)
{
    box_type_t *rects;
    int n_valid;
    int i;

    /* One box: the region is just its extents; reuse the rect init path. */
    if (count == 1)
    {
        PREFIX (_init_rect) (region,
                             boxes[0].x1,
                             boxes[0].y1,
                             boxes[0].x2 - boxes[0].x1,
                             boxes[0].y2 - boxes[0].y1);
        return TRUE;
    }

    PREFIX (_init) (region);

    /* Zero boxes: keep the shared static empty data; allocating here would
     * leak it, since the 0-rect case is represented by that singleton.
     */
    if (count == 0)
        return TRUE;

    if (!pixman_rect_alloc (region, count))
        return FALSE;

    rects = PIXREGION_RECTS (region);

    /* Bulk-copy the caller's boxes, then compact away any that are empty
     * or malformed (x1 >= x2 or y1 >= y2).
     */
    memcpy (rects, boxes, sizeof(box_type_t) * count);

    n_valid = 0;
    for (i = 0; i < count; ++i)
    {
        if (rects[i].x1 < rects[i].x2 && rects[i].y1 < rects[i].y2)
        {
            if (n_valid != i)
                rects[n_valid] = rects[i];
            n_valid++;
        }
    }

    region->data->numRects = n_valid;

    /* Compaction may have left 0 or 1 rectangles; handle those directly. */
    if (n_valid == 0)
    {
        FREE_DATA (region);
        PREFIX (_init) (region);

        return TRUE;
    }

    if (n_valid == 1)
    {
        region->extents = rects[0];

        FREE_DATA (region);
        region->data = NULL;

        GOOD (region);

        return TRUE;
    }

    /* Zero the x-extents so validate() performs a full rebuild (sort,
     * band, recompute extents). `i` doubles as the discarded overlap flag.
     */
    region->extents.x1 = region->extents.x2 = 0;

    return validate (region, &i);
}
 
/* Plain memory read; kept as a macro to mirror the accessor-variant builds. */
#define READ(_ptr) (*(_ptr))

/* Append the rectangle (rx1, ry1)-(rx2, ry2) to reg, growing its rect
 * array when full. `r` is the current append cursor and `*first_rect`
 * the start of the rect array; both are updated if the array is
 * reallocated. Returns the advanced cursor, or NULL on allocation failure.
 */
static inline box_type_t *
bitmap_addrect (region_type_t *reg,
                box_type_t *r,
                box_type_t **first_rect,
                int rx1, int ry1,
                int rx2, int ry2)
{
    /* Skip degenerate rects, and rects horizontally contained within the
     * previous rect of the same band (they would be redundant).
     */
    if ((rx1 < rx2) && (ry1 < ry2) &&
        (!(reg->data->numRects &&
           ((r-1)->y1 == ry1) && ((r-1)->y2 == ry2) &&
           ((r-1)->x1 <= rx1) && ((r-1)->x2 >= rx2))))
    {
        /* NOTE(review): reg->data is dereferenced above before the NULL
         * check below; callers presumably guarantee reg->data != NULL
         * (PREFIX(_init) installs pixman_region_empty_data) -- confirm.
         */
        if (!reg->data ||
            reg->data->numRects == reg->data->size)
        {
            if (!pixman_rect_alloc (reg, 1))
                return NULL;
            /* The array may have moved; rebase both cursors. */
            *first_rect = PIXREGION_BOXPTR(reg);
            r = *first_rect + reg->data->numRects;
        }
        r->x1 = rx1;
        r->y1 = ry1;
        r->x2 = rx2;
        r->y2 = ry2;
        reg->data->numRects++;
        /* Widen the x-extents on the fly; y-extents are set by the caller. */
        if (r->x1 < reg->extents.x1)
            reg->extents.x1 = r->x1;
        if (r->x2 > reg->extents.x2)
            reg->extents.x2 = r->x2;
        r++;
    }
    return r;
}
 
/* Convert bitmap clip mask into clipping region.
 * First, goes through each line and makes boxes by noting the transitions
 * from 0 to 1 and 1 to 0.
 * Then it coalesces the current line with the previous if they have boxes
 * at the same X coordinates.
 * Stride is in number of uint32_t per line.
 */
PIXMAN_EXPORT void
PREFIX (_init_from_image) (region_type_t *region,
                           pixman_image_t *image)
{
    /* Mask with only the screen-leftmost bit of a 32-bit word set; the
     * SCREEN_SHIFT_* macros abstract over the bitmap's bit order.
     */
    uint32_t mask0 = 0xffffffff & ~SCREEN_SHIFT_RIGHT(0xffffffff, 1);
    box_type_t *first_rect, *rects, *prect_line_start;
    box_type_t *old_rect, *new_rect;
    uint32_t *pw, w, *pw_line, *pw_line_end;
    int irect_prev_start, irect_line_start;   /* rect-array indices of the previous/current line's boxes */
    int h, base, rx1 = 0, crects;
    int ib;
    pixman_bool_t in_box, same;
    int width, height, stride;

    PREFIX(_init) (region);

    /* Only 1-bit-per-pixel images can be treated as a clip bitmap. */
    return_if_fail (image->type == BITS);
    return_if_fail (image->bits.format == PIXMAN_a1);

    pw_line = pixman_image_get_data (image);
    width = pixman_image_get_width (image);
    height = pixman_image_get_height (image);
    stride = pixman_image_get_stride (image) / 4;   /* uint32_t words per line */

    first_rect = PIXREGION_BOXPTR(region);
    rects = first_rect;

    /* Seed the x-extents inverted; bitmap_addrect() widens them per box. */
    region->extents.x1 = width - 1;
    region->extents.x2 = 0;
    irect_prev_start = -1;
    for (h = 0; h < height; h++)
    {
        pw = pw_line;
        pw_line += stride;
        irect_line_start = rects - first_rect;

        /* If the Screen left most bit of the word is set, we're starting in
         * a box */
        if (READ(pw) & mask0)
        {
            in_box = TRUE;
            rx1 = 0;
        }
        else
        {
            in_box = FALSE;
        }

        /* Process all words which are fully in the pixmap */
        pw_line_end = pw + (width >> 5);
        for (base = 0; pw < pw_line_end; base += 32)
        {
            w = READ(pw++);
            if (in_box)
            {
                /* Fast path: an all-ones word cannot end the current box. */
                if (!~w)
                    continue;
            }
            else
            {
                /* Fast path: an all-zeros word cannot start a box. */
                if (!w)
                    continue;
            }
            for (ib = 0; ib < 32; ib++)
            {
                /* If the Screen left most bit of the word is set, we're
                 * starting a box */
                if (w & mask0)
                {
                    if (!in_box)
                    {
                        rx1 = base + ib;
                        /* start new box */
                        in_box = TRUE;
                    }
                }
                else
                {
                    if (in_box)
                    {
                        /* end box */
                        rects = bitmap_addrect (region, rects, &first_rect,
                                                rx1, h, base + ib, h + 1);
                        if (rects == NULL)
                            goto error;
                        in_box = FALSE;
                    }
                }
                /* Shift the word VISUALLY left one. */
                w = SCREEN_SHIFT_LEFT(w, 1);
            }
        }

        if (width & 31)
        {
            /* Process final partial word on line */
            w = READ(pw++);
            for (ib = 0; ib < (width & 31); ib++)
            {
                /* If the Screen left most bit of the word is set, we're
                 * starting a box */
                if (w & mask0)
                {
                    if (!in_box)
                    {
                        rx1 = base + ib;
                        /* start new box */
                        in_box = TRUE;
                    }
                }
                else
                {
                    if (in_box)
                    {
                        /* end box */
                        rects = bitmap_addrect(region, rects, &first_rect,
                                               rx1, h, base + ib, h + 1);
                        if (rects == NULL)
                            goto error;
                        in_box = FALSE;
                    }
                }
                /* Shift the word VISUALLY left one. */
                w = SCREEN_SHIFT_LEFT(w, 1);
            }
        }
        /* If scanline ended with last bit set, end the box */
        if (in_box)
        {
            rects = bitmap_addrect(region, rects, &first_rect,
                                   rx1, h, base + (width & 31), h + 1);
            if (rects == NULL)
                goto error;
        }
        /* if all rectangles on this line have the same x-coords as
         * those on the previous line, then add 1 to all the previous y2s and
         * throw away all the rectangles from this line
         */
        same = FALSE;
        if (irect_prev_start != -1)
        {
            crects = irect_line_start - irect_prev_start;
            if (crects != 0 &&
                crects == ((rects - first_rect) - irect_line_start))
            {
                old_rect = first_rect + irect_prev_start;
                new_rect = prect_line_start = first_rect + irect_line_start;
                same = TRUE;
                while (old_rect < prect_line_start)
                {
                    if ((old_rect->x1 != new_rect->x1) ||
                        (old_rect->x2 != new_rect->x2))
                    {
                        same = FALSE;
                        break;
                    }
                    old_rect++;
                    new_rect++;
                }
                if (same)
                {
                    /* Identical band: extend the previous line's boxes down
                     * one row and drop this line's boxes.
                     */
                    old_rect = first_rect + irect_prev_start;
                    while (old_rect < prect_line_start)
                    {
                        old_rect->y2 += 1;
                        old_rect++;
                    }
                    rects -= crects;
                    region->data->numRects -= crects;
                }
            }
        }
        if(!same)
            irect_prev_start = irect_line_start;
    }
    if (!region->data->numRects)
    {
        /* All-zero bitmap: collapse to an empty region. */
        region->extents.x1 = region->extents.x2 = 0;
    }
    else
    {
        region->extents.y1 = PIXREGION_BOXPTR(region)->y1;
        region->extents.y2 = PIXREGION_END(region)->y2;
        if (region->data->numRects == 1)
        {
            /* Single box: represent it by the extents alone. */
            free (region->data);
            region->data = NULL;
        }
    }

error:
    return;
}
/programs/develop/libraries/pixman/pixman-region16.c
0,0 → 1,67
/*
* Copyright © 2008 Red Hat, Inc.
*
* Permission to use, copy, modify, distribute, and sell this software
* and its documentation for any purpose is hereby granted without
* fee, provided that the above copyright notice appear in all copies
* and that both that copyright notice and this permission notice
* appear in supporting documentation, and that the name of
* Red Hat, Inc. not be used in advertising or publicity pertaining to
* distribution of the software without specific, written prior
* permission. Red Hat, Inc. makes no representations about the
* suitability of this software for any purpose. It is provided "as
* is" without express or implied warranty.
*
* RED HAT, INC. DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS
* SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS, IN NO EVENT SHALL RED HAT, INC. BE LIABLE FOR ANY SPECIAL,
* INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER
* RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
* OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR
* IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*
* Author: Soren Sandmann <sandmann@redhat.com>
*/
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif
 
#undef PIXMAN_DISABLE_DEPRECATED
 
#include "pixman-private.h"
 
#include <stdlib.h>
 
/* Parameters for the 16-bit instantiation of the shared region template
 * (pixman-region.c, included below): boxes are pixman_box16_t, and
 * intermediate arithmetic that could overflow 16 bits (e.g. translation)
 * is carried out in int32_t.
 */
typedef pixman_box16_t box_type_t;
typedef pixman_region16_data_t region_data_type_t;
typedef pixman_region16_t region_type_t;
typedef int32_t overflow_int_t;

typedef struct {
    int x, y;
} point_type_t;

/* Public entry points are named pixman_region<name>, e.g. pixman_region_union. */
#define PREFIX(x) pixman_region##x

/* Valid coordinate range for this instantiation. */
#define PIXMAN_REGION_MAX INT16_MAX
#define PIXMAN_REGION_MIN INT16_MIN

#include "pixman-region.c"
 
/* This function exists only to make it possible to preserve the X ABI -
* it should go away at first opportunity.
*
* The problem is that the X ABI exports the three structs and has used
* them through macros. So the X server calls this function with
* the addresses of those structs which makes the existing code continue to
* work.
*/
/* X-ABI preservation hook: repoint the library's shared empty/broken
 * region singletons at structs owned by the caller (the X server), so
 * code using them through the old macros keeps working.
 */
PIXMAN_EXPORT void
pixman_region_set_static_pointers (pixman_box16_t *        empty_box,
                                   pixman_region16_data_t *empty_data,
                                   pixman_region16_data_t *broken_data)
{
    pixman_broken_data = broken_data;
    pixman_region_empty_data = empty_data;
    pixman_region_empty_box = empty_box;
}
/programs/develop/libraries/pixman/pixman-region32.c
0,0 → 1,47
/*
* Copyright © 2008 Red Hat, Inc.
*
* Permission to use, copy, modify, distribute, and sell this software
* and its documentation for any purpose is hereby granted without
* fee, provided that the above copyright notice appear in all copies
* and that both that copyright notice and this permission notice
* appear in supporting documentation, and that the name of
* Red Hat, Inc. not be used in advertising or publicity pertaining to
* distribution of the software without specific, written prior
* permission. Red Hat, Inc. makes no representations about the
* suitability of this software for any purpose. It is provided "as
* is" without express or implied warranty.
*
* RED HAT, INC. DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS
* SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS, IN NO EVENT SHALL RED HAT, INC. BE LIABLE FOR ANY SPECIAL,
* INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER
* RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
* OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR
* IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*
* Author: Soren Sandmann <sandmann@redhat.com>
*/
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif
 
#include "pixman-private.h"
 
#include <stdlib.h>
 
/* Parameters for the 32-bit instantiation of the shared region template
 * (pixman-region.c, included below): boxes are pixman_box32_t, and
 * intermediate arithmetic that could overflow 32 bits (e.g. translation)
 * is carried out in int64_t.
 */
typedef pixman_box32_t box_type_t;
typedef pixman_region32_data_t region_data_type_t;
typedef pixman_region32_t region_type_t;
typedef int64_t overflow_int_t;

typedef struct {
    int x, y;
} point_type_t;

/* Public entry points are named pixman_region32<name>. */
#define PREFIX(x) pixman_region32##x

/* Valid coordinate range for this instantiation. */
#define PIXMAN_REGION_MAX INT32_MAX
#define PIXMAN_REGION_MIN INT32_MIN

#include "pixman-region.c"
/programs/develop/libraries/pixman/pixman-solid-fill.c
0,0 → 1,117
/*
* Copyright © 2000 SuSE, Inc.
* Copyright © 2007, 2009 Red Hat, Inc.
* Copyright © 2009 Soren Sandmann
*
* Permission to use, copy, modify, distribute, and sell this software and its
* documentation for any purpose is hereby granted without fee, provided that
* the above copyright notice appear in all copies and that both that
* copyright notice and this permission notice appear in supporting
* documentation, and that the name of SuSE not be used in advertising or
* publicity pertaining to distribution of the software without specific,
* written prior permission. SuSE makes no representations about the
* suitability of this software for any purpose. It is provided "as is"
* without express or implied warranty.
*
* SuSE DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING ALL
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL SuSE
* BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
* OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
 
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif
#include "pixman-private.h"
 
/* Fill WIDTH entries of BUFFER with the image's cached 32-bit solid
 * color.  X, Y and MASK are ignored: a solid fill is identical everywhere. */
static void
solid_fill_get_scanline_32 (pixman_image_t *image,
                            int             x,
                            int             y,
                            int             width,
                            uint32_t *      buffer,
                            const uint32_t *mask)
{
    uint32_t fill = image->solid.color_32;
    int      i;

    for (i = 0; i < width; i++)
	buffer[i] = fill;
}
 
/* 64-bit (16 bits per channel) variant of the solid scanline fetcher.
 * BUFFER is reinterpreted as uint64_t, matching the wide pipeline. */
static void
solid_fill_get_scanline_64 (pixman_image_t *image,
                            int             x,
                            int             y,
                            int             width,
                            uint32_t *      buffer,
                            const uint32_t *mask)
{
    uint64_t *out  = (uint64_t *)buffer;
    uint64_t  fill = image->solid.color_64;
    int       i;

    for (i = 0; i < width; i++)
	out[i] = fill;
}
 
/* Classify the source for fast-path selection: a solid fill is constant
 * along any horizontal span, so always report HORIZONTAL regardless of
 * the queried rectangle. */
static source_image_class_t
solid_fill_classify (pixman_image_t *image,
                     int             x,
                     int             y,
                     int             width,
                     int             height)
{
    return SOURCE_IMAGE_CLASS_HORIZONTAL;
}
 
/* Property-change hook: (re)install the constant-color scanline
 * fetchers; solid images always use these two. */
static void
solid_fill_property_changed (pixman_image_t *image)
{
    image->common.get_scanline_32 = solid_fill_get_scanline_32;
    image->common.get_scanline_64 = solid_fill_get_scanline_64;
}
 
/* Pack a 16-bit-per-channel pixman_color_t into 8-bit-per-channel
 * a8r8g8b8, keeping the high byte of each component.
 *
 * Fixes two issues in the original:
 *  - `color->alpha >> 8 << 24` shifted into the sign bit of a promoted
 *    (signed) int for alpha >= 0x8000 — undefined behavior; shifts are
 *    now done on uint32_t.
 *  - green used `& 0xff00` while the other channels used shifts; all
 *    four channels now use the same (identical-valued) shift form. */
static uint32_t
color_to_uint32 (const pixman_color_t *color)
{
    return
	((uint32_t)(color->alpha >> 8) << 24) |
	((uint32_t)(color->red   >> 8) << 16) |
	((uint32_t)(color->green >> 8) <<  8) |
	((uint32_t)(color->blue  >> 8));
}
 
/* Widen a pixman_color_t (already 16 bits per channel) into the
 * a16r16g16b16 64-bit pixel layout. */
static uint64_t
color_to_uint64 (const pixman_color_t *color)
{
    uint64_t pixel;

    pixel  = (uint64_t)color->alpha << 48;
    pixel |= (uint64_t)color->red   << 32;
    pixel |= (uint64_t)color->green << 16;
    pixel |= (uint64_t)color->blue;

    return pixel;
}
 
/* Create a SOLID image from COLOR.  The 32- and 64-bit packed forms are
 * cached up front so the scanline fetchers can copy them directly.
 * Returns NULL on allocation failure. */
PIXMAN_EXPORT pixman_image_t *
pixman_image_create_solid_fill (pixman_color_t *color)
{
    pixman_image_t *image = _pixman_image_allocate ();

    if (!image)
	return NULL;

    image->type = SOLID;

    image->common.classify = solid_fill_classify;
    image->common.property_changed = solid_fill_property_changed;

    image->solid.color = *color;
    image->solid.color_32 = color_to_uint32 (color);
    image->solid.color_64 = color_to_uint64 (color);

    return image;
}
 
/programs/develop/libraries/pixman/pixman-timer.c
0,0 → 1,66
/*
* Copyright © 2007 Red Hat, Inc.
*
* Permission to use, copy, modify, distribute, and sell this software and its
* documentation for any purpose is hereby granted without fee, provided that
* the above copyright notice appear in all copies and that both that
* copyright notice and this permission notice appear in supporting
* documentation, and that the name of Red Hat not be used in advertising or
* publicity pertaining to distribution of the software without specific,
* written prior permission. Red Hat makes no representations about the
* suitability of this software for any purpose. It is provided "as is"
* without express or implied warranty.
*
* RED HAT DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING ALL
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL RED HAT
* BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
* OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
 
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif
 
#include <stdlib.h>
#include <stdio.h>
#include "pixman-private.h"
 
#ifdef PIXMAN_TIMERS
 
static pixman_timer_t *timers;
 
/* atexit() hook: print total ticks, invocation count and per-call
 * average for every registered timer.
 * NOTE(review): the "%llu" specifiers assume pixman_timer_t's
 * total/n_times fields are unsigned long long — confirm against
 * pixman-private.h. */
static void
dump_timers (void)
{
    pixman_timer_t *timer;

    for (timer = timers; timer != NULL; timer = timer->next)
    {
	printf ("%s: total: %llu n: %llu avg: %f\n",
	        timer->name,
	        timer->total,
	        timer->n_times,
	        timer->total / (double)timer->n_times);
    }
}
 
/* Link TIMER onto the global timer list; on the first registration,
 * install dump_timers() to run at process exit.
 *
 * Not thread safe: both `initialized` and the `timers` list head are
 * updated without synchronization.
 *
 * The original redeclared atexit() locally; that redundant prototype is
 * removed — <stdlib.h> is already included at the top of this file. */
void
pixman_timer_register (pixman_timer_t *timer)
{
    static int initialized;

    if (!initialized)
    {
	atexit (dump_timers);
	initialized = 1;
    }

    timer->next = timers;
    timers = timer;
}
 
#endif
/programs/develop/libraries/pixman/pixman-trap.c
0,0 → 1,392
/*
* Copyright © 2004 Keith Packard
*
* Permission to use, copy, modify, distribute, and sell this software and its
* documentation for any purpose is hereby granted without fee, provided that
* the above copyright notice appear in all copies and that both that
* copyright notice and this permission notice appear in supporting
* documentation, and that the name of Keith Packard not be used in
* advertising or publicity pertaining to distribution of the software without
* specific, written prior permission. Keith Packard makes no
* representations about the suitability of this software for any purpose. It
* is provided "as is" without express or implied warranty.
*
* KEITH PACKARD DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
* INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
* EVENT SHALL KEITH PACKARD BE LIABLE FOR ANY SPECIAL, INDIRECT OR
* CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
* DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
 
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif
 
#include <stdio.h>
#include "pixman-private.h"
 
/*
 * Compute the smallest value greater than or equal to y which is on a
 * grid row.  `n` is the number of sample rows per pixel; the row
 * positions come from the Y_FRAC_FIRST / STEP_Y_SMALL / Y_FRAC_LAST
 * macros (pixman-private.h).
 */

PIXMAN_EXPORT pixman_fixed_t
pixman_sample_ceil_y (pixman_fixed_t y, int n)
{
    pixman_fixed_t f = pixman_fixed_frac (y);
    pixman_fixed_t i = pixman_fixed_floor (y);

    /* Round the fractional part up to the next sample row. */
    f = DIV (f - Y_FRAC_FIRST (n) + (STEP_Y_SMALL (n) - pixman_fixed_e), STEP_Y_SMALL (n)) * STEP_Y_SMALL (n) +
	Y_FRAC_FIRST (n);

    if (f > Y_FRAC_LAST (n))
    {
	if (pixman_fixed_to_int (i) == 0x7fff)
	{
	    f = 0xffff; /* saturate: already at the top of the fixed-point range */
	}
	else
	{
	    /* Wrap to the first sample row of the next integer coordinate. */
	    f = Y_FRAC_FIRST (n);
	    i += pixman_fixed_1;
	}
    }
    return (i | f);
}
 
/*
 * Compute the largest value strictly less than y which is on a
 * grid row.  `n` is the number of sample rows per pixel.
 */
PIXMAN_EXPORT pixman_fixed_t
pixman_sample_floor_y (pixman_fixed_t y,
                       int            n)
{
    pixman_fixed_t f = pixman_fixed_frac (y);
    pixman_fixed_t i = pixman_fixed_floor (y);

    /* Subtracting pixman_fixed_e before rounding down makes the result
     * strictly less than y. */
    f = DIV (f - pixman_fixed_e - Y_FRAC_FIRST (n), STEP_Y_SMALL (n)) * STEP_Y_SMALL (n) +
	Y_FRAC_FIRST (n);

    if (f < Y_FRAC_FIRST (n))
    {
	/* NOTE(review): comparison is against 0x7fff's negative twin as
	 * the literal 0x8000 — presumably the most negative pixel
	 * coordinate; confirm pixman_fixed_to_int's sign behavior here. */
	if (pixman_fixed_to_int (i) == 0x8000)
	{
	    f = 0; /* saturate at the bottom of the coordinate range */
	}
	else
	{
	    /* Wrap to the last sample row of the previous integer coordinate. */
	    f = Y_FRAC_LAST (n);
	    i -= pixman_fixed_1;
	}
    }
    return (i | f);
}
 
/*
 * Step an edge by any amount (including negative values).
 *
 * Advances e->x by n DDA steps while carrying the error term e->e
 * (which is kept scaled by e->dy) so that x remains exact.
 */
PIXMAN_EXPORT void
pixman_edge_step (pixman_edge_t *e,
                  int            n)
{
    pixman_fixed_48_16_t ne;

    e->x += n * e->stepx;

    ne = e->e + n * (pixman_fixed_48_16_t) e->dx;

    if (n >= 0)
    {
	/* Error crossed zero: advance x by as many whole dy-steps as
	 * fit in the accumulated error, then renormalize. */
	if (ne > 0)
	{
	    int nx = (ne + e->dy - 1) / e->dy;
	    e->e = ne - nx * (pixman_fixed_48_16_t) e->dy;
	    e->x += nx * e->signdx;
	}
    }
    else
    {
	/* Stepping backwards: pull the error back into (-dy, 0]. */
	if (ne <= -e->dy)
	{
	    int nx = (-ne) / e->dy;
	    e->e = ne + nx * (pixman_fixed_48_16_t) e->dy;
	    e->x -= nx * e->signdx;
	}
    }
}
 
/*
 * A private routine to initialize the multi-step
 * elements of an edge structure: computes the x increment (*stepx_p)
 * and residual error increment (*dx_p) used when the edge is stepped
 * by n sample rows at a time.
 */
static void
_pixman_edge_multi_init (pixman_edge_t * e,
                         int             n,
                         pixman_fixed_t *stepx_p,
                         pixman_fixed_t *dx_p)
{
    pixman_fixed_t stepx;
    pixman_fixed_48_16_t ne;

    ne = n * (pixman_fixed_48_16_t) e->dx;
    stepx = n * e->stepx;

    /* Fold whole multiples of dy out of the error increment into
     * additional x steps so the per-step error stays below dy. */
    if (ne > 0)
    {
	int nx = ne / e->dy;
	ne -= nx * e->dy;
	stepx += nx * e->signdx;
    }

    *dx_p = ne;
    *stepx_p = stepx;
}
 
/*
 * Initialize one edge structure given the line endpoints and a
 * starting y value.  Sets up a DDA whose error term e->e is scaled by
 * dy, plus precomputed "small"/"big" multi-row step increments, then
 * advances the edge from y_top to y_start.
 */
PIXMAN_EXPORT void
pixman_edge_init (pixman_edge_t *e,
                  int            n,
                  pixman_fixed_t y_start,
                  pixman_fixed_t x_top,
                  pixman_fixed_t y_top,
                  pixman_fixed_t x_bot,
                  pixman_fixed_t y_bot)
{
    pixman_fixed_t dx, dy;

    e->x = x_top;
    e->e = 0;
    dx = x_bot - x_top;
    dy = y_bot - y_top;
    e->dy = dy;
    e->dx = 0;

    if (dy)
    {
	if (dx >= 0)
	{
	    /* Rightward-leaning edge: truncating division, remainder
	     * feeds the error term. */
	    e->signdx = 1;
	    e->stepx = dx / dy;
	    e->dx = dx % dy;
	    e->e = -dy;
	}
	else
	{
	    /* Leftward-leaning edge: divide magnitudes so the modulo
	     * behaves like the positive case, then negate the step. */
	    e->signdx = -1;
	    e->stepx = -(-dx / dy);
	    e->dx = -dx % dy;
	    e->e = 0;
	}

	_pixman_edge_multi_init (e, STEP_Y_SMALL (n),
	                         &e->stepx_small, &e->dx_small);

	_pixman_edge_multi_init (e, STEP_Y_BIG (n),
	                         &e->stepx_big, &e->dx_big);
    }
    /* Move from the top endpoint down (or up) to the requested start row. */
    pixman_edge_step (e, y_start - y_top);
}
 
/*
 * Initialize one edge structure given a line, a starting y value
 * and a pixel offset (x_off, y_off) applied to both endpoints.
 */
PIXMAN_EXPORT void
pixman_line_fixed_edge_init (pixman_edge_t *            e,
                             int                        n,
                             pixman_fixed_t             y,
                             const pixman_line_fixed_t *line,
                             int                        x_off,
                             int                        y_off)
{
    const pixman_point_fixed_t *top, *bot;
    pixman_fixed_t off_x = pixman_int_to_fixed (x_off);
    pixman_fixed_t off_y = pixman_int_to_fixed (y_off);

    /* Order the endpoints so the edge always runs top to bottom. */
    if (line->p1.y <= line->p2.y)
    {
	top = &line->p1;
	bot = &line->p2;
    }
    else
    {
	top = &line->p2;
	bot = &line->p1;
    }

    pixman_edge_init (e, n, y,
                      top->x + off_x, top->y + off_y,
                      bot->x + off_x, bot->y + off_y);
}
 
/* Rasterize a list of pixman_trap_t (sample-point trapezoids) into
 * IMAGE, offset by (x_off, y_off).  The image's bits-per-pixel selects
 * the sample grid density passed to the edge/sampling routines.
 *
 * The unused local `width` from the original was removed. */
PIXMAN_EXPORT void
pixman_add_traps (pixman_image_t * image,
                  int16_t          x_off,
                  int16_t          y_off,
                  int              ntrap,
                  pixman_trap_t *  traps)
{
    int bpp;
    int height;

    pixman_fixed_t x_off_fixed;
    pixman_fixed_t y_off_fixed;
    pixman_edge_t l, r;
    pixman_fixed_t t, b;

    _pixman_image_validate (image);
    height = image->bits.height;
    bpp = PIXMAN_FORMAT_BPP (image->bits.format);

    x_off_fixed = pixman_int_to_fixed (x_off);
    y_off_fixed = pixman_int_to_fixed (y_off);

    while (ntrap--)
    {
	/* Clamp top/bottom to the image and snap them to sample rows. */
	t = traps->top.y + y_off_fixed;
	if (t < 0)
	    t = 0;
	t = pixman_sample_ceil_y (t, bpp);

	b = traps->bot.y + y_off_fixed;
	if (pixman_fixed_to_int (b) >= height)
	    b = pixman_int_to_fixed (height) - 1;
	b = pixman_sample_floor_y (b, bpp);

	if (b >= t)
	{
	    /* initialize edge walkers */
	    pixman_edge_init (&l, bpp, t,
	                      traps->top.l + x_off_fixed,
	                      traps->top.y + y_off_fixed,
	                      traps->bot.l + x_off_fixed,
	                      traps->bot.y + y_off_fixed);

	    pixman_edge_init (&r, bpp, t,
	                      traps->top.r + x_off_fixed,
	                      traps->top.y + y_off_fixed,
	                      traps->bot.r + x_off_fixed,
	                      traps->bot.y + y_off_fixed);

	    pixman_rasterize_edges (image, &l, &r, t, b);
	}

	traps++;
    }
}
 
#if 0
/* Debug helper (compiled out via #if 0): render an a8 image to stdout
 * as ASCII art, one '#' per nonzero alpha byte.
 *
 * Fixed: the original sanity checks read `!image->type == BITS` and
 * `!image->bits.format == PIXMAN_a8`; `!` binds tighter than `==`, so
 * they compared a 0/1 boolean against an enum and (almost) never
 * fired.  The intent is plain inequality. */
static void
dump_image (pixman_image_t *image,
            const char *    title)
{
    int i, j;

    if (image->type != BITS)
	printf ("%s is not a regular image\n", title);

    if (image->bits.format != PIXMAN_a8)
	printf ("%s is not an alpha mask\n", title);

    printf ("\n\n\n%s: \n", title);

    for (i = 0; i < image->bits.height; ++i)
    {
	uint8_t *line =
	    (uint8_t *)&(image->bits.bits[i * image->bits.rowstride]);

	for (j = 0; j < image->bits.width; ++j)
	    printf ("%c", line[j] ? '#' : ' ');

	printf ("\n");
    }
}
#endif
 
/* Rasterize every valid trapezoid in TRAPS into IMAGE; invalid
 * trapezoids are skipped silently. */
PIXMAN_EXPORT void
pixman_add_trapezoids (pixman_image_t *          image,
                       int16_t                   x_off,
                       int                       y_off,
                       int                       ntraps,
                       const pixman_trapezoid_t *traps)
{
    int i;

#if 0
    dump_image (image, "before");
#endif

    for (i = 0; i < ntraps; ++i)
    {
	if (pixman_trapezoid_valid (&traps[i]))
	    pixman_rasterize_trapezoid (image, &traps[i], x_off, y_off);
    }

#if 0
    dump_image (image, "after");
#endif
}
 
/* Rasterize a single trapezoid into IMAGE (which must be a BITS image),
 * offset by (x_off, y_off).  Invalid trapezoids are ignored.
 *
 * The unused local `width` from the original was removed. */
PIXMAN_EXPORT void
pixman_rasterize_trapezoid (pixman_image_t *          image,
                            const pixman_trapezoid_t *trap,
                            int                       x_off,
                            int                       y_off)
{
    int bpp;
    int height;

    pixman_fixed_t y_off_fixed;
    pixman_edge_t l, r;
    pixman_fixed_t t, b;

    return_if_fail (image->type == BITS);

    _pixman_image_validate (image);
    if (!pixman_trapezoid_valid (trap))
	return;

    height = image->bits.height;
    bpp = PIXMAN_FORMAT_BPP (image->bits.format);

    y_off_fixed = pixman_int_to_fixed (y_off);

    /* Clamp the span to the image and snap it to sample rows. */
    t = trap->top + y_off_fixed;
    if (t < 0)
	t = 0;
    t = pixman_sample_ceil_y (t, bpp);

    b = trap->bottom + y_off_fixed;
    if (pixman_fixed_to_int (b) >= height)
	b = pixman_int_to_fixed (height) - 1;
    b = pixman_sample_floor_y (b, bpp);

    if (b >= t)
    {
	/* initialize edge walkers */
	pixman_line_fixed_edge_init (&l, bpp, t, &trap->left, x_off, y_off);
	pixman_line_fixed_edge_init (&r, bpp, t, &trap->right, x_off, y_off);

	pixman_rasterize_edges (image, &l, &r, t, b);
    }
}
/programs/develop/libraries/pixman/pixman-utils.c
0,0 → 1,258
/*
* Copyright © 2000 SuSE, Inc.
* Copyright © 1999 Keith Packard
*
* Permission to use, copy, modify, distribute, and sell this software and its
* documentation for any purpose is hereby granted without fee, provided that
* the above copyright notice appear in all copies and that both that
* copyright notice and this permission notice appear in supporting
* documentation, and that the name of SuSE not be used in advertising or
* publicity pertaining to distribution of the software without specific,
* written prior permission. SuSE makes no representations about the
* suitability of this software for any purpose. It is provided "as is"
* without express or implied warranty.
*
* SuSE DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING ALL
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL SuSE
* BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
* OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*
* Author: Keith Packard, SuSE, Inc.
*/
 
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif
#include <stdio.h>
#include <stdlib.h>
 
#include "pixman-private.h"
 
pixman_bool_t
pixman_multiply_overflows_int (unsigned int a,
unsigned int b)
{
return a >= INT32_MAX / b;
}
 
pixman_bool_t
pixman_addition_overflows_int (unsigned int a,
unsigned int b)
{
return a > INT32_MAX - b;
}
 
/* malloc (a * b) with overflow protection: returns NULL instead of
 * allocating a wrapped-around size.  Caller frees the result.
 *
 * Fixed: b == 0 formerly caused a division by zero (undefined
 * behavior); a * 0 cannot overflow, so it now falls through to
 * malloc (0). */
void *
pixman_malloc_ab (unsigned int a,
                  unsigned int b)
{
    if (b != 0 && a >= INT32_MAX / b)
	return NULL;

    return malloc (a * b);
}
 
/* malloc (a * b * c) with overflow protection on both multiplies;
 * returns NULL if either product could overflow.  Caller frees.
 *
 * Fixed: zero values of b or c formerly caused division by zero
 * (undefined behavior); a product involving zero cannot overflow. */
void *
pixman_malloc_abc (unsigned int a,
                   unsigned int b,
                   unsigned int c)
{
    if (b != 0 && a >= INT32_MAX / b)
	return NULL;
    else if (c != 0 && a * b >= INT32_MAX / c)
	return NULL;
    else
	return malloc (a * b * c);
}
 
/*
 * Helper routine to expand a color component from 0 < n <= 8 bits to 16
 * bits by bit replication (e.g. 5-bit 10101 -> 1010110101101011).
 *
 * The nbits == 0 guard now runs BEFORE the initial shift: the original
 * computed `val << (16 - nbits)` first, producing a 16-position shift
 * whose result was about to be discarded anyway.  Behavior is
 * unchanged; the degenerate case is just handled up front.
 */
static inline uint64_t
expand16 (const uint8_t val, int nbits)
{
    uint16_t result;

    /* A zero-width component has no bits to replicate. */
    if (nbits == 0)
	return 0;

    /* Start out with the high bit of val in the high bit of result. */
    result = (uint16_t)val << (16 - nbits);

    /* Copy the bits in result, doubling the number of bits each time,
     * until we fill all 16 bits. */
    while (nbits < 16)
    {
	result |= result >> nbits;
	nbits *= 2;
    }

    return result;
}
 
/*
 * This function expands images from ARGB8 format to ARGB16. To preserve
 * precision, it needs to know the original source format. For example, if the
 * source was PIXMAN_x1r5g5b5 and the red component contained bits 12345, then
 * the expanded value is 12345123. To correctly expand this to 16 bits, it
 * should be 1234512345123451 and not 1234512312345123.
 */
void
pixman_expand (uint64_t * dst,
               const uint32_t * src,
               pixman_format_code_t format,
               int width)
{
    /*
     * Determine the sizes of each component and the masks and shifts
     * required to extract them from the source pixel.
     */
    const int a_size = PIXMAN_FORMAT_A (format),
              r_size = PIXMAN_FORMAT_R (format),
              g_size = PIXMAN_FORMAT_G (format),
              b_size = PIXMAN_FORMAT_B (format);
    /* Shifts position each component's top bit at its 8-bit channel
     * boundary within the 32-bit a8r8g8b8-style pixel. */
    const int a_shift = 32 - a_size,
              r_shift = 24 - r_size,
              g_shift = 16 - g_size,
              b_shift = 8 - b_size;
    /* Per-component masks; components are at most 8 bits wide here. */
    const uint8_t a_mask = ~(~0 << a_size),
                  r_mask = ~(~0 << r_size),
                  g_mask = ~(~0 << g_size),
                  b_mask = ~(~0 << b_size);
    int i;

    /* Start at the end so that we can do the expansion in place
     * when src == dst
     */
    for (i = width - 1; i >= 0; i--)
    {
	const uint32_t pixel = src[i];
	const uint8_t a = (pixel >> a_shift) & a_mask,
	              r = (pixel >> r_shift) & r_mask,
	              g = (pixel >> g_shift) & g_mask,
	              b = (pixel >> b_shift) & b_mask;
	/* Formats without an alpha channel are treated as fully opaque. */
	const uint64_t a16 = a_size ? expand16 (a, a_size) : 0xffff,
	               r16 = expand16 (r, r_size),
	               g16 = expand16 (g, g_size),
	               b16 = expand16 (b, b_size);

	dst[i] = a16 << 48 | r16 << 32 | g16 << 16 | b16;
    }
}
 
/*
 * Contracting is easier than expanding. We just need to truncate the
 * components: keep the top 8 bits of each 16-bit channel of the
 * a16r16g16b16 pixel.
 *
 * Fixed: `a << 24` operated on a uint8_t promoted to (signed) int, so
 * the shift was undefined behavior whenever a >= 0x80; the components
 * are now widened to uint32_t before shifting.
 */
void
pixman_contract (uint32_t *      dst,
                 const uint64_t *src,
                 int             width)
{
    int i;

    /* Start at the beginning so that we can do the contraction in
     * place when src == dst
     */
    for (i = 0; i < width; i++)
    {
	const uint8_t a = src[i] >> 56,
	              r = src[i] >> 40,
	              g = src[i] >> 24,
	              b = src[i] >> 8;

	dst[i] = (uint32_t)a << 24 | (uint32_t)r << 16 |
	         (uint32_t)g << 8  | (uint32_t)b;
    }
}
 
#define N_TMP_BOXES (16)
 
/* Replace DST's contents with a 16-bit copy of the 32-bit region SRC.
 * Returns FALSE on allocation failure (DST is then left untouched).
 * Coordinates outside the int16_t range are silently truncated by the
 * plain assignments below. */
pixman_bool_t
pixman_region16_copy_from_region32 (pixman_region16_t *dst,
                                    pixman_region32_t *src)
{
    int n_boxes, i;
    pixman_box32_t *boxes32;
    pixman_box16_t *boxes16;
    pixman_bool_t retval;

    boxes32 = pixman_region32_rectangles (src, &n_boxes);

    /* Overflow-checked allocation of the temporary 16-bit box array. */
    boxes16 = pixman_malloc_ab (n_boxes, sizeof (pixman_box16_t));

    if (!boxes16)
	return FALSE;

    for (i = 0; i < n_boxes; ++i)
    {
	boxes16[i].x1 = boxes32[i].x1;
	boxes16[i].y1 = boxes32[i].y1;
	boxes16[i].x2 = boxes32[i].x2;
	boxes16[i].y2 = boxes32[i].y2;
    }

    /* Rebuild DST from the converted boxes. */
    pixman_region_fini (dst);
    retval = pixman_region_init_rects (dst, boxes16, n_boxes);
    free (boxes16);
    return retval;
}
 
/* Replace DST's contents with a 32-bit copy of the 16-bit region SRC.
 * Returns FALSE on allocation failure.  Uses a small on-stack buffer
 * (N_TMP_BOXES) to avoid malloc for typical small regions. */
pixman_bool_t
pixman_region32_copy_from_region16 (pixman_region32_t *dst,
                                    pixman_region16_t *src)
{
    int n_boxes, i;
    pixman_box16_t *boxes16;
    pixman_box32_t *boxes32;
    pixman_box32_t tmp_boxes[N_TMP_BOXES];
    pixman_bool_t retval;

    boxes16 = pixman_region_rectangles (src, &n_boxes);

    /* Spill to the heap only when the region exceeds the stack buffer. */
    if (n_boxes > N_TMP_BOXES)
	boxes32 = pixman_malloc_ab (n_boxes, sizeof (pixman_box32_t));
    else
	boxes32 = tmp_boxes;

    if (!boxes32)
	return FALSE;

    /* Widening 16 -> 32 is always lossless. */
    for (i = 0; i < n_boxes; ++i)
    {
	boxes32[i].x1 = boxes16[i].x1;
	boxes32[i].y1 = boxes16[i].y1;
	boxes32[i].x2 = boxes16[i].x2;
	boxes32[i].y2 = boxes16[i].y2;
    }

    pixman_region32_fini (dst);
    retval = pixman_region32_init_rects (dst, boxes32, n_boxes);

    if (boxes32 != tmp_boxes)
	free (boxes32);

    return retval;
}
 
#ifdef DEBUG
 
/* Report an internal bug to stderr, rate-limited to the first ten
 * messages so a misbehaving caller cannot flood the log. */
void
_pixman_log_error (const char *function, const char *message)
{
    static int n_messages = 0;

    if (n_messages >= 10)
	return;

    n_messages++;

    fprintf (stderr,
             "*** BUG ***\n"
             "In %s: %s\n"
             "Set a breakpoint on '_pixman_log_error' to debug\n\n",
             function, message);
}
 
#endif
/programs/develop/libraries/pixman/pixman-version.h
0,0 → 1,50
/*
* Copyright © 2008 Red Hat, Inc.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Author: Carl D. Worth <cworth@cworth.org>
*/
 
#ifndef PIXMAN_VERSION_H__
#define PIXMAN_VERSION_H__
 
#ifndef PIXMAN_H__
# error pixman-version.h should only be included by pixman.h
#endif
 
#define PIXMAN_VERSION_MAJOR 0
#define PIXMAN_VERSION_MINOR 20
#define PIXMAN_VERSION_MICRO 2
 
#define PIXMAN_VERSION_STRING "0.20.2"
 
#define PIXMAN_VERSION_ENCODE(major, minor, micro) ( \
((major) * 10000) \
+ ((minor) * 100) \
+ ((micro) * 1))
 
#define PIXMAN_VERSION PIXMAN_VERSION_ENCODE( \
PIXMAN_VERSION_MAJOR, \
PIXMAN_VERSION_MINOR, \
PIXMAN_VERSION_MICRO)
 
#endif /* PIXMAN_VERSION_H__ */
/programs/develop/libraries/pixman/pixman-x64-mmx-emulation.h
0,0 → 1,263
#ifndef MMX_X64_H_INCLUDED
#define MMX_X64_H_INCLUDED
 
/* Implementation of x64 MMX substitition functions, before
* pixman is reimplemented not to use __m64 type on Visual C++
*
* Copyright (C)2009 by George Yohng
* Released in public domain.
*/
 
#include <intrin.h>
 
#define M64C(a) (*(const __m64 *)(&a))
#define M64U(a) (*(const unsigned long long *)(&a))
 
/* Load a 32-bit int into an __m64, sign-extended to 64 bits. */
__inline __m64
_m_from_int (int a)
{
    long long i64 = a;

    return M64C (i64);
}
 
/* Return an all-zero __m64 value. */
__inline __m64
_mm_setzero_si64 ()
{
    long long i64 = 0;

    return M64C (i64);
}
 
/* Pack two 32-bit values into an __m64: i0 in the low half, i1 in the
 * high half. */
__inline __m64
_mm_set_pi32 (int i1, int i0)
{
    unsigned long long i64 = ((unsigned)i0) + (((unsigned long long)(unsigned)i1) << 32);

    return M64C (i64);
}
 
/* No-op stand-in for emms: this emulation keeps no real MMX state. */
__inline void
_m_empty ()
{
}
 
/* Broadcast w into all four 16-bit lanes (the multiply replicates the
 * lane pattern across the 64-bit word). */
__inline __m64
_mm_set1_pi16 (short w)
{
    unsigned long long i64 = ((unsigned long long)(unsigned short)(w)) * 0x0001000100010001ULL;

    return M64C (i64);
}
 
/* Extract the low 32-bit lane of an __m64. */
__inline int
_m_to_int (__m64 m)
{
    return m.m64_i32[0];
}
 
/* Return the low 64 bits of a 128-bit SSE register as an __m64. */
__inline __m64
_mm_movepi64_pi64 (__m128i a)
{
    return M64C (a.m128i_i64[0]);
}
 
/* Bitwise AND of two 64-bit values (pand). */
__inline __m64
_m_pand (__m64 a, __m64 b)
{
    unsigned long long i64 = M64U (a) & M64U (b);

    return M64C (i64);
}
 
/* Bitwise OR of two 64-bit values (por). */
__inline __m64
_m_por (__m64 a, __m64 b)
{
    unsigned long long i64 = M64U (a) | M64U (b);

    return M64C (i64);
}
 
/* Bitwise XOR of two 64-bit values (pxor). */
__inline __m64
_m_pxor (__m64 a, __m64 b)
{
    unsigned long long i64 = M64U (a) ^ M64U (b);

    return M64C (i64);
}
 
/* Per-lane unsigned 16x16 multiply keeping the HIGH 16 bits of each
 * 32-bit product (pmulhuw).  The result is returned by reinterpreting
 * the 4-element lane array as an __m64. */
__inline __m64
_m_pmulhuw (__m64 a, __m64 b) /* unoptimized */
{
    unsigned short d[4] =
    {
	(unsigned short)((((unsigned)a.m64_u16[0]) * b.m64_u16[0]) >> 16),
	(unsigned short)((((unsigned)a.m64_u16[1]) * b.m64_u16[1]) >> 16),
	(unsigned short)((((unsigned)a.m64_u16[2]) * b.m64_u16[2]) >> 16),
	(unsigned short)((((unsigned)a.m64_u16[3]) * b.m64_u16[3]) >> 16)
    };

    return M64C (d[0]);
}
 
/* Per-lane 16x16 multiply keeping the LOW 16 bits (pmullw); array-built
 * variant of _m_pmullw below. */
__inline __m64
_m_pmullw2 (__m64 a, __m64 b) /* unoptimized */
{
    unsigned short d[4] =
    {
	(unsigned short)((((unsigned)a.m64_u16[0]) * b.m64_u16[0])),
	(unsigned short)((((unsigned)a.m64_u16[1]) * b.m64_u16[1])),
	(unsigned short)((((unsigned)a.m64_u16[2]) * b.m64_u16[2])),
	(unsigned short)((((unsigned)a.m64_u16[3]) * b.m64_u16[3]))
    };

    return M64C (d[0]);
}
 
/* Per-lane 16x16 multiply keeping the LOW 16 bits (pmullw), assembling
 * the four lanes arithmetically into one 64-bit value. */
__inline __m64
_m_pmullw (__m64 a, __m64 b) /* unoptimized */
{
    unsigned long long x =
	((unsigned long long)(unsigned short)((((unsigned)a.m64_u16[0]) * b.m64_u16[0]))) +
	(((unsigned long long)(unsigned short)((((unsigned)a.m64_u16[1]) * b.m64_u16[1]))) << 16) +
	(((unsigned long long)(unsigned short)((((unsigned)a.m64_u16[2]) * b.m64_u16[2]))) << 32) +
	(((unsigned long long)(unsigned short)((((unsigned)a.m64_u16[3]) * b.m64_u16[3]))) << 48);

    return M64C (x);
}
 
/* Saturating unsigned byte add (paddusb), SWAR style: even and odd byte
 * lanes are summed separately so carries cannot cross lane boundaries;
 * any lane whose sum carried into bit 8 is forced to 0xFF, then the two
 * halves are re-interleaved. */
__inline __m64
_m_paddusb (__m64 a, __m64 b) /* unoptimized */
{
    unsigned long long x = (M64U (a) & 0x00FF00FF00FF00FFULL) +
                           (M64U (b) & 0x00FF00FF00FF00FFULL);

    unsigned long long y = ((M64U (a) >> 8) & 0x00FF00FF00FF00FFULL) +
                           ((M64U (b) >> 8) & 0x00FF00FF00FF00FFULL);

    /* A set carry bit multiplies 0xFF into the lane -> saturation. */
    x |= ((x & 0xFF00FF00FF00FF00ULL) >> 8) * 0xFF;
    y |= ((y & 0xFF00FF00FF00FF00ULL) >> 8) * 0xFF;

    x = (x & 0x00FF00FF00FF00FFULL) | ((y & 0x00FF00FF00FF00FFULL) << 8);

    return M64C (x);
}
 
/* Saturating unsigned 16-bit add (paddusw), SWAR style: even and odd
 * word lanes are summed separately so carries stay in-lane; any lane
 * whose sum carried into bit 16 is forced to 0xFFFF.
 *
 * Consistency fix: the two bare 0xFFFF0000FFFF0000 mask literals now
 * carry the ULL suffix like every other 64-bit constant in this file
 * (they previously relied on the implicit type of an oversized hex
 * constant). */
__inline __m64
_m_paddusw (__m64 a, __m64 b) /* unoptimized */
{
    unsigned long long x = (M64U (a) & 0x0000FFFF0000FFFFULL) +
                           (M64U (b) & 0x0000FFFF0000FFFFULL);

    unsigned long long y = ((M64U (a) >> 16) & 0x0000FFFF0000FFFFULL) +
                           ((M64U (b) >> 16) & 0x0000FFFF0000FFFFULL);

    x |= ((x & 0xFFFF0000FFFF0000ULL) >> 16) * 0xFFFF;
    y |= ((y & 0xFFFF0000FFFF0000ULL) >> 16) * 0xFFFF;

    x = (x & 0x0000FFFF0000FFFFULL) | ((y & 0x0000FFFF0000FFFFULL) << 16);

    return M64C (x);
}
 
/* Shuffle 16-bit lanes (pshufw): each 2-bit field of n selects the
 * source lane for the corresponding result lane. */
__inline __m64
_m_pshufw (__m64 a, int n) /* unoptimized */
{
    unsigned short d[4] =
    {
	a.m64_u16[n & 3],
	a.m64_u16[(n >> 2) & 3],
	a.m64_u16[(n >> 4) & 3],
	a.m64_u16[(n >> 6) & 3]
    };

    return M64C (d[0]);
}
 
__inline unsigned char
sat16 (unsigned short d)
{
if (d > 0xFF) return 0xFF;
else return d & 0xFF;
}
 
/* Pack eight 16-bit lanes (m1 low half, m2 high half) into eight bytes
 * with unsigned saturation.
 * NOTE(review): hardware packuswb treats its inputs as SIGNED words and
 * clamps negative values to 0; sat16 clamps anything > 0xFF to 0xFF, so
 * lane values >= 0x8000 behave differently from real MMX — confirm
 * callers never produce such values. */
__inline __m64
_m_packuswb (__m64 m1, __m64 m2) /* unoptimized */
{
    unsigned char d[8] =
    {
	sat16 (m1.m64_u16[0]),
	sat16 (m1.m64_u16[1]),
	sat16 (m1.m64_u16[2]),
	sat16 (m1.m64_u16[3]),
	sat16 (m2.m64_u16[0]),
	sat16 (m2.m64_u16[1]),
	sat16 (m2.m64_u16[2]),
	sat16 (m2.m64_u16[3])
    };

    return M64C (d[0]);
}
 
/* Interleave the LOW four bytes of m1 and m2:
 * result = m1[0], m2[0], m1[1], m2[1], m1[2], m2[2], m1[3], m2[3]. */
__inline __m64 _m_punpcklbw (__m64 m1, __m64 m2) /* unoptimized */
{
    unsigned char d[8] =
    {
	m1.m64_u8[0],
	m2.m64_u8[0],
	m1.m64_u8[1],
	m2.m64_u8[1],
	m1.m64_u8[2],
	m2.m64_u8[2],
	m1.m64_u8[3],
	m2.m64_u8[3],
    };

    return M64C (d[0]);
}
 
/* Interleave the HIGH four bytes of m1 and m2:
 * result = m1[4], m2[4], m1[5], m2[5], m1[6], m2[6], m1[7], m2[7]. */
__inline __m64 _m_punpckhbw (__m64 m1, __m64 m2) /* unoptimized */
{
    unsigned char d[8] =
    {
	m1.m64_u8[4],
	m2.m64_u8[4],
	m1.m64_u8[5],
	m2.m64_u8[5],
	m1.m64_u8[6],
	m2.m64_u8[6],
	m1.m64_u8[7],
	m2.m64_u8[7],
    };

    return M64C (d[0]);
}
 
/* Logical right shift of each 16-bit lane by n (psrlw).
 * NOTE(review): hardware zeroes the lanes for any count >= 16; here
 * counts 16..31 also yield 0 (lanes promote to int), but n >= 32 is
 * undefined — confirm callers keep n < 16. */
__inline __m64 _m_psrlwi (__m64 a, int n) /* unoptimized */
{
    unsigned short d[4] =
    {
	a.m64_u16[0] >> n,
	a.m64_u16[1] >> n,
	a.m64_u16[2] >> n,
	a.m64_u16[3] >> n
    };

    return M64C (d[0]);
}
 
/* Logical right shift of the whole 64-bit value by n (psrlq).
 * NOTE(review): assumes 0 <= n < 64; hardware yields 0 for larger
 * counts, while here that would be undefined — confirm callers. */
__inline __m64 _m_psrlqi (__m64 m, int n)
{
    unsigned long long x = M64U (m) >> n;

    return M64C (x);
}
 
/* Logical left shift of the whole 64-bit value by n (psllq).
 * NOTE(review): assumes 0 <= n < 64 — see _m_psrlqi. */
__inline __m64 _m_psllqi (__m64 m, int n)
{
    unsigned long long x = M64U (m) << n;

    return M64C (x);
}
#endif /* MMX_X64_H_INCLUDED */
/programs/develop/libraries/pixman/pixman.c
0,0 → 1,1274
/* -*- Mode: c; c-basic-offset: 4; tab-width: 8; indent-tabs-mode: t; -*- */
/*
* Copyright © 2000 SuSE, Inc.
* Copyright © 2007 Red Hat, Inc.
*
* Permission to use, copy, modify, distribute, and sell this software and its
* documentation for any purpose is hereby granted without fee, provided that
* the above copyright notice appear in all copies and that both that
* copyright notice and this permission notice appear in supporting
* documentation, and that the name of SuSE not be used in advertising or
* publicity pertaining to distribution of the software without specific,
* written prior permission. SuSE makes no representations about the
* suitability of this software for any purpose. It is provided "as is"
* without express or implied warranty.
*
* SuSE DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING ALL
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL SuSE
* BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
* OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*
* Author: Keith Packard, SuSE, Inc.
*/
 
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif
#include "pixman-private.h"
 
#include <stdlib.h>
 
/* Lazily choose and cache the implementation chain used by all
 * compositing entry points.
 * NOTE(review): the unsynchronized static makes the first call racy if
 * it happens concurrently from several threads — presumably acceptable
 * in this single-threaded PIXMAN_NO_TLS build; confirm. */
static force_inline pixman_implementation_t *
get_implementation (void)
{
    static pixman_implementation_t *global_implementation;

    if (!global_implementation)
	global_implementation = _pixman_choose_implementation ();

    return global_implementation;
}
 
typedef struct operator_info_t operator_info_t;
 
struct operator_info_t
{
uint8_t opaque_info[4];
};
 
#define PACK(neither, src, dest, both) \
{{ (uint8_t)PIXMAN_OP_ ## neither, \
(uint8_t)PIXMAN_OP_ ## src, \
(uint8_t)PIXMAN_OP_ ## dest, \
(uint8_t)PIXMAN_OP_ ## both }}
 
static const operator_info_t operator_table[] =
{
/* Neither Opaque Src Opaque Dst Opaque Both Opaque */
PACK (CLEAR, CLEAR, CLEAR, CLEAR),
PACK (SRC, SRC, SRC, SRC),
PACK (DST, DST, DST, DST),
PACK (OVER, SRC, OVER, SRC),
PACK (OVER_REVERSE, OVER_REVERSE, DST, DST),
PACK (IN, IN, SRC, SRC),
PACK (IN_REVERSE, DST, IN_REVERSE, DST),
PACK (OUT, OUT, CLEAR, CLEAR),
PACK (OUT_REVERSE, CLEAR, OUT_REVERSE, CLEAR),
PACK (ATOP, IN, OVER, SRC),
PACK (ATOP_REVERSE, OVER_REVERSE, IN_REVERSE, DST),
PACK (XOR, OUT, OUT_REVERSE, CLEAR),
PACK (ADD, ADD, ADD, ADD),
PACK (SATURATE, OVER_REVERSE, DST, DST),
 
{{ 0 /* 0x0e */ }},
{{ 0 /* 0x0f */ }},
 
PACK (CLEAR, CLEAR, CLEAR, CLEAR),
PACK (SRC, SRC, SRC, SRC),
PACK (DST, DST, DST, DST),
PACK (DISJOINT_OVER, DISJOINT_OVER, DISJOINT_OVER, DISJOINT_OVER),
PACK (DISJOINT_OVER_REVERSE, DISJOINT_OVER_REVERSE, DISJOINT_OVER_REVERSE, DISJOINT_OVER_REVERSE),
PACK (DISJOINT_IN, DISJOINT_IN, DISJOINT_IN, DISJOINT_IN),
PACK (DISJOINT_IN_REVERSE, DISJOINT_IN_REVERSE, DISJOINT_IN_REVERSE, DISJOINT_IN_REVERSE),
PACK (DISJOINT_OUT, DISJOINT_OUT, DISJOINT_OUT, DISJOINT_OUT),
PACK (DISJOINT_OUT_REVERSE, DISJOINT_OUT_REVERSE, DISJOINT_OUT_REVERSE, DISJOINT_OUT_REVERSE),
PACK (DISJOINT_ATOP, DISJOINT_ATOP, DISJOINT_ATOP, DISJOINT_ATOP),
PACK (DISJOINT_ATOP_REVERSE, DISJOINT_ATOP_REVERSE, DISJOINT_ATOP_REVERSE, DISJOINT_ATOP_REVERSE),
PACK (DISJOINT_XOR, DISJOINT_XOR, DISJOINT_XOR, DISJOINT_XOR),
 
{{ 0 /* 0x1c */ }},
{{ 0 /* 0x1d */ }},
{{ 0 /* 0x1e */ }},
{{ 0 /* 0x1f */ }},
 
PACK (CLEAR, CLEAR, CLEAR, CLEAR),
PACK (SRC, SRC, SRC, SRC),
PACK (DST, DST, DST, DST),
PACK (CONJOINT_OVER, CONJOINT_OVER, CONJOINT_OVER, CONJOINT_OVER),
PACK (CONJOINT_OVER_REVERSE, CONJOINT_OVER_REVERSE, CONJOINT_OVER_REVERSE, CONJOINT_OVER_REVERSE),
PACK (CONJOINT_IN, CONJOINT_IN, CONJOINT_IN, CONJOINT_IN),
PACK (CONJOINT_IN_REVERSE, CONJOINT_IN_REVERSE, CONJOINT_IN_REVERSE, CONJOINT_IN_REVERSE),
PACK (CONJOINT_OUT, CONJOINT_OUT, CONJOINT_OUT, CONJOINT_OUT),
PACK (CONJOINT_OUT_REVERSE, CONJOINT_OUT_REVERSE, CONJOINT_OUT_REVERSE, CONJOINT_OUT_REVERSE),
PACK (CONJOINT_ATOP, CONJOINT_ATOP, CONJOINT_ATOP, CONJOINT_ATOP),
PACK (CONJOINT_ATOP_REVERSE, CONJOINT_ATOP_REVERSE, CONJOINT_ATOP_REVERSE, CONJOINT_ATOP_REVERSE),
PACK (CONJOINT_XOR, CONJOINT_XOR, CONJOINT_XOR, CONJOINT_XOR),
 
{{ 0 /* 0x2c */ }},
{{ 0 /* 0x2d */ }},
{{ 0 /* 0x2e */ }},
{{ 0 /* 0x2f */ }},
 
PACK (MULTIPLY, MULTIPLY, MULTIPLY, MULTIPLY),
PACK (SCREEN, SCREEN, SCREEN, SCREEN),
PACK (OVERLAY, OVERLAY, OVERLAY, OVERLAY),
PACK (DARKEN, DARKEN, DARKEN, DARKEN),
PACK (LIGHTEN, LIGHTEN, LIGHTEN, LIGHTEN),
PACK (COLOR_DODGE, COLOR_DODGE, COLOR_DODGE, COLOR_DODGE),
PACK (COLOR_BURN, COLOR_BURN, COLOR_BURN, COLOR_BURN),
PACK (HARD_LIGHT, HARD_LIGHT, HARD_LIGHT, HARD_LIGHT),
PACK (SOFT_LIGHT, SOFT_LIGHT, SOFT_LIGHT, SOFT_LIGHT),
PACK (DIFFERENCE, DIFFERENCE, DIFFERENCE, DIFFERENCE),
PACK (EXCLUSION, EXCLUSION, EXCLUSION, EXCLUSION),
PACK (HSL_HUE, HSL_HUE, HSL_HUE, HSL_HUE),
PACK (HSL_SATURATION, HSL_SATURATION, HSL_SATURATION, HSL_SATURATION),
PACK (HSL_COLOR, HSL_COLOR, HSL_COLOR, HSL_COLOR),
PACK (HSL_LUMINOSITY, HSL_LUMINOSITY, HSL_LUMINOSITY, HSL_LUMINOSITY),
};
 
/*
 * Optimize the current operator based on opacity of source or destination
 * The output operator should be mathematically equivalent to the source.
 */
static pixman_op_t
optimize_operator (pixman_op_t op,
                   uint32_t    src_flags,
                   uint32_t    mask_flags,
                   uint32_t    dst_flags)
{
    pixman_bool_t is_source_opaque, is_dest_opaque;

/* Bit position of FAST_PATH_IS_OPAQUE; the compile-time assert keeps
 * this constant in sync with pixman-private.h. */
#define OPAQUE_SHIFT 13

    COMPILE_TIME_ASSERT (FAST_PATH_IS_OPAQUE == (1 << OPAQUE_SHIFT));

    is_dest_opaque = (dst_flags & FAST_PATH_IS_OPAQUE);
    /* The source is effectively opaque only if both source and mask are. */
    is_source_opaque = ((src_flags & mask_flags) & FAST_PATH_IS_OPAQUE);

    /* Reduce the two flags to a 2-bit table index:
     * bit 1 = dest opaque, bit 0 = source opaque. */
    is_dest_opaque >>= OPAQUE_SHIFT - 1;
    is_source_opaque >>= OPAQUE_SHIFT;

    return operator_table[op].opaque_info[is_dest_opaque | is_source_opaque];
}
 
/* If IMAGE carries FAST_PATH_NEEDS_WORKAROUND, rebase its bits pointer
 * and clip region to the clip extents and adjust (*x, *y) to match;
 * the original state is reported through save_bits/save_dx/save_dy so
 * unapply_workaround () can restore it.  For other images the save_*
 * outputs are left untouched. */
static void
apply_workaround (pixman_image_t *image,
                  int32_t *       x,
                  int32_t *       y,
                  uint32_t **     save_bits,
                  int *           save_dx,
                  int *           save_dy)
{
    if (image && (image->common.flags & FAST_PATH_NEEDS_WORKAROUND))
    {
	/* Some X servers generate images that point to the
	 * wrong place in memory, but then set the clip region
	 * to point to the right place. Because of an old bug
	 * in pixman, this would actually work.
	 *
	 * Here we try and undo the damage
	 */
	int bpp = PIXMAN_FORMAT_BPP (image->bits.format) / 8;
	pixman_box32_t *extents;
	uint8_t *t;
	int dx, dy;

	extents = pixman_region32_extents (&(image->common.clip_region));
	dx = extents->x1;
	dy = extents->y1;

	*save_bits = image->bits.bits;

	*x -= dx;
	*y -= dy;
	pixman_region32_translate (&(image->common.clip_region), -dx, -dy);

	/* Advance the pixel pointer by (dx, dy); rowstride is counted
	 * in uint32_t units, hence the * 4. */
	t = (uint8_t *)image->bits.bits;
	t += dy * image->bits.rowstride * 4 + dx * bpp;
	image->bits.bits = (uint32_t *)t;

	*save_dx = dx;
	*save_dy = dy;
    }
}
 
/* Undo apply_workaround (): restore the saved bits pointer and shift
 * the clip region back by (dx, dy).  No-op for images without the
 * workaround flag. */
static void
unapply_workaround (pixman_image_t *image, uint32_t *bits, int dx, int dy)
{
    if (!image || !(image->common.flags & FAST_PATH_NEEDS_WORKAROUND))
	return;

    image->bits.bits = bits;
    pixman_region32_translate (&image->common.clip_region, dx, dy);
}
 
/*
 * Computing composite region: intersect REGION with CLIP translated by
 * (dx, dy), in place.  Returns FALSE when the result is empty (which is
 * indistinguishable from an allocation failure inside the region code).
 */
static inline pixman_bool_t
clip_general_image (pixman_region32_t * region,
                    pixman_region32_t * clip,
                    int                 dx,
                    int                 dy)
{
    if (pixman_region32_n_rects (region) == 1 &&
        pixman_region32_n_rects (clip) == 1)
    {
	/* Fast path: both regions are single boxes — clamp directly. */
	pixman_box32_t * rbox = pixman_region32_rectangles (region, NULL);
	pixman_box32_t * cbox = pixman_region32_rectangles (clip, NULL);
	int v;

	if (rbox->x1 < (v = cbox->x1 + dx))
	    rbox->x1 = v;
	if (rbox->x2 > (v = cbox->x2 + dx))
	    rbox->x2 = v;
	if (rbox->y1 < (v = cbox->y1 + dy))
	    rbox->y1 = v;
	if (rbox->y2 > (v = cbox->y2 + dy))
	    rbox->y2 = v;
	if (rbox->x1 >= rbox->x2 || rbox->y1 >= rbox->y2)
	{
	    /* Degenerate box: reset REGION to the canonical empty region. */
	    pixman_region32_init (region);
	    return FALSE;
	}
    }
    else if (!pixman_region32_not_empty (clip))
    {
	return FALSE;
    }
    else
    {
	/* General case: shift REGION into clip space, intersect, shift back. */
	if (dx || dy)
	    pixman_region32_translate (region, -dx, -dy);

	if (!pixman_region32_intersect (region, region, clip))
	    return FALSE;

	if (dx || dy)
	    pixman_region32_translate (region, dx, dy);
    }

    return pixman_region32_not_empty (region);
}
 
/* Clip REGION against a source image's clip region (translated by
 * dx, dy).  Returns FALSE when the clipped region becomes empty.
 */
static inline pixman_bool_t
clip_source_image (pixman_region32_t * region,
                   pixman_image_t *    image,
                   int                 dx,
                   int                 dy)
{
    /* A source clip only participates when source clipping has been
     * explicitly enabled AND the clip was installed by an X client.
     * Clips that were not set by a client are hierarchy clips, which
     * must never restrict a source, so they are treated as "no clip".
     */
    pixman_bool_t client_clip_active =
        image->common.clip_sources && image->common.client_clip;

    if (!client_clip_active)
        return TRUE;

    return clip_general_image (region, &image->common.clip_region, dx, dy);
}
 
/*
 * Computes the region actually affected by a composite operation:
 * the destination rectangle clipped against the destination image,
 * its clip region and alpha map, and then against the source and
 * mask clip regions (and their alpha maps).
 *
 * returns FALSE if the final region is empty. Indistinguishable from
 * an allocation failure, but rendering ignores those anyways.
 */
static pixman_bool_t
pixman_compute_composite_region32 (pixman_region32_t * region,
                                   pixman_image_t *    src_image,
                                   pixman_image_t *    mask_image,
                                   pixman_image_t *    dst_image,
                                   int32_t             src_x,
                                   int32_t             src_y,
                                   int32_t             mask_x,
                                   int32_t             mask_y,
                                   int32_t             dest_x,
                                   int32_t             dest_y,
                                   int32_t             width,
                                   int32_t             height)
{
    /* Start from the requested destination rectangle, clamped to the
     * destination image bounds.
     */
    region->extents.x1 = dest_x;
    region->extents.x2 = dest_x + width;
    region->extents.y1 = dest_y;
    region->extents.y2 = dest_y + height;

    region->extents.x1 = MAX (region->extents.x1, 0);
    region->extents.y1 = MAX (region->extents.y1, 0);
    region->extents.x2 = MIN (region->extents.x2, dst_image->bits.width);
    region->extents.y2 = MIN (region->extents.y2, dst_image->bits.height);

    region->data = 0;

    /* Check for empty operation */
    if (region->extents.x1 >= region->extents.x2 ||
        region->extents.y1 >= region->extents.y2)
    {
        region->extents.x1 = 0;
        region->extents.x2 = 0;
        region->extents.y1 = 0;
        region->extents.y2 = 0;
        return FALSE;
    }

    /* Clip against the destination clip region (no translation) */
    if (dst_image->common.have_clip_region)
    {
        if (!clip_general_image (region, &dst_image->common.clip_region, 0, 0))
            return FALSE;
    }

    /* Clip against the destination's alpha map and its clip region */
    if (dst_image->common.alpha_map)
    {
        if (!pixman_region32_intersect_rect (region, region,
                                             dst_image->common.alpha_origin_x,
                                             dst_image->common.alpha_origin_y,
                                             dst_image->common.alpha_map->width,
                                             dst_image->common.alpha_map->height))
        {
            return FALSE;
        }
        if (!pixman_region32_not_empty (region))
            return FALSE;
        if (dst_image->common.alpha_map->common.have_clip_region)
        {
            if (!clip_general_image (region, &dst_image->common.alpha_map->common.clip_region,
                                     -dst_image->common.alpha_origin_x,
                                     -dst_image->common.alpha_origin_y))
            {
                return FALSE;
            }
        }
    }

    /* clip against src */
    if (src_image->common.have_clip_region)
    {
        if (!clip_source_image (region, src_image, dest_x - src_x, dest_y - src_y))
            return FALSE;
    }
    if (src_image->common.alpha_map && src_image->common.alpha_map->common.have_clip_region)
    {
        if (!clip_source_image (region, (pixman_image_t *)src_image->common.alpha_map,
                                dest_x - (src_x - src_image->common.alpha_origin_x),
                                dest_y - (src_y - src_image->common.alpha_origin_y)))
        {
            return FALSE;
        }
    }
    /* clip against mask */
    if (mask_image && mask_image->common.have_clip_region)
    {
        if (!clip_source_image (region, mask_image, dest_x - mask_x, dest_y - mask_y))
            return FALSE;

        if (mask_image->common.alpha_map && mask_image->common.alpha_map->common.have_clip_region)
        {
            if (!clip_source_image (region, (pixman_image_t *)mask_image->common.alpha_map,
                                    dest_x - (mask_x - mask_image->common.alpha_origin_x),
                                    dest_y - (mask_y - mask_image->common.alpha_origin_y)))
            {
                return FALSE;
            }
        }
    }

    return TRUE;
}
 
#define N_CACHED_FAST_PATHS 8

/* Small per-thread most-recently-used cache of composite fast paths.
 * Each slot stores the implementation that supplied the fast path
 * together with the full fast-path description (operator, formats,
 * flags, function pointer).  Slot 0 is the most recently used entry.
 */
typedef struct
{
    struct
    {
        pixman_implementation_t *   imp;
        pixman_fast_path_t          fast_path;
    } cache [N_CACHED_FAST_PATHS];
} cache_t;

PIXMAN_DEFINE_THREAD_LOCAL (cache_t, fast_path_cache);
 
/* Find a composite function for the given operator/format/flags
 * combination.
 *
 * First consults the per-thread MRU cache; on a miss, walks the
 * implementation delegate chain and scans each implementation's
 * fast-path table.  On success, *out_imp and *out_func are filled in,
 * the winning entry is moved to the front of the cache, and TRUE is
 * returned.  Returns FALSE when no implementation handles the
 * combination.
 */
static force_inline pixman_bool_t
lookup_composite_function (pixman_op_t               op,
                           pixman_format_code_t      src_format,
                           uint32_t                  src_flags,
                           pixman_format_code_t      mask_format,
                           uint32_t                  mask_flags,
                           pixman_format_code_t      dest_format,
                           uint32_t                  dest_flags,
                           pixman_implementation_t **out_imp,
                           pixman_composite_func_t  *out_func)
{
    pixman_implementation_t *imp;
    cache_t *cache;
    int i;

    /* Check cache for fast paths */
    cache = PIXMAN_GET_THREAD_LOCAL (fast_path_cache);

    for (i = 0; i < N_CACHED_FAST_PATHS; ++i)
    {
        const pixman_fast_path_t *info = &(cache->cache[i].fast_path);

        /* Note that we check for equality here, not whether
         * the cached fast path matches. This is to prevent
         * us from selecting an overly general fast path
         * when a more specific one would work.
         */
        if (info->op == op                      &&
            info->src_format == src_format      &&
            info->mask_format == mask_format    &&
            info->dest_format == dest_format    &&
            info->src_flags == src_flags        &&
            info->mask_flags == mask_flags      &&
            info->dest_flags == dest_flags      &&
            info->func)
        {
            *out_imp = cache->cache[i].imp;
            *out_func = cache->cache[i].fast_path.func;

            goto update_cache;
        }
    }

    /* Cache miss: search the delegate chain; implementations earlier
     * in the chain take precedence.
     */
    for (imp = get_implementation (); imp != NULL; imp = imp->delegate)
    {
        const pixman_fast_path_t *info = imp->fast_paths;

        while (info->op != PIXMAN_OP_NONE)
        {
            if ((info->op == op || info->op == PIXMAN_OP_any)       &&
                /* Formats */
                ((info->src_format == src_format) ||
                 (info->src_format == PIXMAN_any))                  &&
                ((info->mask_format == mask_format) ||
                 (info->mask_format == PIXMAN_any))                 &&
                ((info->dest_format == dest_format) ||
                 (info->dest_format == PIXMAN_any))                 &&
                /* Flags: every flag the candidate requires must be set */
                (info->src_flags & src_flags) == info->src_flags    &&
                (info->mask_flags & mask_flags) == info->mask_flags &&
                (info->dest_flags & dest_flags) == info->dest_flags)
            {
                *out_imp = imp;
                *out_func = info->func;

                /* Set i to the last spot in the cache so that the
                 * move-to-front code below will work
                 */
                i = N_CACHED_FAST_PATHS - 1;

                goto update_cache;
            }

            ++info;
        }
    }
    return FALSE;

update_cache:
    /* Move the found entry to slot 0, shifting intervening entries down */
    if (i)
    {
        while (i--)
            cache->cache[i + 1] = cache->cache[i];

        cache->cache[0].imp = *out_imp;
        cache->cache[0].fast_path.op = op;
        cache->cache[0].fast_path.src_format = src_format;
        cache->cache[0].fast_path.src_flags = src_flags;
        cache->cache[0].fast_path.mask_format = mask_format;
        cache->cache[0].fast_path.mask_flags = mask_flags;
        cache->cache[0].fast_path.dest_format = dest_format;
        cache->cache[0].fast_path.dest_flags = dest_flags;
        cache->cache[0].fast_path.func = *out_func;
    }

    return TRUE;
}
 
/* Map destination-space EXTENTS (relative to (x, y)) through TRANSFORM
 * into source sample space, expand by the filter footprint given by
 * (x_off, y_off, width, height), and write the resulting integer
 * bounds back into EXTENTS.
 *
 * Returns FALSE when the transform fails or any intermediate value
 * leaves the 48.16 fixed-point range.
 */
static pixman_bool_t
compute_sample_extents (pixman_transform_t *transform,
                        pixman_box32_t *extents, int x, int y,
                        pixman_fixed_t x_off, pixman_fixed_t y_off,
                        pixman_fixed_t width, pixman_fixed_t height)
{
    pixman_fixed_t x1, y1, x2, y2;
    pixman_fixed_48_16_t tx1, ty1, tx2, ty2;

    /* We have checked earlier that (extents->x1 - x) etc. fit in a pixman_fixed_t */
    /* Sample points sit at pixel centers, hence the half-pixel insets */
    x1 = (pixman_fixed_48_16_t)pixman_int_to_fixed (extents->x1 - x) + pixman_fixed_1 / 2;
    y1 = (pixman_fixed_48_16_t)pixman_int_to_fixed (extents->y1 - y) + pixman_fixed_1 / 2;
    x2 = (pixman_fixed_48_16_t)pixman_int_to_fixed (extents->x2 - x) - pixman_fixed_1 / 2;
    y2 = (pixman_fixed_48_16_t)pixman_int_to_fixed (extents->y2 - y) - pixman_fixed_1 / 2;

    if (!transform)
    {
        tx1 = (pixman_fixed_48_16_t)x1;
        ty1 = (pixman_fixed_48_16_t)y1;
        tx2 = (pixman_fixed_48_16_t)x2;
        ty2 = (pixman_fixed_48_16_t)y2;
    }
    else
    {
        int i;

        /* Silence GCC */
        tx1 = ty1 = tx2 = ty2 = 0;

        /* Transform all four corners and track their bounding box */
        for (i = 0; i < 4; ++i)
        {
            pixman_fixed_48_16_t tx, ty;
            pixman_vector_t v;

            v.vector[0] = (i & 0x01)? x1 : x2;
            v.vector[1] = (i & 0x02)? y1 : y2;
            v.vector[2] = pixman_fixed_1;

            if (!pixman_transform_point (transform, &v))
                return FALSE;

            tx = (pixman_fixed_48_16_t)v.vector[0];
            ty = (pixman_fixed_48_16_t)v.vector[1];

            if (i == 0)
            {
                tx1 = tx;
                ty1 = ty;
                tx2 = tx;
                ty2 = ty;
            }
            else
            {
                if (tx < tx1)
                    tx1 = tx;
                if (ty < ty1)
                    ty1 = ty;
                if (tx > tx2)
                    tx2 = tx;
                if (ty > ty2)
                    ty2 = ty;
            }
        }
    }

    /* Expand the source area by a tiny bit so account of different rounding that
     * may happen during sampling. Note that (8 * pixman_fixed_e) is very far from
     * 0.5 so this won't cause the area computed to be overly pessimistic.
     */
    tx1 += x_off - 8 * pixman_fixed_e;
    ty1 += y_off - 8 * pixman_fixed_e;
    tx2 += x_off + width + 8 * pixman_fixed_e;
    ty2 += y_off + height + 8 * pixman_fixed_e;

    if (tx1 < pixman_min_fixed_48_16 || tx1 > pixman_max_fixed_48_16 ||
        ty1 < pixman_min_fixed_48_16 || ty1 > pixman_max_fixed_48_16 ||
        tx2 < pixman_min_fixed_48_16 || tx2 > pixman_max_fixed_48_16 ||
        ty2 < pixman_min_fixed_48_16 || ty2 > pixman_max_fixed_48_16)
    {
        return FALSE;
    }
    else
    {
        extents->x1 = pixman_fixed_to_int (tx1);
        extents->y1 = pixman_fixed_to_int (ty1);
        extents->x2 = pixman_fixed_to_int (tx2) + 1;
        extents->y2 = pixman_fixed_to_int (ty2) + 1;

        return TRUE;
    }
}
 
#define IS_16BIT(x) (((x) >= INT16_MIN) && ((x) <= INT16_MAX))

/* Analyze how a source/mask IMAGE covers the composite EXTENTS
 * (translated by (x, y)).
 *
 * Sets FAST_PATH_SAMPLES_COVER_CLIP in *flags when every sample the
 * operation will read lies within the image, and returns FALSE when
 * the coordinates could overflow the 16.16 arithmetic used by the
 * compositing functions (so the caller must not composite at all).
 * A NULL image trivially passes.
 */
static pixman_bool_t
analyze_extent (pixman_image_t *image, int x, int y,
                const pixman_box32_t *extents, uint32_t *flags)
{
    pixman_transform_t *transform;
    pixman_fixed_t *params;
    pixman_fixed_t x_off, y_off;
    pixman_fixed_t width, height;
    pixman_box32_t ex;

    if (!image)
        return TRUE;

    /* Some compositing functions walk one step
     * outside the destination rectangle, so we
     * check here that the expanded-by-one source
     * extents in destination space fits in 16 bits
     */
    if (!IS_16BIT (extents->x1 - x - 1) ||
        !IS_16BIT (extents->y1 - y - 1) ||
        !IS_16BIT (extents->x2 - x + 1) ||
        !IS_16BIT (extents->y2 - y + 1))
    {
        return FALSE;
    }

    transform = image->common.transform;
    if (image->common.type == BITS)
    {
        /* During repeat mode calculations we might convert the
         * width/height of an image to fixed 16.16, so we need
         * them to be smaller than 16 bits.
         */
        if (image->bits.width >= 0x7fff || image->bits.height >= 0x7fff)
            return FALSE;

        /* Identity transform + nearest filter: coverage can be decided
         * with plain integer comparisons, no fixed-point math needed.
         */
#define ID_AND_NEAREST (FAST_PATH_ID_TRANSFORM | FAST_PATH_NEAREST_FILTER)

        if ((image->common.flags & ID_AND_NEAREST) == ID_AND_NEAREST &&
            extents->x1 - x >= 0 &&
            extents->y1 - y >= 0 &&
            extents->x2 - x <= image->bits.width &&
            extents->y2 - y <= image->bits.height)
        {
            *flags |= FAST_PATH_SAMPLES_COVER_CLIP;
            return TRUE;
        }

        /* Determine the filter footprint around each sample point */
        switch (image->common.filter)
        {
        case PIXMAN_FILTER_CONVOLUTION:
            params = image->common.filter_params;
            x_off = - pixman_fixed_e - ((params[0] - pixman_fixed_1) >> 1);
            y_off = - pixman_fixed_e - ((params[1] - pixman_fixed_1) >> 1);
            width = params[0];
            height = params[1];
            break;

        case PIXMAN_FILTER_GOOD:
        case PIXMAN_FILTER_BEST:
        case PIXMAN_FILTER_BILINEAR:
            x_off = - pixman_fixed_1 / 2;
            y_off = - pixman_fixed_1 / 2;
            width = pixman_fixed_1;
            height = pixman_fixed_1;
            break;

        case PIXMAN_FILTER_FAST:
        case PIXMAN_FILTER_NEAREST:
            x_off = - pixman_fixed_e;
            y_off = - pixman_fixed_e;
            width = 0;
            height = 0;
            break;

        default:
            return FALSE;
        }

        /* Check whether the non-expanded, transformed extent is entirely within
         * the source image, and set the FAST_PATH_SAMPLES_COVER_CLIP if it is.
         */
        ex = *extents;
        if (compute_sample_extents (transform, &ex, x, y, x_off, y_off, width, height) &&
            ex.x1 >= 0 && ex.y1 >= 0 &&
            ex.x2 <= image->bits.width && ex.y2 <= image->bits.height)
        {
            *flags |= FAST_PATH_SAMPLES_COVER_CLIP;
        }
    }
    else
    {
        /* Non-BITS images (gradients, solids) have no pixel grid, so
         * no filter footprint applies.
         */
        x_off = 0;
        y_off = 0;
        width = 0;
        height = 0;
    }

    /* Check that the extents expanded by one don't overflow. This ensures that
     * compositing functions can simply walk the source space using 16.16
     * variables without worrying about overflow.
     */
    ex.x1 = extents->x1 - 1;
    ex.y1 = extents->y1 - 1;
    ex.x2 = extents->x2 + 1;
    ex.y2 = extents->y2 + 1;

    if (!compute_sample_extents (transform, &ex, x, y, x_off, y_off, width, height))
        return FALSE;

    return TRUE;
}
 
/*
 * Work around GCC bug causing crashes in Mozilla with SSE2
 *
 * When using -msse, gcc generates movdqa instructions assuming that
 * the stack is 16 byte aligned. Unfortunately some applications, such
 * as Mozilla and Mono, end up aligning the stack to 4 bytes, which
 * causes the movdqa instructions to fail.
 *
 * The __force_align_arg_pointer__ makes gcc generate a prologue that
 * realigns the stack pointer to 16 bytes.
 *
 * On x86-64 this is not necessary because the standard ABI already
 * calls for a 16 byte aligned stack.
 *
 * See https://bugs.freedesktop.org/show_bug.cgi?id=15693
 */
#if defined (USE_SSE2) && defined(__GNUC__) && !defined(__x86_64__) && !defined(__amd64__)
__attribute__((__force_align_arg_pointer__))
#endif
/* Main 32-bit composite entry point: composites SRC (through MASK, if
 * any) onto DEST with operator OP over the given rectangle.
 *
 * Pipeline: validate the images, detect pixbuf configurations, apply
 * the X-server clip workaround, compute the affected region, analyze
 * the source/mask extents, simplify the operator where opacity allows,
 * then run the best matching composite function over each rectangle of
 * the region.
 */
PIXMAN_EXPORT void
pixman_image_composite32 (pixman_op_t      op,
                          pixman_image_t * src,
                          pixman_image_t * mask,
                          pixman_image_t * dest,
                          int32_t          src_x,
                          int32_t          src_y,
                          int32_t          mask_x,
                          int32_t          mask_y,
                          int32_t          dest_x,
                          int32_t          dest_y,
                          int32_t          width,
                          int32_t          height)
{
    pixman_format_code_t src_format, mask_format, dest_format;
    uint32_t src_flags, mask_flags, dest_flags;
    pixman_region32_t region;
    pixman_box32_t *extents;
    uint32_t *src_bits;
    int src_dx, src_dy;
    uint32_t *mask_bits;
    int mask_dx, mask_dy;
    uint32_t *dest_bits;
    int dest_dx, dest_dy;
    pixman_bool_t need_workaround;
    pixman_implementation_t *imp;
    pixman_composite_func_t func;

    _pixman_image_validate (src);
    if (mask)
        _pixman_image_validate (mask);
    _pixman_image_validate (dest);

    src_format = src->common.extended_format_code;
    src_flags = src->common.flags;

    if (mask)
    {
        mask_format = mask->common.extended_format_code;
        mask_flags = mask->common.flags;
    }
    else
    {
        /* No mask behaves like a fully opaque one */
        mask_format = PIXMAN_null;
        mask_flags = FAST_PATH_IS_OPAQUE;
    }

    dest_format = dest->common.extended_format_code;
    dest_flags = dest->common.flags;

    /* Check for pixbufs: a mask sharing the source's bits, repeat and
     * origin is reinterpreted as a combined (r)pixbuf format.
     */
    if ((mask_format == PIXMAN_a8r8g8b8 || mask_format == PIXMAN_a8b8g8r8) &&
        (src->type == BITS && src->bits.bits == mask->bits.bits)           &&
        (src->common.repeat == mask->common.repeat)                        &&
        (src_x == mask_x && src_y == mask_y))
    {
        if (src_format == PIXMAN_x8b8g8r8)
            src_format = mask_format = PIXMAN_pixbuf;
        else if (src_format == PIXMAN_x8r8g8b8)
            src_format = mask_format = PIXMAN_rpixbuf;
    }

    /* Check for workaround */
    need_workaround = (src_flags | mask_flags | dest_flags) & FAST_PATH_NEEDS_WORKAROUND;

    if (need_workaround)
    {
        apply_workaround (src, &src_x, &src_y, &src_bits, &src_dx, &src_dy);
        apply_workaround (mask, &mask_x, &mask_y, &mask_bits, &mask_dx, &mask_dy);
        apply_workaround (dest, &dest_x, &dest_y, &dest_bits, &dest_dx, &dest_dy);
    }

    pixman_region32_init (&region);

    if (!pixman_compute_composite_region32 (
            &region, src, mask, dest,
            src_x, src_y, mask_x, mask_y, dest_x, dest_y, width, height))
    {
        goto out;
    }

    extents = pixman_region32_extents (&region);

    /* Bail out entirely if 16.16 coordinate overflow is possible */
    if (!analyze_extent (src, dest_x - src_x, dest_y - src_y, extents, &src_flags))
        goto out;

    if (!analyze_extent (mask, dest_x - mask_x, dest_y - mask_y, extents, &mask_flags))
        goto out;

    /* If the clip is within the source samples, and the samples are opaque,
     * then the source is effectively opaque.
     */
#define BOTH (FAST_PATH_SAMPLES_OPAQUE | FAST_PATH_SAMPLES_COVER_CLIP)

    if ((src_flags & BOTH) == BOTH)
        src_flags |= FAST_PATH_IS_OPAQUE;

    if ((mask_flags & BOTH) == BOTH)
        mask_flags |= FAST_PATH_IS_OPAQUE;

    /*
     * Check if we can replace our operator by a simpler one
     * if the src or dest are opaque. The output operator should be
     * mathematically equivalent to the source.
     */
    op = optimize_operator (op, src_flags, mask_flags, dest_flags);
    if (op == PIXMAN_OP_DST)
        goto out;   /* DST is a no-op */

    if (lookup_composite_function (op,
                                   src_format, src_flags,
                                   mask_format, mask_flags,
                                   dest_format, dest_flags,
                                   &imp, &func))
    {
        const pixman_box32_t *pbox;
        int n;

        /* Composite each rectangle of the computed region */
        pbox = pixman_region32_rectangles (&region, &n);

        while (n--)
        {
            func (imp, op,
                  src, mask, dest,
                  pbox->x1 + src_x - dest_x,
                  pbox->y1 + src_y - dest_y,
                  pbox->x1 + mask_x - dest_x,
                  pbox->y1 + mask_y - dest_y,
                  pbox->x1,
                  pbox->y1,
                  pbox->x2 - pbox->x1,
                  pbox->y2 - pbox->y1);

            pbox++;
        }
    }

out:
    if (need_workaround)
    {
        unapply_workaround (src, src_bits, src_dx, src_dy);
        unapply_workaround (mask, mask_bits, mask_dx, mask_dy);
        unapply_workaround (dest, dest_bits, dest_dx, dest_dy);
    }

    pixman_region32_fini (&region);
}
 
/* 16-bit coordinate composite entry point; forwards directly to
 * pixman_image_composite32() (the int16_t/uint16_t arguments widen
 * implicitly to int32_t).
 */
PIXMAN_EXPORT void
pixman_image_composite (pixman_op_t      op,
                        pixman_image_t * src,
                        pixman_image_t * mask,
                        pixman_image_t * dest,
                        int16_t          src_x,
                        int16_t          src_y,
                        int16_t          mask_x,
                        int16_t          mask_y,
                        int16_t          dest_x,
                        int16_t          dest_y,
                        uint16_t         width,
                        uint16_t         height)
{
    pixman_image_composite32 (op, src, mask, dest, src_x, src_y,
                              mask_x, mask_y, dest_x, dest_y, width, height);
}
 
/* Raw bit-blit between two pixel buffers; simply delegates to the
 * current implementation chain.  Returns whatever the implementation
 * reports (FALSE when no implementation can perform the blit).
 */
PIXMAN_EXPORT pixman_bool_t
pixman_blt (uint32_t *src_bits,
            uint32_t *dst_bits,
            int       src_stride,
            int       dst_stride,
            int       src_bpp,
            int       dst_bpp,
            int       src_x,
            int       src_y,
            int       dst_x,
            int       dst_y,
            int       width,
            int       height)
{
    return _pixman_implementation_blt (get_implementation(),
                                       src_bits, dst_bits, src_stride, dst_stride,
                                       src_bpp, dst_bpp,
                                       src_x, src_y,
                                       dst_x, dst_y,
                                       width, height);
}
 
/* Fill a rectangle of a raw pixel buffer with the value XOR; delegates
 * to the current implementation chain.  Returns whatever the
 * implementation reports.
 */
PIXMAN_EXPORT pixman_bool_t
pixman_fill (uint32_t *bits,
             int       stride,
             int       bpp,
             int       x,
             int       y,
             int       width,
             int       height,
             uint32_t  xor)
{
    return _pixman_implementation_fill (
        get_implementation(), bits, stride, bpp, x, y, width, height, xor);
}
 
/* Pack a 16-bit-per-channel pixman_color_t into an a8r8g8b8 pixel by
 * keeping the high byte of each channel.
 */
static uint32_t
color_to_uint32 (const pixman_color_t *color)
{
    uint32_t a = color->alpha >> 8;
    uint32_t r = color->red   >> 8;
    uint32_t g = color->green >> 8;
    uint32_t b = color->blue  >> 8;

    return (a << 24) | (r << 16) | (g << 8) | b;
}
 
/* Convert COLOR to a single pixel value in FORMAT and store it in
 * *pixel.  Only the formats listed below are supported; returns FALSE
 * for any other format so the caller can fall back to a general
 * compositing path.
 */
static pixman_bool_t
color_to_pixel (pixman_color_t *     color,
                uint32_t *           pixel,
                pixman_format_code_t format)
{
    uint32_t c = color_to_uint32 (color);

    if (!(format == PIXMAN_a8r8g8b8     ||
          format == PIXMAN_x8r8g8b8     ||
          format == PIXMAN_a8b8g8r8     ||
          format == PIXMAN_x8b8g8r8     ||
          format == PIXMAN_b8g8r8a8     ||
          format == PIXMAN_b8g8r8x8     ||
          format == PIXMAN_r5g6b5       ||
          format == PIXMAN_b5g6r5       ||
          format == PIXMAN_a8))
    {
        return FALSE;
    }

    /* ARGB -> ABGR: swap the red and blue channels */
    if (PIXMAN_FORMAT_TYPE (format) == PIXMAN_TYPE_ABGR)
    {
        c = ((c & 0xff000000) >>  0) |
            ((c & 0x00ff0000) >> 16) |
            ((c & 0x0000ff00) >>  0) |
            ((c & 0x000000ff) << 16);
    }
    /* ARGB -> BGRA: reverse the byte order */
    if (PIXMAN_FORMAT_TYPE (format) == PIXMAN_TYPE_BGRA)
    {
        c = ((c & 0xff000000) >> 24) |
            ((c & 0x00ff0000) >>  8) |
            ((c & 0x0000ff00) <<  8) |
            ((c & 0x000000ff) << 24);
    }

    if (format == PIXMAN_a8)
        c = c >> 24;                    /* keep only the alpha byte */
    else if (format == PIXMAN_r5g6b5 ||
             format == PIXMAN_b5g6r5)
        c = CONVERT_8888_TO_0565 (c);   /* truncate to 5-6-5 */

    *pixel = c;
    return TRUE;
}
 
/* Fill a list of 16-bit rectangles by converting them to 32-bit boxes
 * and handing them to pixman_image_fill_boxes().  Returns FALSE on
 * allocation failure or when the fill itself fails.
 */
PIXMAN_EXPORT pixman_bool_t
pixman_image_fill_rectangles (pixman_op_t                 op,
                              pixman_image_t *            dest,
                              pixman_color_t *            color,
                              int                         n_rects,
                              const pixman_rectangle16_t *rects)
{
    pixman_box32_t  on_stack[6];
    pixman_box32_t *box_list;
    pixman_bool_t   ok;
    int             i;

    /* Small requests use the stack buffer; larger ones go to the heap */
    if (n_rects > 6)
    {
        box_list = pixman_malloc_ab (sizeof (pixman_box32_t), n_rects);
        if (box_list == NULL)
            return FALSE;
    }
    else
    {
        box_list = on_stack;
    }

    for (i = 0; i < n_rects; ++i)
    {
        const pixman_rectangle16_t *r = &rects[i];

        box_list[i].x1 = r->x;
        box_list[i].y1 = r->y;
        box_list[i].x2 = box_list[i].x1 + r->width;
        box_list[i].y2 = box_list[i].y1 + r->height;
    }

    ok = pixman_image_fill_boxes (op, dest, color, n_rects, box_list);

    if (box_list != on_stack)
        free (box_list);

    return ok;
}
 
/* Fill each box in BOXES with COLOR using operator OP.
 *
 * OVER with an opaque color and CLEAR are first reduced to SRC; a SRC
 * fill whose color converts to a single pixel value takes a direct
 * pixman_fill() path over the clipped region.  Anything else is
 * composited box-by-box through a temporary solid-fill image.
 * Returns FALSE on allocation failure.
 */
PIXMAN_EXPORT pixman_bool_t
pixman_image_fill_boxes (pixman_op_t           op,
                         pixman_image_t *      dest,
                         pixman_color_t *      color,
                         int                   n_boxes,
                         const pixman_box32_t *boxes)
{
    pixman_image_t *solid;
    pixman_color_t c;
    int i;

    _pixman_image_validate (dest);

    /* OVER with a fully opaque color writes the same pixels as SRC */
    if (color->alpha == 0xffff)
    {
        if (op == PIXMAN_OP_OVER)
            op = PIXMAN_OP_SRC;
    }

    /* CLEAR is SRC with transparent black */
    if (op == PIXMAN_OP_CLEAR)
    {
        c.red = 0;
        c.green = 0;
        c.blue = 0;
        c.alpha = 0;

        color = &c;

        op = PIXMAN_OP_SRC;
    }

    if (op == PIXMAN_OP_SRC)
    {
        uint32_t pixel;

        if (color_to_pixel (color, &pixel, dest->bits.format))
        {
            pixman_region32_t fill_region;
            int n_rects, j;
            pixman_box32_t *rects;

            if (!pixman_region32_init_rects (&fill_region, boxes, n_boxes))
                return FALSE;

            if (dest->common.have_clip_region)
            {
                if (!pixman_region32_intersect (&fill_region,
                                                &fill_region,
                                                &dest->common.clip_region))
                {
                    /* Bug fix: the region was previously leaked on
                     * this failure path; release it before returning.
                     */
                    pixman_region32_fini (&fill_region);
                    return FALSE;
                }
            }

            rects = pixman_region32_rectangles (&fill_region, &n_rects);
            for (j = 0; j < n_rects; ++j)
            {
                const pixman_box32_t *rect = &(rects[j]);
                pixman_fill (dest->bits.bits, dest->bits.rowstride,
                             PIXMAN_FORMAT_BPP (dest->bits.format),
                             rect->x1, rect->y1,
                             rect->x2 - rect->x1, rect->y2 - rect->y1,
                             pixel);
            }

            pixman_region32_fini (&fill_region);
            return TRUE;
        }
    }

    /* General path: composite a solid image over each box */
    solid = pixman_image_create_solid_fill (color);
    if (!solid)
        return FALSE;

    for (i = 0; i < n_boxes; ++i)
    {
        const pixman_box32_t *box = &(boxes[i]);

        pixman_image_composite32 (op, solid, NULL, dest,
                                  0, 0, 0, 0,
                                  box->x1, box->y1,
                                  box->x2 - box->x1, box->y2 - box->y1);
    }

    pixman_image_unref (solid);

    return TRUE;
}
 
/**
* pixman_version:
*
* Returns the version of the pixman library encoded in a single
* integer as per %PIXMAN_VERSION_ENCODE. The encoding ensures that
* later versions compare greater than earlier versions.
*
* A run-time comparison to check that pixman's version is greater than
* or equal to version X.Y.Z could be performed as follows:
*
* <informalexample><programlisting>
* if (pixman_version() >= PIXMAN_VERSION_ENCODE(X,Y,Z)) {...}
* </programlisting></informalexample>
*
* See also pixman_version_string() as well as the compile-time
* equivalents %PIXMAN_VERSION and %PIXMAN_VERSION_STRING.
*
* Return value: the encoded version.
**/
PIXMAN_EXPORT int
pixman_version (void)
{
    /* PIXMAN_VERSION is a macro, so this reports the version the
     * library itself was built as.
     */
    return PIXMAN_VERSION;
}
 
/**
* pixman_version_string:
*
* Returns the version of the pixman library as a human-readable string
* of the form "X.Y.Z".
*
* See also pixman_version() as well as the compile-time equivalents
* %PIXMAN_VERSION_STRING and %PIXMAN_VERSION.
*
* Return value: a string containing the version.
**/
PIXMAN_EXPORT const char*
pixman_version_string (void)
{
    /* Static string baked in at build time; callers must not free it */
    return PIXMAN_VERSION_STRING;
}
 
/**
* pixman_format_supported_source:
* @format: A pixman_format_code_t format
*
* Return value: whether the provided format code is a supported
* format for a pixman surface used as a source in
* rendering.
*
* Currently, all pixman_format_code_t values are supported.
**/
PIXMAN_EXPORT pixman_bool_t
pixman_format_supported_source (pixman_format_code_t format)
{
    /* Explicit whitelist of every format the library can read from */
    switch (format)
    {
    /* 32 bpp formats */
    case PIXMAN_a2b10g10r10:
    case PIXMAN_x2b10g10r10:
    case PIXMAN_a2r10g10b10:
    case PIXMAN_x2r10g10b10:
    case PIXMAN_a8r8g8b8:
    case PIXMAN_x8r8g8b8:
    case PIXMAN_a8b8g8r8:
    case PIXMAN_x8b8g8r8:
    case PIXMAN_b8g8r8a8:
    case PIXMAN_b8g8r8x8:
    case PIXMAN_r8g8b8:
    case PIXMAN_b8g8r8:
    case PIXMAN_r5g6b5:
    case PIXMAN_b5g6r5:
    case PIXMAN_x14r6g6b6:
    /* 16 bpp formats */
    case PIXMAN_a1r5g5b5:
    case PIXMAN_x1r5g5b5:
    case PIXMAN_a1b5g5r5:
    case PIXMAN_x1b5g5r5:
    case PIXMAN_a4r4g4b4:
    case PIXMAN_x4r4g4b4:
    case PIXMAN_a4b4g4r4:
    case PIXMAN_x4b4g4r4:
    /* 8bpp formats */
    case PIXMAN_a8:
    case PIXMAN_r3g3b2:
    case PIXMAN_b2g3r3:
    case PIXMAN_a2r2g2b2:
    case PIXMAN_a2b2g2r2:
    case PIXMAN_c8:
    case PIXMAN_g8:
    case PIXMAN_x4a4:
    /* Collides with PIXMAN_c8
       case PIXMAN_x4c4:
     */
    /* Collides with PIXMAN_g8
       case PIXMAN_x4g4:
     */
    /* 4bpp formats */
    case PIXMAN_a4:
    case PIXMAN_r1g2b1:
    case PIXMAN_b1g2r1:
    case PIXMAN_a1r1g1b1:
    case PIXMAN_a1b1g1r1:
    case PIXMAN_c4:
    case PIXMAN_g4:
    /* 1bpp formats */
    case PIXMAN_a1:
    case PIXMAN_g1:
    /* YUV formats */
    case PIXMAN_yuy2:
    case PIXMAN_yv12:
        return TRUE;

    default:
        return FALSE;
    }
}
 
/**
 * pixman_format_supported_destination:
 * @format: A pixman_format_code_t format
 *
 * Return value: whether the provided format code is a supported
 * format for a pixman surface used as a destination in
 * rendering.
 *
 * Currently, all pixman_format_code_t values are supported
 * except for the YUV formats.
 **/
PIXMAN_EXPORT pixman_bool_t
pixman_format_supported_destination (pixman_format_code_t format)
{
    switch (format)
    {
    /* YUV formats cannot be written to at the moment */
    case PIXMAN_yuy2:
    case PIXMAN_yv12:
        return FALSE;

    default:
        return pixman_format_supported_source (format);
    }
}
 
/* 16-bit region variant of pixman_compute_composite_region32(): the
 * computation is done with a temporary 32-bit region which is then
 * copied into the caller's 16-bit region.  Returns FALSE when the
 * region is empty or the copy fails.
 */
PIXMAN_EXPORT pixman_bool_t
pixman_compute_composite_region (pixman_region16_t * region,
                                 pixman_image_t *    src_image,
                                 pixman_image_t *    mask_image,
                                 pixman_image_t *    dst_image,
                                 int16_t             src_x,
                                 int16_t             src_y,
                                 int16_t             mask_x,
                                 int16_t             mask_y,
                                 int16_t             dest_x,
                                 int16_t             dest_y,
                                 uint16_t            width,
                                 uint16_t            height)
{
    pixman_region32_t r32;
    pixman_bool_t retval;

    pixman_region32_init (&r32);

    retval = pixman_compute_composite_region32 (
        &r32, src_image, mask_image, dst_image,
        src_x, src_y, mask_x, mask_y, dest_x, dest_y,
        width, height);

    if (retval)
    {
        if (!pixman_region16_copy_from_region32 (region, &r32))
            retval = FALSE;
    }

    pixman_region32_fini (&r32);
    return retval;
}
/programs/develop/libraries/pixman/pixman.h
0,0 → 1,950
/***********************************************************
 
Copyright 1987, 1998 The Open Group
 
Permission to use, copy, modify, distribute, and sell this software and its
documentation for any purpose is hereby granted without fee, provided that
the above copyright notice appear in all copies and that both that
copyright notice and this permission notice appear in supporting
documentation.
 
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
 
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
OPEN GROUP BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 
Except as contained in this notice, the name of The Open Group shall not be
used in advertising or otherwise to promote the sale, use or other dealings
in this Software without prior written authorization from The Open Group.
 
Copyright 1987 by Digital Equipment Corporation, Maynard, Massachusetts.
 
All Rights Reserved
 
Permission to use, copy, modify, and distribute this software and its
documentation for any purpose and without fee is hereby granted,
provided that the above copyright notice appear in all copies and that
both that copyright notice and this permission notice appear in
supporting documentation, and that the name of Digital not be
used in advertising or publicity pertaining to distribution of the
software without specific, written prior permission.
 
DIGITAL DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL
DIGITAL BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION,
ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS
SOFTWARE.
 
******************************************************************/
/*
* Copyright © 1998, 2004 Keith Packard
* Copyright 2007 Red Hat, Inc.
*
* Permission to use, copy, modify, distribute, and sell this software and its
* documentation for any purpose is hereby granted without fee, provided that
* the above copyright notice appear in all copies and that both that
* copyright notice and this permission notice appear in supporting
* documentation, and that the name of Keith Packard not be used in
* advertising or publicity pertaining to distribution of the software without
* specific, written prior permission. Keith Packard makes no
* representations about the suitability of this software for any purpose. It
* is provided "as is" without express or implied warranty.
*
* KEITH PACKARD DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
* INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
* EVENT SHALL KEITH PACKARD BE LIABLE FOR ANY SPECIAL, INDIRECT OR
* CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
* DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
 
#ifndef PIXMAN_H__
#define PIXMAN_H__
 
#include <pixman-version.h>
 
#ifdef __cplusplus
#define PIXMAN_BEGIN_DECLS extern "C" {
#define PIXMAN_END_DECLS }
#else
#define PIXMAN_BEGIN_DECLS
#define PIXMAN_END_DECLS
#endif
 
PIXMAN_BEGIN_DECLS
 
/*
* Standard integers
*/
 
#if !defined (PIXMAN_DONT_DEFINE_STDINT)
 
#if defined (_SVR4) || defined (SVR4) || defined (__OpenBSD__) || defined (_sgi) || defined (__sun) || defined (sun) || defined (__digital__) || defined (__HP_cc)
# include <inttypes.h>
/* VS 2010 (_MSC_VER 1600) has stdint.h */
#elif defined (_MSC_VER) && _MSC_VER < 1600
typedef __int8 int8_t;
typedef unsigned __int8 uint8_t;
typedef __int16 int16_t;
typedef unsigned __int16 uint16_t;
typedef __int32 int32_t;
typedef unsigned __int32 uint32_t;
typedef __int64 int64_t;
typedef unsigned __int64 uint64_t;
#elif defined (_AIX)
# include <sys/inttypes.h>
#else
# include <stdint.h>
#endif
 
#endif
 
/*
 * Boolean
 */
typedef int pixman_bool_t;

/*
 * Fixpoint numbers
 *
 * The workhorse type is pixman_fixed_t: signed 16.16 fixed point.
 * pixman_fixed_48_16_t provides extra integer headroom for
 * intermediate computations.
 */
typedef int64_t                 pixman_fixed_32_32_t;
typedef pixman_fixed_32_32_t    pixman_fixed_48_16_t;
typedef uint32_t                pixman_fixed_1_31_t;
typedef uint32_t                pixman_fixed_1_16_t;
typedef int32_t                 pixman_fixed_16_16_t;
typedef pixman_fixed_16_16_t    pixman_fixed_t;

#define pixman_fixed_e                  ((pixman_fixed_t) 1)
#define pixman_fixed_1                  (pixman_int_to_fixed(1))
#define pixman_fixed_1_minus_e          (pixman_fixed_1 - pixman_fixed_e)
#define pixman_fixed_minus_1            (pixman_int_to_fixed(-1))
#define pixman_fixed_to_int(f)          ((int) ((f) >> 16))
#define pixman_int_to_fixed(i)          ((pixman_fixed_t) ((i) << 16))
#define pixman_fixed_to_double(f)       (double) ((f) / (double) pixman_fixed_1)
#define pixman_double_to_fixed(d)       ((pixman_fixed_t) ((d) * 65536.0))
#define pixman_fixed_frac(f)            ((f) & pixman_fixed_1_minus_e)
#define pixman_fixed_floor(f)           ((f) & ~pixman_fixed_1_minus_e)
#define pixman_fixed_ceil(f)            pixman_fixed_floor ((f) + pixman_fixed_1_minus_e)
#define pixman_fixed_fraction(f)        ((f) & pixman_fixed_1_minus_e)
/* Bug fix: this previously referenced the undefined identifier
 * "pixman_fixed1" (missing underscore), so any use of
 * pixman_fixed_mod_2 failed to compile.
 */
#define pixman_fixed_mod_2(f)           ((f) & (pixman_fixed_1 | pixman_fixed_1_minus_e))
#define pixman_max_fixed_48_16          ((pixman_fixed_48_16_t) 0x7fffffff)
#define pixman_min_fixed_48_16         (-((pixman_fixed_48_16_t) 1 << 31))
 
/*
 * Misc structs
 */
typedef struct pixman_color pixman_color_t;
typedef struct pixman_point_fixed pixman_point_fixed_t;
typedef struct pixman_line_fixed pixman_line_fixed_t;
typedef struct pixman_vector pixman_vector_t;
typedef struct pixman_transform pixman_transform_t;

/* A color with 16 bits per channel */
struct pixman_color
{
    uint16_t red;
    uint16_t green;
    uint16_t blue;
    uint16_t alpha;
};

/* A point in 16.16 fixed-point coordinates */
struct pixman_point_fixed
{
    pixman_fixed_t x;
    pixman_fixed_t y;
};

/* A line segment between two fixed-point points */
struct pixman_line_fixed
{
    pixman_point_fixed_t p1, p2;
};

/*
 * Fixed point matrices
 */

/* A homogeneous 3-vector of fixed-point coordinates */
struct pixman_vector
{
    pixman_fixed_t vector[3];
};

/* A 3x3 fixed-point transformation matrix */
struct pixman_transform
{
    pixman_fixed_t matrix[3][3];
};
 
/* forward declaration (sorry) */
struct pixman_box16;
typedef union pixman_image pixman_image_t;

/* Fixed-point 3x3 transform operations: construction, composition,
 * inversion, point mapping and predicates.  Functions returning
 * pixman_bool_t report failure with FALSE.
 */
void          pixman_transform_init_identity    (struct pixman_transform       *matrix);
pixman_bool_t pixman_transform_point_3d         (const struct pixman_transform *transform,
                                                 struct pixman_vector          *vector);
pixman_bool_t pixman_transform_point            (const struct pixman_transform *transform,
                                                 struct pixman_vector          *vector);
pixman_bool_t pixman_transform_multiply         (struct pixman_transform       *dst,
                                                 const struct pixman_transform *l,
                                                 const struct pixman_transform *r);
void          pixman_transform_init_scale       (struct pixman_transform       *t,
                                                 pixman_fixed_t                 sx,
                                                 pixman_fixed_t                 sy);
pixman_bool_t pixman_transform_scale            (struct pixman_transform       *forward,
                                                 struct pixman_transform       *reverse,
                                                 pixman_fixed_t                 sx,
                                                 pixman_fixed_t                 sy);
void          pixman_transform_init_rotate      (struct pixman_transform       *t,
                                                 pixman_fixed_t                 cos,
                                                 pixman_fixed_t                 sin);
pixman_bool_t pixman_transform_rotate           (struct pixman_transform       *forward,
                                                 struct pixman_transform       *reverse,
                                                 pixman_fixed_t                 c,
                                                 pixman_fixed_t                 s);
void          pixman_transform_init_translate   (struct pixman_transform       *t,
                                                 pixman_fixed_t                 tx,
                                                 pixman_fixed_t                 ty);
pixman_bool_t pixman_transform_translate        (struct pixman_transform       *forward,
                                                 struct pixman_transform       *reverse,
                                                 pixman_fixed_t                 tx,
                                                 pixman_fixed_t                 ty);
pixman_bool_t pixman_transform_bounds           (const struct pixman_transform *matrix,
                                                 struct pixman_box16           *b);
pixman_bool_t pixman_transform_invert           (struct pixman_transform       *dst,
                                                 const struct pixman_transform *src);
pixman_bool_t pixman_transform_is_identity      (const struct pixman_transform *t);
pixman_bool_t pixman_transform_is_scale         (const struct pixman_transform *t);
pixman_bool_t pixman_transform_is_int_translate (const struct pixman_transform *t);
pixman_bool_t pixman_transform_is_inverse       (const struct pixman_transform *a,
                                                 const struct pixman_transform *b);
 
/*
 * Floating point matrices
 */

/* A homogeneous 3-vector in double precision */
struct pixman_f_vector
{
    double v[3];
};

/* A 3x3 double-precision transformation matrix */
struct pixman_f_transform
{
    double m[3][3];
};

/* Double-precision counterparts of the fixed-point transform API,
 * plus conversion in both directions between the two representations.
 */
pixman_bool_t pixman_transform_from_pixman_f_transform (struct pixman_transform         *t,
                                                        const struct pixman_f_transform *ft);
void          pixman_f_transform_from_pixman_transform (struct pixman_f_transform       *ft,
                                                        const struct pixman_transform   *t);
pixman_bool_t pixman_f_transform_invert                (struct pixman_f_transform       *dst,
                                                        const struct pixman_f_transform *src);
pixman_bool_t pixman_f_transform_point                 (const struct pixman_f_transform *t,
                                                        struct pixman_f_vector          *v);
void          pixman_f_transform_point_3d              (const struct pixman_f_transform *t,
                                                        struct pixman_f_vector          *v);
void          pixman_f_transform_multiply              (struct pixman_f_transform       *dst,
                                                        const struct pixman_f_transform *l,
                                                        const struct pixman_f_transform *r);
void          pixman_f_transform_init_scale            (struct pixman_f_transform       *t,
                                                        double                           sx,
                                                        double                           sy);
pixman_bool_t pixman_f_transform_scale                 (struct pixman_f_transform       *forward,
                                                        struct pixman_f_transform       *reverse,
                                                        double                           sx,
                                                        double                           sy);
void          pixman_f_transform_init_rotate           (struct pixman_f_transform       *t,
                                                        double                           cos,
                                                        double                           sin);
pixman_bool_t pixman_f_transform_rotate                (struct pixman_f_transform       *forward,
                                                        struct pixman_f_transform       *reverse,
                                                        double                           c,
                                                        double                           s);
void          pixman_f_transform_init_translate        (struct pixman_f_transform       *t,
                                                        double                           tx,
                                                        double                           ty);
pixman_bool_t pixman_f_transform_translate             (struct pixman_f_transform       *forward,
                                                        struct pixman_f_transform       *reverse,
                                                        double                           tx,
                                                        double                           ty);
pixman_bool_t pixman_f_transform_bounds                (const struct pixman_f_transform *t,
                                                        struct pixman_box16             *b);
void          pixman_f_transform_init_identity         (struct pixman_f_transform       *t);
 
/* How a source image is sampled outside its bounds:
 * NONE yields transparent, NORMAL tiles, PAD clamps to the nearest
 * edge pixel, REFLECT mirrors at each edge. */
typedef enum
{
PIXMAN_REPEAT_NONE,
PIXMAN_REPEAT_NORMAL,
PIXMAN_REPEAT_PAD,
PIXMAN_REPEAT_REFLECT
} pixman_repeat_t;
 
/* Resampling filter used when an image has a transform.
 * FAST/GOOD/BEST are quality hints; NEAREST/BILINEAR select a specific
 * filter; CONVOLUTION uses caller-supplied kernel parameters (see
 * pixman_image_set_filter). */
typedef enum
{
PIXMAN_FILTER_FAST,
PIXMAN_FILTER_GOOD,
PIXMAN_FILTER_BEST,
PIXMAN_FILTER_NEAREST,
PIXMAN_FILTER_BILINEAR,
PIXMAN_FILTER_CONVOLUTION
} pixman_filter_t;
 
/* Composite operators. The explicit numeric values (and the gaps
 * between the groups) are part of the public ABI — do not renumber. */
typedef enum
{
/* Porter-Duff operators */
PIXMAN_OP_CLEAR = 0x00,
PIXMAN_OP_SRC = 0x01,
PIXMAN_OP_DST = 0x02,
PIXMAN_OP_OVER = 0x03,
PIXMAN_OP_OVER_REVERSE = 0x04,
PIXMAN_OP_IN = 0x05,
PIXMAN_OP_IN_REVERSE = 0x06,
PIXMAN_OP_OUT = 0x07,
PIXMAN_OP_OUT_REVERSE = 0x08,
PIXMAN_OP_ATOP = 0x09,
PIXMAN_OP_ATOP_REVERSE = 0x0a,
PIXMAN_OP_XOR = 0x0b,
PIXMAN_OP_ADD = 0x0c,
PIXMAN_OP_SATURATE = 0x0d,
 
/* Disjoint variants (X Render extended operators) */
PIXMAN_OP_DISJOINT_CLEAR = 0x10,
PIXMAN_OP_DISJOINT_SRC = 0x11,
PIXMAN_OP_DISJOINT_DST = 0x12,
PIXMAN_OP_DISJOINT_OVER = 0x13,
PIXMAN_OP_DISJOINT_OVER_REVERSE = 0x14,
PIXMAN_OP_DISJOINT_IN = 0x15,
PIXMAN_OP_DISJOINT_IN_REVERSE = 0x16,
PIXMAN_OP_DISJOINT_OUT = 0x17,
PIXMAN_OP_DISJOINT_OUT_REVERSE = 0x18,
PIXMAN_OP_DISJOINT_ATOP = 0x19,
PIXMAN_OP_DISJOINT_ATOP_REVERSE = 0x1a,
PIXMAN_OP_DISJOINT_XOR = 0x1b,
 
/* Conjoint variants */
PIXMAN_OP_CONJOINT_CLEAR = 0x20,
PIXMAN_OP_CONJOINT_SRC = 0x21,
PIXMAN_OP_CONJOINT_DST = 0x22,
PIXMAN_OP_CONJOINT_OVER = 0x23,
PIXMAN_OP_CONJOINT_OVER_REVERSE = 0x24,
PIXMAN_OP_CONJOINT_IN = 0x25,
PIXMAN_OP_CONJOINT_IN_REVERSE = 0x26,
PIXMAN_OP_CONJOINT_OUT = 0x27,
PIXMAN_OP_CONJOINT_OUT_REVERSE = 0x28,
PIXMAN_OP_CONJOINT_ATOP = 0x29,
PIXMAN_OP_CONJOINT_ATOP_REVERSE = 0x2a,
PIXMAN_OP_CONJOINT_XOR = 0x2b,
 
/* PDF-style blend modes */
PIXMAN_OP_MULTIPLY = 0x30,
PIXMAN_OP_SCREEN = 0x31,
PIXMAN_OP_OVERLAY = 0x32,
PIXMAN_OP_DARKEN = 0x33,
PIXMAN_OP_LIGHTEN = 0x34,
PIXMAN_OP_COLOR_DODGE = 0x35,
PIXMAN_OP_COLOR_BURN = 0x36,
PIXMAN_OP_HARD_LIGHT = 0x37,
PIXMAN_OP_SOFT_LIGHT = 0x38,
PIXMAN_OP_DIFFERENCE = 0x39,
PIXMAN_OP_EXCLUSION = 0x3a,
PIXMAN_OP_HSL_HUE = 0x3b,
PIXMAN_OP_HSL_SATURATION = 0x3c,
PIXMAN_OP_HSL_COLOR = 0x3d,
PIXMAN_OP_HSL_LUMINOSITY = 0x3e
 
#ifdef PIXMAN_USE_INTERNAL_API
,
/* Internal only: operator count / sentinel, not part of the public API */
PIXMAN_N_OPERATORS,
PIXMAN_OP_NONE = PIXMAN_N_OPERATORS
#endif
} pixman_op_t;
 
/*
 * Regions: sets of non-overlapping rectangles with 16-bit coordinates.
 */
typedef struct pixman_region16_data pixman_region16_data_t;
typedef struct pixman_box16 pixman_box16_t;
typedef struct pixman_rectangle16 pixman_rectangle16_t;
typedef struct pixman_region16 pixman_region16_t;
 
/* Header of the variable-sized box storage; the boxes follow this
 * header inside the same allocation. */
struct pixman_region16_data {
long size;
long numRects;
/* pixman_box16_t rects[size]; in memory but not explicitly declared */
};
 
/* Rectangle given as origin plus extent. */
struct pixman_rectangle16
{
int16_t x, y;
uint16_t width, height;
};
 
/* Rectangle given as two corners: (x1, y1) top-left and (x2, y2)
 * bottom-right (conventionally exclusive, X-region style — confirm
 * against pixman-region16.c). */
struct pixman_box16
{
int16_t x1, y1, x2, y2;
};
 
/* A region: its bounding box plus optional box storage (data is
 * presumably NULL or a shared sentinel for trivial regions — confirm). */
struct pixman_region16
{
pixman_box16_t extents;
pixman_region16_data_t *data;
};
 
/* Result of testing a rectangle against a region: fully outside,
 * fully inside, or partially overlapping. */
typedef enum
{
PIXMAN_REGION_OUT,
PIXMAN_REGION_IN,
PIXMAN_REGION_PART
} pixman_region_overlap_t;
 
/* This function exists only to make it possible to preserve
* the X ABI - it should go away at first opportunity.
*/
void pixman_region_set_static_pointers (pixman_box16_t *empty_box,
pixman_region16_data_t *empty_data,
pixman_region16_data_t *broken_data);
 
/* creation/destruction */
void pixman_region_init (pixman_region16_t *region);
void pixman_region_init_rect (pixman_region16_t *region,
int x,
int y,
unsigned int width,
unsigned int height);
pixman_bool_t pixman_region_init_rects (pixman_region16_t *region,
const pixman_box16_t *boxes,
int count);
void pixman_region_init_with_extents (pixman_region16_t *region,
pixman_box16_t *extents);
void pixman_region_init_from_image (pixman_region16_t *region,
pixman_image_t *image);
void pixman_region_fini (pixman_region16_t *region);
 
 
/* manipulation */
void pixman_region_translate (pixman_region16_t *region,
int x,
int y);
pixman_bool_t pixman_region_copy (pixman_region16_t *dest,
pixman_region16_t *source);
pixman_bool_t pixman_region_intersect (pixman_region16_t *new_reg,
pixman_region16_t *reg1,
pixman_region16_t *reg2);
pixman_bool_t pixman_region_union (pixman_region16_t *new_reg,
pixman_region16_t *reg1,
pixman_region16_t *reg2);
pixman_bool_t pixman_region_union_rect (pixman_region16_t *dest,
pixman_region16_t *source,
int x,
int y,
unsigned int width,
unsigned int height);
pixman_bool_t pixman_region_intersect_rect (pixman_region16_t *dest,
pixman_region16_t *source,
int x,
int y,
unsigned int width,
unsigned int height);
pixman_bool_t pixman_region_subtract (pixman_region16_t *reg_d,
pixman_region16_t *reg_m,
pixman_region16_t *reg_s);
pixman_bool_t pixman_region_inverse (pixman_region16_t *new_reg,
pixman_region16_t *reg1,
pixman_box16_t *inv_rect);
pixman_bool_t pixman_region_contains_point (pixman_region16_t *region,
int x,
int y,
pixman_box16_t *box);
pixman_region_overlap_t pixman_region_contains_rectangle (pixman_region16_t *region,
pixman_box16_t *prect);
pixman_bool_t pixman_region_not_empty (pixman_region16_t *region);
pixman_box16_t * pixman_region_extents (pixman_region16_t *region);
int pixman_region_n_rects (pixman_region16_t *region);
pixman_box16_t * pixman_region_rectangles (pixman_region16_t *region,
int *n_rects);
pixman_bool_t pixman_region_equal (pixman_region16_t *region1,
pixman_region16_t *region2);
pixman_bool_t pixman_region_selfcheck (pixman_region16_t *region);
void pixman_region_reset (pixman_region16_t *region,
pixman_box16_t *box);
/*
 * 32 bit regions — identical layout and semantics to the 16-bit
 * variants above, with 32-bit coordinates.
 */
typedef struct pixman_region32_data pixman_region32_data_t;
typedef struct pixman_box32 pixman_box32_t;
typedef struct pixman_rectangle32 pixman_rectangle32_t;
typedef struct pixman_region32 pixman_region32_t;
 
/* Header of the variable-sized box storage; boxes follow in memory. */
struct pixman_region32_data {
long size;
long numRects;
/* pixman_box32_t rects[size]; in memory but not explicitly declared */
};
 
/* Rectangle as origin plus extent. */
struct pixman_rectangle32
{
int32_t x, y;
uint32_t width, height;
};
 
/* Rectangle as two corners (see pixman_box16 for the convention). */
struct pixman_box32
{
int32_t x1, y1, x2, y2;
};
 
/* Region: bounding box plus optional box storage. */
struct pixman_region32
{
pixman_box32_t extents;
pixman_region32_data_t *data;
};
 
/* creation/destruction */
void pixman_region32_init (pixman_region32_t *region);
void pixman_region32_init_rect (pixman_region32_t *region,
int x,
int y,
unsigned int width,
unsigned int height);
pixman_bool_t pixman_region32_init_rects (pixman_region32_t *region,
const pixman_box32_t *boxes,
int count);
void pixman_region32_init_with_extents (pixman_region32_t *region,
pixman_box32_t *extents);
void pixman_region32_init_from_image (pixman_region32_t *region,
pixman_image_t *image);
void pixman_region32_fini (pixman_region32_t *region);
 
 
/* manipulation */
void pixman_region32_translate (pixman_region32_t *region,
int x,
int y);
pixman_bool_t pixman_region32_copy (pixman_region32_t *dest,
pixman_region32_t *source);
pixman_bool_t pixman_region32_intersect (pixman_region32_t *new_reg,
pixman_region32_t *reg1,
pixman_region32_t *reg2);
pixman_bool_t pixman_region32_union (pixman_region32_t *new_reg,
pixman_region32_t *reg1,
pixman_region32_t *reg2);
pixman_bool_t pixman_region32_intersect_rect (pixman_region32_t *dest,
pixman_region32_t *source,
int x,
int y,
unsigned int width,
unsigned int height);
pixman_bool_t pixman_region32_union_rect (pixman_region32_t *dest,
pixman_region32_t *source,
int x,
int y,
unsigned int width,
unsigned int height);
pixman_bool_t pixman_region32_subtract (pixman_region32_t *reg_d,
pixman_region32_t *reg_m,
pixman_region32_t *reg_s);
pixman_bool_t pixman_region32_inverse (pixman_region32_t *new_reg,
pixman_region32_t *reg1,
pixman_box32_t *inv_rect);
pixman_bool_t pixman_region32_contains_point (pixman_region32_t *region,
int x,
int y,
pixman_box32_t *box);
pixman_region_overlap_t pixman_region32_contains_rectangle (pixman_region32_t *region,
pixman_box32_t *prect);
pixman_bool_t pixman_region32_not_empty (pixman_region32_t *region);
pixman_box32_t * pixman_region32_extents (pixman_region32_t *region);
int pixman_region32_n_rects (pixman_region32_t *region);
pixman_box32_t * pixman_region32_rectangles (pixman_region32_t *region,
int *n_rects);
pixman_bool_t pixman_region32_equal (pixman_region32_t *region1,
pixman_region32_t *region2);
pixman_bool_t pixman_region32_selfcheck (pixman_region32_t *region);
void pixman_region32_reset (pixman_region32_t *region,
pixman_box32_t *box);
 
 
/* Copy / Fill / Misc */
pixman_bool_t pixman_blt (uint32_t *src_bits,
uint32_t *dst_bits,
int src_stride,
int dst_stride,
int src_bpp,
int dst_bpp,
int src_x,
int src_y,
int dst_x,
int dst_y,
int width,
int height);
pixman_bool_t pixman_fill (uint32_t *bits,
int stride,
int bpp,
int x,
int y,
int width,
int height,
uint32_t _xor);
 
int pixman_version (void);
const char* pixman_version_string (void);
 
/*
 * Images
 */
typedef struct pixman_indexed pixman_indexed_t;
typedef struct pixman_gradient_stop pixman_gradient_stop_t;
 
/* Accessor hooks letting an image read/write pixels through
 * user-supplied functions instead of direct memory access
 * ('size' is presumably in bytes — confirm against pixman-access.c). */
typedef uint32_t (* pixman_read_memory_func_t) (const void *src, int size);
typedef void (* pixman_write_memory_func_t) (void *dst, uint32_t value, int size);
 
/* Invoked when the image's last reference is released; 'data' is the
 * pointer registered with pixman_image_set_destroy_function(). */
typedef void (* pixman_image_destroy_func_t) (pixman_image_t *image, void *data);
 
/* One gradient color stop at fixed-point position x along the gradient. */
struct pixman_gradient_stop {
pixman_fixed_t x;
pixman_color_t color;
};
 
#define PIXMAN_MAX_INDEXED 256 /* XXX depth must be <= 8 */
 
#if PIXMAN_MAX_INDEXED <= 256
typedef uint8_t pixman_index_type;
#endif
 
/* Palette for indexed (c1/c4/c8, g1/g4/g8) formats: 'rgba' maps a pixel
 * index to a color; 'ent' maps a 15-bit r5g5b5 value back to the nearest
 * palette index. 'color' presumably distinguishes color palettes from
 * gray ramps — confirm against pixman-access.c. */
struct pixman_indexed
{
pixman_bool_t color;
uint32_t rgba[PIXMAN_MAX_INDEXED];
pixman_index_type ent[32768];
};
 
/*
 * While the protocol is generous in format support, the
 * sample implementation allows only packed RGB and GBR
 * representations for data to simplify software rendering.
 */
/* Encode a format code: total bits per pixel (8 bits), channel layout
 * type (8 bits), then the bit widths of the A, R, G and B channels
 * (4 bits each). */
#define PIXMAN_FORMAT(bpp,type,a,r,g,b) (((bpp) << 24) | \
((type) << 16) | \
((a) << 12) | \
((r) << 8) | \
((g) << 4) | \
((b)))
 
/* Field accessors for codes built with PIXMAN_FORMAT */
#define PIXMAN_FORMAT_BPP(f) (((f) >> 24) )
#define PIXMAN_FORMAT_TYPE(f) (((f) >> 16) & 0xff)
#define PIXMAN_FORMAT_A(f) (((f) >> 12) & 0x0f)
#define PIXMAN_FORMAT_R(f) (((f) >> 8) & 0x0f)
#define PIXMAN_FORMAT_G(f) (((f) >> 4) & 0x0f)
#define PIXMAN_FORMAT_B(f) (((f) ) & 0x0f)
#define PIXMAN_FORMAT_RGB(f) (((f) ) & 0xfff)
#define PIXMAN_FORMAT_VIS(f) (((f) ) & 0xffff)
/* Depth = sum of the four channel widths (may be less than BPP for
 * formats with padding bits, e.g. x8r8g8b8). */
#define PIXMAN_FORMAT_DEPTH(f) (PIXMAN_FORMAT_A(f) + \
PIXMAN_FORMAT_R(f) + \
PIXMAN_FORMAT_G(f) + \
PIXMAN_FORMAT_B(f))
 
/* Channel layout types used in the 'type' field above */
#define PIXMAN_TYPE_OTHER 0
#define PIXMAN_TYPE_A 1
#define PIXMAN_TYPE_ARGB 2
#define PIXMAN_TYPE_ABGR 3
#define PIXMAN_TYPE_COLOR 4
#define PIXMAN_TYPE_GRAY 5
#define PIXMAN_TYPE_YUY2 6
#define PIXMAN_TYPE_YV12 7
#define PIXMAN_TYPE_BGRA 8
 
/* True for formats with separate R, G and B channels */
#define PIXMAN_FORMAT_COLOR(f) \
(PIXMAN_FORMAT_TYPE(f) == PIXMAN_TYPE_ARGB || \
PIXMAN_FORMAT_TYPE(f) == PIXMAN_TYPE_ABGR || \
PIXMAN_FORMAT_TYPE(f) == PIXMAN_TYPE_BGRA)
 
/* All pixel formats pixman knows about. Names read most-significant
 * channel first; each value is self-describing via PIXMAN_FORMAT(). */
/* 32bpp formats */
typedef enum {
PIXMAN_a8r8g8b8 = PIXMAN_FORMAT(32,PIXMAN_TYPE_ARGB,8,8,8,8),
PIXMAN_x8r8g8b8 = PIXMAN_FORMAT(32,PIXMAN_TYPE_ARGB,0,8,8,8),
PIXMAN_a8b8g8r8 = PIXMAN_FORMAT(32,PIXMAN_TYPE_ABGR,8,8,8,8),
PIXMAN_x8b8g8r8 = PIXMAN_FORMAT(32,PIXMAN_TYPE_ABGR,0,8,8,8),
PIXMAN_b8g8r8a8 = PIXMAN_FORMAT(32,PIXMAN_TYPE_BGRA,8,8,8,8),
PIXMAN_b8g8r8x8 = PIXMAN_FORMAT(32,PIXMAN_TYPE_BGRA,0,8,8,8),
PIXMAN_x14r6g6b6 = PIXMAN_FORMAT(32,PIXMAN_TYPE_ARGB,0,6,6,6),
PIXMAN_x2r10g10b10 = PIXMAN_FORMAT(32,PIXMAN_TYPE_ARGB,0,10,10,10),
PIXMAN_a2r10g10b10 = PIXMAN_FORMAT(32,PIXMAN_TYPE_ARGB,2,10,10,10),
PIXMAN_x2b10g10r10 = PIXMAN_FORMAT(32,PIXMAN_TYPE_ABGR,0,10,10,10),
PIXMAN_a2b10g10r10 = PIXMAN_FORMAT(32,PIXMAN_TYPE_ABGR,2,10,10,10),
 
/* 24bpp formats */
PIXMAN_r8g8b8 = PIXMAN_FORMAT(24,PIXMAN_TYPE_ARGB,0,8,8,8),
PIXMAN_b8g8r8 = PIXMAN_FORMAT(24,PIXMAN_TYPE_ABGR,0,8,8,8),
 
/* 16bpp formats */
PIXMAN_r5g6b5 = PIXMAN_FORMAT(16,PIXMAN_TYPE_ARGB,0,5,6,5),
PIXMAN_b5g6r5 = PIXMAN_FORMAT(16,PIXMAN_TYPE_ABGR,0,5,6,5),
 
PIXMAN_a1r5g5b5 = PIXMAN_FORMAT(16,PIXMAN_TYPE_ARGB,1,5,5,5),
PIXMAN_x1r5g5b5 = PIXMAN_FORMAT(16,PIXMAN_TYPE_ARGB,0,5,5,5),
PIXMAN_a1b5g5r5 = PIXMAN_FORMAT(16,PIXMAN_TYPE_ABGR,1,5,5,5),
PIXMAN_x1b5g5r5 = PIXMAN_FORMAT(16,PIXMAN_TYPE_ABGR,0,5,5,5),
PIXMAN_a4r4g4b4 = PIXMAN_FORMAT(16,PIXMAN_TYPE_ARGB,4,4,4,4),
PIXMAN_x4r4g4b4 = PIXMAN_FORMAT(16,PIXMAN_TYPE_ARGB,0,4,4,4),
PIXMAN_a4b4g4r4 = PIXMAN_FORMAT(16,PIXMAN_TYPE_ABGR,4,4,4,4),
PIXMAN_x4b4g4r4 = PIXMAN_FORMAT(16,PIXMAN_TYPE_ABGR,0,4,4,4),
 
/* 8bpp formats */
PIXMAN_a8 = PIXMAN_FORMAT(8,PIXMAN_TYPE_A,8,0,0,0),
PIXMAN_r3g3b2 = PIXMAN_FORMAT(8,PIXMAN_TYPE_ARGB,0,3,3,2),
PIXMAN_b2g3r3 = PIXMAN_FORMAT(8,PIXMAN_TYPE_ABGR,0,3,3,2),
PIXMAN_a2r2g2b2 = PIXMAN_FORMAT(8,PIXMAN_TYPE_ARGB,2,2,2,2),
PIXMAN_a2b2g2r2 = PIXMAN_FORMAT(8,PIXMAN_TYPE_ABGR,2,2,2,2),
 
/* c = color-indexed (palette), g = grayscale */
PIXMAN_c8 = PIXMAN_FORMAT(8,PIXMAN_TYPE_COLOR,0,0,0,0),
PIXMAN_g8 = PIXMAN_FORMAT(8,PIXMAN_TYPE_GRAY,0,0,0,0),
 
PIXMAN_x4a4 = PIXMAN_FORMAT(8,PIXMAN_TYPE_A,4,0,0,0),
 
PIXMAN_x4c4 = PIXMAN_FORMAT(8,PIXMAN_TYPE_COLOR,0,0,0,0),
PIXMAN_x4g4 = PIXMAN_FORMAT(8,PIXMAN_TYPE_GRAY,0,0,0,0),
 
/* 4bpp formats */
PIXMAN_a4 = PIXMAN_FORMAT(4,PIXMAN_TYPE_A,4,0,0,0),
PIXMAN_r1g2b1 = PIXMAN_FORMAT(4,PIXMAN_TYPE_ARGB,0,1,2,1),
PIXMAN_b1g2r1 = PIXMAN_FORMAT(4,PIXMAN_TYPE_ABGR,0,1,2,1),
PIXMAN_a1r1g1b1 = PIXMAN_FORMAT(4,PIXMAN_TYPE_ARGB,1,1,1,1),
PIXMAN_a1b1g1r1 = PIXMAN_FORMAT(4,PIXMAN_TYPE_ABGR,1,1,1,1),
 
PIXMAN_c4 = PIXMAN_FORMAT(4,PIXMAN_TYPE_COLOR,0,0,0,0),
PIXMAN_g4 = PIXMAN_FORMAT(4,PIXMAN_TYPE_GRAY,0,0,0,0),
 
/* 1bpp formats */
PIXMAN_a1 = PIXMAN_FORMAT(1,PIXMAN_TYPE_A,1,0,0,0),
 
PIXMAN_g1 = PIXMAN_FORMAT(1,PIXMAN_TYPE_GRAY,0,0,0,0),
 
/* YUV formats */
PIXMAN_yuy2 = PIXMAN_FORMAT(16,PIXMAN_TYPE_YUY2,0,0,0,0),
PIXMAN_yv12 = PIXMAN_FORMAT(12,PIXMAN_TYPE_YV12,0,0,0,0)
} pixman_format_code_t;
 
/* Querying supported format values. */
pixman_bool_t pixman_format_supported_destination (pixman_format_code_t format);
pixman_bool_t pixman_format_supported_source (pixman_format_code_t format);
 
/* Constructors */
pixman_image_t *pixman_image_create_solid_fill (pixman_color_t *color);
pixman_image_t *pixman_image_create_linear_gradient (pixman_point_fixed_t *p1,
pixman_point_fixed_t *p2,
const pixman_gradient_stop_t *stops,
int n_stops);
pixman_image_t *pixman_image_create_radial_gradient (pixman_point_fixed_t *inner,
pixman_point_fixed_t *outer,
pixman_fixed_t inner_radius,
pixman_fixed_t outer_radius,
const pixman_gradient_stop_t *stops,
int n_stops);
pixman_image_t *pixman_image_create_conical_gradient (pixman_point_fixed_t *center,
pixman_fixed_t angle,
const pixman_gradient_stop_t *stops,
int n_stops);
pixman_image_t *pixman_image_create_bits (pixman_format_code_t format,
int width,
int height,
uint32_t *bits,
int rowstride_bytes);
 
/* Destructor */
pixman_image_t *pixman_image_ref (pixman_image_t *image);
pixman_bool_t pixman_image_unref (pixman_image_t *image);
 
void pixman_image_set_destroy_function (pixman_image_t *image,
pixman_image_destroy_func_t function,
void *data);
void * pixman_image_get_destroy_data (pixman_image_t *image);
 
/* Set properties */
pixman_bool_t pixman_image_set_clip_region (pixman_image_t *image,
pixman_region16_t *region);
pixman_bool_t pixman_image_set_clip_region32 (pixman_image_t *image,
pixman_region32_t *region);
void pixman_image_set_has_client_clip (pixman_image_t *image,
pixman_bool_t clien_clip);
pixman_bool_t pixman_image_set_transform (pixman_image_t *image,
const pixman_transform_t *transform);
void pixman_image_set_repeat (pixman_image_t *image,
pixman_repeat_t repeat);
pixman_bool_t pixman_image_set_filter (pixman_image_t *image,
pixman_filter_t filter,
const pixman_fixed_t *filter_params,
int n_filter_params);
void pixman_image_set_source_clipping (pixman_image_t *image,
pixman_bool_t source_clipping);
void pixman_image_set_alpha_map (pixman_image_t *image,
pixman_image_t *alpha_map,
int16_t x,
int16_t y);
void pixman_image_set_component_alpha (pixman_image_t *image,
pixman_bool_t component_alpha);
pixman_bool_t pixman_image_get_component_alpha (pixman_image_t *image);
void pixman_image_set_accessors (pixman_image_t *image,
pixman_read_memory_func_t read_func,
pixman_write_memory_func_t write_func);
void pixman_image_set_indexed (pixman_image_t *image,
const pixman_indexed_t *indexed);
uint32_t *pixman_image_get_data (pixman_image_t *image);
int pixman_image_get_width (pixman_image_t *image);
int pixman_image_get_height (pixman_image_t *image);
int pixman_image_get_stride (pixman_image_t *image); /* in bytes */
int pixman_image_get_depth (pixman_image_t *image);
pixman_format_code_t pixman_image_get_format (pixman_image_t *image);
pixman_bool_t pixman_image_fill_rectangles (pixman_op_t op,
pixman_image_t *image,
pixman_color_t *color,
int n_rects,
const pixman_rectangle16_t *rects);
pixman_bool_t pixman_image_fill_boxes (pixman_op_t op,
pixman_image_t *dest,
pixman_color_t *color,
int n_boxes,
const pixman_box32_t *boxes);
 
/* Composite */
pixman_bool_t pixman_compute_composite_region (pixman_region16_t *region,
pixman_image_t *src_image,
pixman_image_t *mask_image,
pixman_image_t *dst_image,
int16_t src_x,
int16_t src_y,
int16_t mask_x,
int16_t mask_y,
int16_t dest_x,
int16_t dest_y,
uint16_t width,
uint16_t height);
void pixman_image_composite (pixman_op_t op,
pixman_image_t *src,
pixman_image_t *mask,
pixman_image_t *dest,
int16_t src_x,
int16_t src_y,
int16_t mask_x,
int16_t mask_y,
int16_t dest_x,
int16_t dest_y,
uint16_t width,
uint16_t height);
void pixman_image_composite32 (pixman_op_t op,
pixman_image_t *src,
pixman_image_t *mask,
pixman_image_t *dest,
int32_t src_x,
int32_t src_y,
int32_t mask_x,
int32_t mask_y,
int32_t dest_x,
int32_t dest_y,
int32_t width,
int32_t height);
 
/* Old X servers rely on out-of-bounds accesses when they are asked
* to composite with a window as the source. They create a pixman image
* pointing to some bogus position in memory, but then they set a clip
* region to the position where the actual bits are.
*
* Due to a bug in old versions of pixman, where it would not clip
* against the image bounds when a clip region was set, this would
* actually work. So by default we allow certain out-of-bound access
* to happen unless explicitly disabled.
*
* Fixed X servers should call this function to disable the workaround.
*/
void pixman_disable_out_of_bounds_workaround (void);
 
/*
 * Trapezoids
 */
typedef struct pixman_edge pixman_edge_t;
typedef struct pixman_trapezoid pixman_trapezoid_t;
typedef struct pixman_trap pixman_trap_t;
typedef struct pixman_span_fix pixman_span_fix_t;
 
/*
 * An edge structure. This represents a single polygon edge
 * and can be quickly stepped across small or large gaps in the
 * sample grid
 */
struct pixman_edge
{
/* Current intersection state: x is the current x position, e the
 * accumulated error term of the Bresenham-style stepper; stepx,
 * signdx, dy, dx describe the edge slope. Exact roles are set up by
 * pixman_edge_init() — see pixman-edge.c for the stepping math. */
pixman_fixed_t x;
pixman_fixed_t e;
pixman_fixed_t stepx;
pixman_fixed_t signdx;
pixman_fixed_t dy;
pixman_fixed_t dx;
 
/* Precomputed increments for stepping by one small (single sample
 * row) or one big (full pixel row) step. */
pixman_fixed_t stepx_small;
pixman_fixed_t stepx_big;
pixman_fixed_t dx_small;
pixman_fixed_t dx_big;
};
 
/* X Render style trapezoid: two horizontal bounds and two arbitrary
 * (possibly non-vertical) side lines. */
struct pixman_trapezoid
{
pixman_fixed_t top, bottom;
pixman_line_fixed_t left, right;
};
 
 
/* whether 't' is a well defined not obviously empty trapezoid */
#define pixman_trapezoid_valid(t) \
((t)->left.p1.y != (t)->left.p2.y && \
(t)->right.p1.y != (t)->right.p2.y && \
(int) ((t)->bottom - (t)->top) > 0)
 
/* One horizontal span: left edge, right edge, and its y coordinate. */
struct pixman_span_fix
{
pixman_fixed_t l, r, y;
};
 
/* A trap bounded by a top and a bottom span. */
struct pixman_trap
{
pixman_span_fix_t top, bot;
};
 
pixman_fixed_t pixman_sample_ceil_y (pixman_fixed_t y,
int bpp);
pixman_fixed_t pixman_sample_floor_y (pixman_fixed_t y,
int bpp);
void pixman_edge_step (pixman_edge_t *e,
int n);
void pixman_edge_init (pixman_edge_t *e,
int bpp,
pixman_fixed_t y_start,
pixman_fixed_t x_top,
pixman_fixed_t y_top,
pixman_fixed_t x_bot,
pixman_fixed_t y_bot);
void pixman_line_fixed_edge_init (pixman_edge_t *e,
int bpp,
pixman_fixed_t y,
const pixman_line_fixed_t *line,
int x_off,
int y_off);
void pixman_rasterize_edges (pixman_image_t *image,
pixman_edge_t *l,
pixman_edge_t *r,
pixman_fixed_t t,
pixman_fixed_t b);
void pixman_add_traps (pixman_image_t *image,
int16_t x_off,
int16_t y_off,
int ntrap,
pixman_trap_t *traps);
void pixman_add_trapezoids (pixman_image_t *image,
int16_t x_off,
int y_off,
int ntraps,
const pixman_trapezoid_t *traps);
void pixman_rasterize_trapezoid (pixman_image_t *image,
const pixman_trapezoid_t *trap,
int x_off,
int y_off);
 
PIXMAN_END_DECLS
 
#endif /* PIXMAN_H__ */
/programs/develop/libraries/pixman/test/window-test.c
0,0 → 1,173
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <config.h>
#include "pixman-private.h"
#include "pixman.h"
 
#define FALSE 0
#define TRUE 1
 
/* Randomly decide between 32 and 16 bit
*
* Allocate bits with random width, stride and height
*
* Then make up some random offset (dx, dy)
*
* Then make an image with those values.
*
* Do this for both source and destination
*
* Composite them together using OVER.
*
* The bits in the source and the destination should have
* recognizable colors so that the result can be verified.
*
 * I.e., walk the bits and verify that they have been composited.
*/
 
/* Return a pseudo-random integer in the range [0, bound). */
static int
get_rand (int bound)
{
    int r = rand ();

    return r % bound;
}
 
/*
 * Create a width x height test image.
 *
 * src != 0:  a8r8g8b8 image filled with 0x7f00007f, given a 0.5x
 *            scale about its center, bilinear filtering and PAD
 *            repeat; *rx and *ry are set to 0.
 * src == 0:  r5g6b5 image filled with 0xf100 whose bits pointer is
 *            deliberately offset by a random (dx, dy) and covered by
 *            a clip region over the real storage, simulating the
 *            out-of-bounds window trick used by old X servers;
 *            *rx and *ry return (dx, dy).
 *
 * The pixel buffer is intentionally never freed (test program).
 */
static pixman_image_t *
make_image (int width, int height, pixman_bool_t src, int *rx, int *ry)
{
    pixman_format_code_t format;
    pixman_image_t *image;
    pixman_region32_t region;
    uint8_t *bits;
    int stride;
    int bpp;
    int dx, dy;
    int i, j;

    if (src)
	format = PIXMAN_a8r8g8b8;
    else
	format = PIXMAN_r5g6b5;

    bpp = PIXMAN_FORMAT_BPP (format) / 8;

    /* Random stride >= width, rounded up to an even pixel count */
    stride = width + get_rand (width);
    stride += (stride & 1);

    bits = malloc (height * stride * bpp);
    if (!bits)
    {
	/* fixed: the allocation was previously used unchecked */
	fprintf (stderr, "make_image: out of memory\n");
	exit (EXIT_FAILURE);
    }

    for (j = 0; j < height; ++j)
    {
	for (i = 0; i < width; ++i)
	{
	    uint8_t *pixel = bits + (stride * j + i) * bpp;

	    if (src)
		*(uint32_t *)pixel = 0x7f00007f;
	    else
		*(uint16_t *)pixel = 0xf100;
	}
    }

    dx = dy = 0;

    dx = get_rand (500);
    dy = get_rand (500);

    if (!src)
    {
	/* Now simulate the bogus X server translations */
	bits -= (dy * stride + dx) * bpp;
    }

    image = pixman_image_create_bits (
	format, width, height, (uint32_t *)bits, stride * bpp);

    if (!src)
    {
	/* And add the bogus clip region.
	 * NOTE(review): pixman_region32_init_rect() takes (x, y, width,
	 * height), so passing dx + width / dy + height makes the region
	 * extend well past the real storage — confirm this is intended
	 * rather than an x2/y2 mix-up. */
	pixman_region32_init_rect (&region, dx, dy, dx + width, dy + height);

	pixman_image_set_clip_region32 (image, &region);
    }

    pixman_image_set_source_clipping (image, TRUE);

    if (src)
    {
	pixman_transform_t trans;

	/* Scale by 0.5 about the image center:
	 * translate center to origin, scale, translate back. */
	pixman_transform_init_identity (&trans);

	pixman_transform_translate (&trans,
				    NULL,
				    - pixman_int_to_fixed (width / 2),
				    - pixman_int_to_fixed (height / 2));

	pixman_transform_scale (&trans,
				NULL,
				pixman_double_to_fixed (0.5),
				pixman_double_to_fixed (0.5));

	pixman_transform_translate (&trans,
				    NULL,
				    pixman_int_to_fixed (width / 2),
				    pixman_int_to_fixed (height / 2));

	pixman_image_set_transform (image, &trans);
	pixman_image_set_filter (image, PIXMAN_FILTER_BILINEAR, NULL, 0);
	pixman_image_set_repeat (image, PIXMAN_REPEAT_PAD);
    }

    if (!src)
    {
	*rx = dx;
	*ry = dy;
    }
    else
    {
	*rx = *ry = 0;
    }

    return image;
}
 
/*
 * Composite a transformed a8r8g8b8 source OVER an r5g6b5 destination
 * whose bits pointer / clip region simulate the old X server
 * out-of-bounds trick, then verify every destination pixel equals the
 * expected composite value 0x788f (0x7f00007f OVER 0xf100 in r5g6b5).
 * Returns 0 on success; asserts on the first bad pixel.
 */
int
main ()
{
    pixman_image_t *src, *dest;
    int src_x, src_y, dest_x, dest_y;
    int i, j;
    int width = get_rand (499) + 1;
    int height = get_rand (499) + 1;

    src = make_image (width, height, TRUE, &src_x, &src_y);
    dest = make_image (width, height, FALSE, &dest_x, &dest_y);

    pixman_image_composite (
	PIXMAN_OP_OVER, src, NULL, dest,
	src_x, src_y,
	-1, -1,			/* mask is NULL, so mask_x/mask_y are unused */
	dest_x, dest_y,
	width, height);

    for (i = 0; i < height; ++i)
    {
	for (j = 0; j < width; ++j)
	{
	    uint8_t *bits = (uint8_t *)dest->bits.bits;
	    int bpp = PIXMAN_FORMAT_BPP (dest->bits.format) / 8;
	    int stride = dest->bits.rowstride * 4;	/* rowstride is in uint32_t units */

	    uint8_t *pixel =
		bits + (i + dest_y) * stride + (j + dest_x) * bpp;

	    if (*(uint16_t *)pixel != 0x788f)
	    {
		printf ("bad pixel %x\n", *(uint16_t *)pixel);
		assert (*(uint16_t *)pixel == 0x788f);
	    }
	}
    }

    /* fixed: release the image references (the pixel buffers themselves
     * are owned and intentionally leaked by make_image). */
    pixman_image_unref (src);
    pixman_image_unref (dest);

    return 0;
}
/programs/develop/libraries/pixman/test
Property changes:
Added: tsvn:logminsize
+5
\ No newline at end of property
/programs/develop/libraries/pixman
Property changes:
Added: tsvn:logminsize
+5
\ No newline at end of property