
  1. /* -*- Mode: c; tab-width: 8; c-basic-offset: 4; indent-tabs-mode: t; -*- */
  2. /* glitter-paths - polygon scan converter
  3.  *
  4.  * Copyright (c) 2008  M Joonas Pihlaja
  5.  * Copyright (c) 2007  David Turner
  6.  *
  7.  * Permission is hereby granted, free of charge, to any person
  8.  * obtaining a copy of this software and associated documentation
  9.  * files (the "Software"), to deal in the Software without
  10.  * restriction, including without limitation the rights to use,
  11.  * copy, modify, merge, publish, distribute, sublicense, and/or sell
  12.  * copies of the Software, and to permit persons to whom the
  13.  * Software is furnished to do so, subject to the following
  14.  * conditions:
  15.  *
  16.  * The above copyright notice and this permission notice shall be
  17.  * included in all copies or substantial portions of the Software.
  18.  *
  19.  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  20.  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
  21.  * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  22.  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
  23.  * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
  24.  * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  25.  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  26.  * OTHER DEALINGS IN THE SOFTWARE.
  27.  */
  28. /* This is the Glitter paths scan converter incorporated into cairo.
  29.  * The source is from commit 734c53237a867a773640bd5b64816249fa1730f8
  30.  * of
  31.  *
  32.  *   http://gitweb.freedesktop.org/?p=users/joonas/glitter-paths
  33.  */
  34. /* Glitter-paths is a stand alone polygon rasteriser derived from
  35.  * David Turner's reimplementation of Tor Andersson's 15x17
  36.  * supersampling rasteriser from the Apparition graphics library.  The
  37.  * main new feature here is cheaply choosing per-scan line between
  38.  * doing fully analytical coverage computation for an entire row at a
  39.  * time vs. using a supersampling approach.
  40.  *
  41.  * David Turner's code can be found at
  42.  *
  43.  *   http://david.freetype.org/rasterizer-shootout/raster-comparison-20070813.tar.bz2
  44.  *
  45.  * In particular this file incorporates large parts of ftgrays_tor10.h
  46.  * from raster-comparison-20070813.tar.bz2
  47.  */
  48. /* Overview
  49.  *
  50.  * A scan converter's basic purpose is to take polygon edges and convert
  51.  * them into an RLE compressed A8 mask.  This one works in two phases:
  52.  * gathering edges and generating spans.
  53.  *
  54.  * 1) As the user feeds the scan converter edges they are vertically
  55.  * clipped and bucketed into a _polygon_ data structure.  The edges
  56.  * are also snapped from the user's coordinates to the subpixel grid
  57.  * coordinates used during scan conversion.
  58.  *
  59.  *     user
  60.  *      |
  61.  *      | edges
  62.  *      V
  63.  *    polygon buckets
  64.  *
  65.  * 2) Generating spans works by performing a vertical sweep of pixel
  66.  * rows from top to bottom and maintaining an _active_list_ of edges
  67.  * that intersect the row.  From the active list the fill rule
  68.  * determines which edges are the left and right edges of the start of
  69.  * each span, and their contribution is then accumulated into a pixel
  70.  * coverage list (_cell_list_) as coverage deltas.  Once the coverage
  71.  * deltas of all edges are known we can form spans of constant pixel
  72.  * coverage by summing the deltas during a traversal of the cell list.
  73.  * At the end of a pixel row the cell list is sent to a coverage
  74.  * blitter for rendering to some target surface.
  75.  *
  76.  * The pixel coverages are computed by either supersampling the row
  77.  * and box filtering a mono rasterisation, or by computing the exact
  78.  * coverages of edges in the active list.  The supersampling method is
  79.  * used whenever some edge starts or stops within the row or there are
  80.  * edge intersections in the row.
  81.  *
  82.  *   polygon bucket for       \
  83.  *   current pixel row        |
  84.  *      |                     |
  85.  *      | activate new edges  |  Repeat GRID_Y times if we
  86.  *      V                     \  are supersampling this row,
  87.  *   active list              /  or just once if we're computing
  88.  *      |                     |  analytical coverage.
  89.  *      | coverage deltas     |
  90.  *      V                     |
  91.  *   pixel coverage list     /
  92.  *      |
  93.  *      V
  94.  *   coverage blitter
  95.  */
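/* A rough driver sketch of the two phases above (illustrative only; the
 * routine that performs the sweep and hands the spans to the blitter is
 * not part of this excerpt):
 *
 *   glitter_scan_converter_t *c = ...;
 *   glitter_scan_converter_reset (c, xmin, ymin, xmax, ymax);
 *   for (i = 0; i < num_edges; i++)                      // phase 1
 *       glitter_scan_converter_add_edge (c, &edges[i]);
 *   // phase 2: sweep pixel rows, form spans, blit coverage
 */
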
  96. #include "cairoint.h"
  97. #include "cairo-spans-private.h"
  98. #include "cairo-error-private.h"
  99.  
  100. #include <stdlib.h>
  101. #include <string.h>
  102. #include <limits.h>
  103. #include <setjmp.h>
  104.  
  105. /*-------------------------------------------------------------------------
  106.  * cairo specific config
  107.  */
  108. #define I static
  109.  
  110. /* Prefer cairo's status type. */
  111. #define GLITTER_HAVE_STATUS_T 1
  112. #define GLITTER_STATUS_SUCCESS CAIRO_STATUS_SUCCESS
  113. #define GLITTER_STATUS_NO_MEMORY CAIRO_STATUS_NO_MEMORY
  114. typedef cairo_status_t glitter_status_t;
  115.  
  116. /* The input coordinate scale and the rasterisation grid scales. */
  117. #define GLITTER_INPUT_BITS CAIRO_FIXED_FRAC_BITS
  118. #define GRID_X_BITS CAIRO_FIXED_FRAC_BITS
  119. #define GRID_Y 15
  120.  
  121. /* Set glitter up to use a cairo span renderer to do the coverage
  122.  * blitting. */
  123. struct pool;
  124. struct cell_list;
  125.  
  126. /*-------------------------------------------------------------------------
  127.  * glitter-paths.h
  128.  */
  129.  
  130. /* "Input scaled" numbers are fixed precision reals with multiplier
  131.  * 2**GLITTER_INPUT_BITS.  Input coordinates are given to glitter as
  132.  * pixel scaled numbers.  These get converted to the internal grid
  133.  * scaled numbers as soon as possible. Internal overflow is possible
  134.  * if GRID_X/Y inside glitter-paths.c is larger than
  135.  * 1<<GLITTER_INPUT_BITS. */
  136. #ifndef GLITTER_INPUT_BITS
  137. #  define GLITTER_INPUT_BITS 8
  138. #endif
  139. #define GLITTER_INPUT_SCALE (1<<GLITTER_INPUT_BITS)
  140. typedef int glitter_input_scaled_t;
  141.  
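/* Worked example (a sketch, assuming GLITTER_INPUT_BITS == 8, i.e. a
 * 24.8 fixed-point input format): the pixel coordinate 2.5 arrives as
 * the input scaled integer 2.5 * (1<<8) == 640; the INPUT_TO_GRID_X/Y
 * macros later in this file map such values onto the GRID_X/GRID_Y
 * subpixel grid. */
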
  142. #if !GLITTER_HAVE_STATUS_T
  143. typedef enum {
  144.     GLITTER_STATUS_SUCCESS = 0,
  145.     GLITTER_STATUS_NO_MEMORY
  146. } glitter_status_t;
  147. #endif
  148.  
  149. #ifndef I
  150. # define I /*static*/
  151. #endif
  152.  
  153. /* Opaque type for scan converting. */
  154. typedef struct glitter_scan_converter glitter_scan_converter_t;
  155.  
  156. /* Reset a scan converter to accept polygon edges and set the clip box
  157.  * in pixels.  Allocates O(ymax-ymin) bytes of memory.  The clip box
  158.  * is set to integer pixel coordinates xmin <= x < xmax, ymin <= y <
  159.  * ymax. */
  160. I glitter_status_t
  161. glitter_scan_converter_reset(
  162.     glitter_scan_converter_t *converter,
  163.     int xmin, int ymin,
  164.     int xmax, int ymax);
  165.  
  166. /* Render the polygon in the scan converter to the given A8 format
  167.  * image raster.  Only the pixels accessible as pixels[y*stride+x] for
  168.  * x,y inside the clip box are written to, where xmin <= x < xmax,
  169.  * ymin <= y < ymax.  The image is assumed to be clear on input.
  170.  *
  171.  * If nonzero_fill is true then the interior of the polygon is
  172.  * computed with the non-zero fill rule.  Otherwise the even-odd fill
  173.  * rule is used.
  174.  *
  175.  * The scan converter must be reset or destroyed after this call. */
  176.  
  177. /*-------------------------------------------------------------------------
  178.  * glitter-paths.c: Implementation internal types
  179.  */
  180. #include <stdlib.h>
  181. #include <string.h>
  182. #include <limits.h>
  183.  
  184. /* All polygon coordinates are snapped onto a subsample grid. "Grid
  185.  * scaled" numbers are fixed precision reals with multiplier GRID_X or
  186.  * GRID_Y. */
  187. typedef int grid_scaled_t;
  188. typedef int grid_scaled_x_t;
  189. typedef int grid_scaled_y_t;
  190.  
  191. /* Default x/y scale factors.
  192.  *  You can either define GRID_X/Y_BITS to get a power-of-two scale
  193.  *  or define GRID_X/Y separately. */
  194. #if !defined(GRID_X) && !defined(GRID_X_BITS)
  195. #  define GRID_X_BITS 8
  196. #endif
  197. #if !defined(GRID_Y) && !defined(GRID_Y_BITS)
  198. #  define GRID_Y 15
  199. #endif
  200.  
  201. /* Use GRID_X/Y_BITS to define GRID_X/Y if they're available. */
  202. #ifdef GRID_X_BITS
  203. #  define GRID_X (1 << GRID_X_BITS)
  204. #endif
  205. #ifdef GRID_Y_BITS
  206. #  define GRID_Y (1 << GRID_Y_BITS)
  207. #endif
  208.  
  209. /* The GRID_X_TO_INT_FRAC macro splits a grid scaled coordinate into
  210.  * integer and fractional parts. The integer part is floored. */
  211. #if defined(GRID_X_TO_INT_FRAC)
  212.   /* do nothing */
  213. #elif defined(GRID_X_BITS)
  214. #  define GRID_X_TO_INT_FRAC(x, i, f) \
  215.         _GRID_TO_INT_FRAC_shift(x, i, f, GRID_X_BITS)
  216. #else
  217. #  define GRID_X_TO_INT_FRAC(x, i, f) \
  218.         _GRID_TO_INT_FRAC_general(x, i, f, GRID_X)
  219. #endif
  220.  
  221. #define _GRID_TO_INT_FRAC_general(t, i, f, m) do {      \
  222.     (i) = (t) / (m);                                    \
  223.     (f) = (t) % (m);                                    \
  224.     if ((f) < 0) {                                      \
  225.         --(i);                                          \
  226.         (f) += (m);                                     \
  227.     }                                                   \
  228. } while (0)
  229.  
  230. #define _GRID_TO_INT_FRAC_shift(t, i, f, b) do {        \
  231.     (f) = (t) & ((1 << (b)) - 1);                       \
  232.     (i) = (t) >> (b);                                   \
  233. } while (0)
  234.  
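/* Worked example of the floored split (assuming GRID_X_BITS == 8, so
 * GRID_X == 256): for x == -3 both variants above yield i == -1 and
 * f == 253, i.e. -3 == (-1)*256 + 253.  The integer part is always the
 * floor and the fraction always lies in [0, GRID_X). */
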
  235. /* A grid area is a real in [0,1] scaled by 2*GRID_X*GRID_Y.  We want
  236.  * to be able to represent exactly areas of subpixel trapezoids whose
  237.  * vertices are given in grid scaled coordinates.  The scale factor
  238.  * comes from needing to accurately represent the area 0.5*dx*dy of a
  239.  * triangle with base dx and height dy in grid scaled numbers. */
  240. #define GRID_XY (2*GRID_X*GRID_Y) /* Unit area on the grid. */
  241.  
  242. /* GRID_AREA_TO_ALPHA(area): map [0,GRID_XY] to [0,255]. */
  243. #if GRID_XY == 510
  244. #  define GRID_AREA_TO_ALPHA(c)   (((c)+1) >> 1)
  245. #elif GRID_XY == 255
  246. #  define  GRID_AREA_TO_ALPHA(c)  (c)
  247. #elif GRID_XY == 64
  248. #  define  GRID_AREA_TO_ALPHA(c)  (((c) << 2) | -(((c) & 0x40) >> 6))
  249. #elif GRID_XY == 128
  250. #  define  GRID_AREA_TO_ALPHA(c)  ((((c) << 1) | -((c) >> 7)) & 255)
  251. #elif GRID_XY == 256
  252. #  define  GRID_AREA_TO_ALPHA(c)  (((c) | -((c) >> 8)) & 255)
  253. #elif GRID_XY == 15
  254. #  define  GRID_AREA_TO_ALPHA(c)  (((c) << 4) + (c))
  255. #elif GRID_XY == 2*256*15
  256. #  define  GRID_AREA_TO_ALPHA(c)  (((c) + ((c)<<4) + 256) >> 9)
  257. #else
  258. #  define  GRID_AREA_TO_ALPHA(c)  (((c)*255 + GRID_XY/2) / GRID_XY)
  259. #endif
  260.  
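/* Worked example (assuming CAIRO_FIXED_FRAC_BITS == 8, cairo's usual
 * 24.8 fixed point): GRID_X == 256 and GRID_Y == 15, so GRID_XY ==
 * 2*256*15 == 7680 and the 2*256*15 branch above applies:
 *
 *   GRID_AREA_TO_ALPHA(0)    == (0    + 0      + 256) >> 9 ==   0
 *   GRID_AREA_TO_ALPHA(3840) == (3840 + 61440  + 256) >> 9 == 128
 *   GRID_AREA_TO_ALPHA(7680) == (7680 + 122880 + 256) >> 9 == 255
 *
 * i.e. half coverage maps to 128 and full coverage to 255. */
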
  261. #define UNROLL3(x) x x x
  262.  
  263. struct quorem {
  264.     int32_t quo;
  265.     int32_t rem;
  266. };
  267.  
  268. /* Header for a chunk of memory in a memory pool. */
  269. struct _pool_chunk {
  270.     /* # bytes used in this chunk. */
  271.     size_t size;
  272.  
  273.     /* # bytes total in this chunk */
  274.     size_t capacity;
  275.  
  276.     /* Pointer to the previous chunk or %NULL if this is the sentinel
  277.      * chunk in the pool header. */
  278.     struct _pool_chunk *prev_chunk;
  279.  
  280.     /* Actual data starts here.  Well aligned for pointers. */
  281. };
  282.  
  283. /* A memory pool.  This is supposed to be embedded on the stack or
  284.  * within some other structure.  It may optionally be followed by an
  285.  * embedded array from which requests are fulfilled until
  286.  * malloc needs to be called to allocate a first real chunk. */
  287. struct pool {
  288.     /* Chunk we're allocating from. */
  289.     struct _pool_chunk *current;
  290.  
  291.     jmp_buf *jmp;
  292.  
  293.     /* Free list of previously allocated chunks.  All have >= default
  294.      * capacity. */
  295.     struct _pool_chunk *first_free;
  296.  
  297.     /* The default capacity of a chunk. */
  298.     size_t default_capacity;
  299.  
  300.     /* Header for the sentinel chunk.  Directly following the pool
  301.      * struct should be some space for embedded elements from which
  302.      * the sentinel chunk allocates. */
  303.     struct _pool_chunk sentinel[1];
  304. };
  305.  
  306. /* A polygon edge. */
  307. struct edge {
  308.     /* Next in y-bucket or active list. */
  309.     struct edge *next, *prev;
  310.  
  311.     /* Number of subsample rows of this edge that remain to be
  312.      * scan converted. */
  313.     grid_scaled_y_t height_left;
  314.  
  315.     /* Original sign of the edge: +1 for downwards, -1 for upwards
  316.      * edges.  */
  317.     int dir;
  318.     int vertical;
  319.  
  320.     /* Current x coordinate while the edge is on the active
  321.      * list. Initialised to the x coordinate of the top of the
  322.      * edge. The quotient is in grid_scaled_x_t units and the
  323.      * remainder is mod dy in grid_scaled_y_t units.*/
  324.     struct quorem x;
  325.  
  326.     /* Advance of the current x when moving down a subsample line. */
  327.     struct quorem dxdy;
  328.  
  329.     /* Advance of the current x when moving down a full pixel
  330.      * row. Only initialised when the height of the edge is large
  331.      * enough that there's a chance the edge could be stepped by a
  332.      * full row's worth of subsample rows at a time. */
  333.     struct quorem dxdy_full;
  334.  
  335.     /* The clipped y of the top of the edge. */
  336.     grid_scaled_y_t ytop;
  337.  
  338.     /* y2-y1 after orienting the edge downwards.  */
  339.     grid_scaled_y_t dy;
  340. };
  341.  
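/* Worked example of the quorem DDA (a sketch): take an edge with dx == 7
 * and dy == 3 in grid units.  Then dxdy == floored_divrem(7, 3) ==
 * {quo 2, rem 1}, and x.rem is biased by -dy when the edge is added to
 * the polygon (see polygon_add_edge below).  Stepping one subsample row
 * at a time:
 *
 *   x.quo:  x     x+2    x+4    x+7
 *   x.rem: -3    -2     -1     -3      (carry when rem becomes >= 0)
 *
 * so after dy == 3 steps the quotient has advanced by exactly dx == 7
 * without any per-step division. */
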
  342. #define EDGE_Y_BUCKET_INDEX(y, ymin) (((y) - (ymin))/GRID_Y)
  343.  
  344. /* A collection of sorted and vertically clipped edges of the polygon.
  345.  * Edges are moved from the polygon to an active list while scan
  346.  * converting. */
  347. struct polygon {
  348.     /* The vertical clip extents. */
  349.     grid_scaled_y_t ymin, ymax;
  350.  
  351.     /* Array of edges all starting in the same bucket.  An edge is put
  352.      * into bucket EDGE_Y_BUCKET_INDEX(edge->ytop, polygon->ymin) when
  353.      * it is added to the polygon. */
  354.     struct edge **y_buckets;
  355.     struct edge *y_buckets_embedded[64];
  356.  
  357.     struct {
  358.         struct pool base[1];
  359.         struct edge embedded[32];
  360.     } edge_pool;
  361. };
  362.  
  363. /* A cell records the effect on pixel coverage of polygon edges
  364.  * passing through a pixel.  It contains two accumulators of pixel
  365.  * coverage.
  366.  *
  367.  * Consider the effects of a polygon edge on the coverage of a pixel
  368.  * it intersects and that of the following one.  The coverage of the
  369.  * following pixel is the height of the edge multiplied by the width
  370.  * of the pixel, and the coverage of the pixel itself is the area of
  371.  * the trapezoid formed by the edge and the right side of the pixel.
  372.  *
  373.  * +-----------------------+-----------------------+
  374.  * |                       |                       |
  375.  * |                       |                       |
  376.  * |_______________________|_______________________|
  377.  * |   \...................|.......................|\
  378.  * |    \..................|.......................| |
  379.  * |     \.................|.......................| |
  380.  * |      \....covered.....|.......................| |
  381.  * |       \....area.......|.......................| } covered height
  382.  * |        \..............|.......................| |
  383.  * |uncovered\.............|.......................| |
  384.  * |  area    \............|.......................| |
  385.  * |___________\...........|.......................|/
  386.  * |                       |                       |
  387.  * |                       |                       |
  388.  * |                       |                       |
  389.  * +-----------------------+-----------------------+
  390.  *
  391.  * Since the coverage of the following pixel will always be a multiple
  392.  * of the width of the pixel, we can store the height of the covered
  393.  * area instead.  The coverage of the pixel itself is the total
  394.  * coverage minus the area of the uncovered area to the left of the
  395.  * edge.  As it's faster to compute the uncovered area we only store
  396.  * that and subtract it from the total coverage later when forming
  397.  * spans to blit.
  398.  *
  399.  * The heights and areas are signed, with left edges of the polygon
  400.  * having positive sign and right edges having negative sign.  When
  401.  * two edges intersect they swap their left/rightness so their
  402.  * contribution above and below the intersection point must be
  403.  * computed separately. */
  404. struct cell {
  405.     struct cell         *next;
  406.     int                  x;
  407.     int16_t              uncovered_area;
  408.     int16_t              covered_height;
  409. };
  410.  
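/* How the two accumulators are consumed (see blit_a8 below): walking the
 * cells left to right, the running coverage of the row is
 *
 *   cover += cell->covered_height * GRID_X * 2;
 *   area   = cover - cell->uncovered_area;
 *
 * A running cover of GRID_Y*GRID_X*2 == GRID_XY means a fully covered
 * pixel and GRID_AREA_TO_ALPHA maps it to 255; the uncovered trapezoid
 * to the left of an edge is simply subtracted back out per cell. */
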
  411. /* A cell list represents the scan line sparsely as cells ordered by
  412.  * ascending x.  It is geared towards scanning the cells in order
  413.  * using an internal cursor. */
  414. struct cell_list {
  415.     /* Sentinel nodes */
  416.     struct cell head, tail;
  417.  
  418.     /* Cursor state for iterating through the cell list. */
  419.     struct cell *cursor, *rewind;
  420.  
  421.     /* Cells in the cell list are owned by the cell list and are
  422.      * allocated from this pool.  */
  423.     struct {
  424.         struct pool base[1];
  425.         struct cell embedded[32];
  426.     } cell_pool;
  427. };
  428.  
  429. struct cell_pair {
  430.     struct cell *cell1;
  431.     struct cell *cell2;
  432. };
  433.  
  434. /* The active list contains edges in the current scan line ordered by
  435.  * the x-coordinate of the intercept of the edge and the scan line. */
  436. struct active_list {
  437.     /* Leftmost edge on the current scan line. */
  438.     struct edge head, tail;
  439.  
  440.     /* A lower bound on the height of the active edges is used to
  441.      * estimate how soon some active edge ends.  We can't advance the
  442.      * scan conversion by a full pixel row if an edge ends somewhere
  443.      * within it. */
  444.     grid_scaled_y_t min_height;
  445.     int is_vertical;
  446. };
  447.  
  448. struct glitter_scan_converter {
  449.     struct polygon      polygon[1];
  450.     struct active_list  active[1];
  451.     struct cell_list    coverages[1];
  452.  
  453.     cairo_half_open_span_t *spans;
  454.     cairo_half_open_span_t spans_embedded[64];
  455.  
  456.     /* Clip box. */
  457.     grid_scaled_x_t xmin, xmax;
  458.     grid_scaled_y_t ymin, ymax;
  459. };
  460.  
  461. /* Compute the floored division a/b. Assumes / and % perform symmetric
  462.  * division. */
  463. inline static struct quorem
  464. floored_divrem(int a, int b)
  465. {
  466.     struct quorem qr;
  467.     qr.quo = a/b;
  468.     qr.rem = a%b;
  469.     if ((a^b)<0 && qr.rem) {
  470.         qr.quo -= 1;
  471.         qr.rem += b;
  472.     }
  473.     return qr;
  474. }
  475.  
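/* Example: C's / and % truncate towards zero, so -7/3 == -2 with
 * remainder -1.  floored_divrem(-7, 3) corrects this to {quo -3, rem 2}
 * (indeed -3*3 + 2 == -7), keeping the remainder in [0, b) so the edge
 * stepping arithmetic stays monotonic regardless of the edge's sign. */
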
  476. /* Compute the floored division (x*a)/b. Assumes / and % perform symmetric
  477.  * division. */
  478. static struct quorem
  479. floored_muldivrem(int x, int a, int b)
  480. {
  481.     struct quorem qr;
  482.     long long xa = (long long)x*a;
  483.     qr.quo = xa/b;
  484.     qr.rem = xa%b;
  485.     if ((xa>=0) != (b>=0) && qr.rem) {
  486.         qr.quo -= 1;
  487.         qr.rem += b;
  488.     }
  489.     return qr;
  490. }
  491.  
  492. static struct _pool_chunk *
  493. _pool_chunk_init(
  494.     struct _pool_chunk *p,
  495.     struct _pool_chunk *prev_chunk,
  496.     size_t capacity)
  497. {
  498.     p->prev_chunk = prev_chunk;
  499.     p->size = 0;
  500.     p->capacity = capacity;
  501.     return p;
  502. }
  503.  
  504. static struct _pool_chunk *
  505. _pool_chunk_create(struct pool *pool, size_t size)
  506. {
  507.     struct _pool_chunk *p;
  508.  
  509.     p = malloc(size + sizeof(struct _pool_chunk));
  510.     if (unlikely (NULL == p))
  511.         longjmp (*pool->jmp, _cairo_error (CAIRO_STATUS_NO_MEMORY));
  512.  
  513.     return _pool_chunk_init(p, pool->current, size);
  514. }
  515.  
  516. static void
  517. pool_init(struct pool *pool,
  518.           jmp_buf *jmp,
  519.           size_t default_capacity,
  520.           size_t embedded_capacity)
  521. {
  522.     pool->jmp = jmp;
  523.     pool->current = pool->sentinel;
  524.     pool->first_free = NULL;
  525.     pool->default_capacity = default_capacity;
  526.     _pool_chunk_init(pool->sentinel, NULL, embedded_capacity);
  527. }
  528.  
  529. static void
  530. pool_fini(struct pool *pool)
  531. {
  532.     struct _pool_chunk *p = pool->current;
  533.     do {
  534.         while (NULL != p) {
  535.             struct _pool_chunk *prev = p->prev_chunk;
  536.             if (p != pool->sentinel)
  537.                 free(p);
  538.             p = prev;
  539.         }
  540.         p = pool->first_free;
  541.         pool->first_free = NULL;
  542.     } while (NULL != p);
  543. }
  544.  
  545. /* Satisfy an allocation by first allocating a new large enough chunk
  546.  * and adding it to the head of the pool's chunk list. This function
  547.  * is called as a fallback if pool_alloc() couldn't do a quick
  548.  * allocation from the current chunk in the pool. */
  549. static void *
  550. _pool_alloc_from_new_chunk(
  551.     struct pool *pool,
  552.     size_t size)
  553. {
  554.     struct _pool_chunk *chunk;
  555.     void *obj;
  556.     size_t capacity;
  557.  
  558.     /* If the allocation is smaller than the default chunk size then
  559.      * try getting a chunk off the free list.  Force alloc of a new
  560.      * chunk for large requests. */
  561.     capacity = size;
  562.     chunk = NULL;
  563.     if (size < pool->default_capacity) {
  564.         capacity = pool->default_capacity;
  565.         chunk = pool->first_free;
  566.         if (chunk) {
  567.             pool->first_free = chunk->prev_chunk;
  568.             _pool_chunk_init(chunk, pool->current, chunk->capacity);
  569.         }
  570.     }
  571.  
  572.     if (NULL == chunk)
  573.         chunk = _pool_chunk_create (pool, capacity);
  574.     pool->current = chunk;
  575.  
  576.     obj = ((unsigned char*)chunk + sizeof(*chunk) + chunk->size);
  577.     chunk->size += size;
  578.     return obj;
  579. }
  580.  
  581. /* Allocate size bytes from the pool.  The first allocated address
  582.  * returned from a pool is aligned to sizeof(void*).  Subsequent
  583.  * addresses will maintain alignment as long as multiples of void* are
  584.  * allocated.  Returns the address of the new memory area; on allocation
  585.  * failure the pool longjmps via its jmp_buf rather than returning %NULL.
  586.  * The pool retains ownership of the returned memory. */
  587. inline static void *
  588. pool_alloc (struct pool *pool, size_t size)
  589. {
  590.     struct _pool_chunk *chunk = pool->current;
  591.  
  592.     if (size <= chunk->capacity - chunk->size) {
  593.         void *obj = ((unsigned char*)chunk + sizeof(*chunk) + chunk->size);
  594.         chunk->size += size;
  595.         return obj;
  596.     } else {
  597.         return _pool_alloc_from_new_chunk(pool, size);
  598.     }
  599. }
  600.  
  601. /* Relinquish all pool_alloced memory back to the pool. */
  602. static void
  603. pool_reset (struct pool *pool)
  604. {
  605.     /* Transfer all used chunks to the chunk free list. */
  606.     struct _pool_chunk *chunk = pool->current;
  607.     if (chunk != pool->sentinel) {
  608.         while (chunk->prev_chunk != pool->sentinel) {
  609.             chunk = chunk->prev_chunk;
  610.         }
  611.         chunk->prev_chunk = pool->first_free;
  612.         pool->first_free = pool->current;
  613.     }
  614.     /* Reset the sentinel as the current chunk. */
  615.     pool->current = pool->sentinel;
  616.     pool->sentinel->size = 0;
  617. }
  618.  
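/* Typical pool usage in this file (a sketch; the jmp_buf is prepared
 * with setjmp() by the code that drives the scan conversion, and any
 * CAIRO_STATUS_NO_MEMORY arrives there via longjmp from
 * _pool_chunk_create):
 *
 *   struct { struct pool base[1]; struct cell embedded[32]; } p;
 *   pool_init (p.base, &jmp, 256*sizeof(struct cell), sizeof(p.embedded));
 *   cell = pool_alloc (p.base, sizeof (struct cell));  // bump allocation
 *   pool_reset (p.base);  // recycle all chunks at the start of a row
 *   pool_fini (p.base);   // free every malloc'ed chunk
 */
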
  619. /* Rewinds the cell list's cursor to the beginning.  After rewinding
  620.  * we're good to cell_list_find() the cell at any x coordinate. */
  621. inline static void
  622. cell_list_rewind (struct cell_list *cells)
  623. {
  624.     cells->cursor = &cells->head;
  625. }
  626.  
  627. inline static void
  628. cell_list_maybe_rewind (struct cell_list *cells, int x)
  629. {
  630.     if (x < cells->cursor->x) {
  631.         cells->cursor = cells->rewind;
  632.         if (x < cells->cursor->x)
  633.             cells->cursor = &cells->head;
  634.     }
  635. }
  636.  
  637. inline static void
  638. cell_list_set_rewind (struct cell_list *cells)
  639. {
  640.     cells->rewind = cells->cursor;
  641. }
  642.  
  643. static void
  644. cell_list_init(struct cell_list *cells, jmp_buf *jmp)
  645. {
  646.     pool_init(cells->cell_pool.base, jmp,
  647.               256*sizeof(struct cell),
  648.               sizeof(cells->cell_pool.embedded));
  649.     cells->tail.next = NULL;
  650.     cells->tail.x = INT_MAX;
  651.     cells->head.x = INT_MIN;
  652.     cells->head.next = &cells->tail;
  653.     cell_list_rewind (cells);
  654. }
  655.  
  656. static void
  657. cell_list_fini(struct cell_list *cells)
  658. {
  659.     pool_fini (cells->cell_pool.base);
  660. }
  661.  
  662. /* Empty the cell list.  This is called at the start of every pixel
  663.  * row. */
  664. inline static void
  665. cell_list_reset (struct cell_list *cells)
  666. {
  667.     cell_list_rewind (cells);
  668.     cells->head.next = &cells->tail;
  669.     pool_reset (cells->cell_pool.base);
  670. }
  671.  
  672. inline static struct cell *
  673. cell_list_alloc (struct cell_list *cells,
  674.                  struct cell *tail,
  675.                  int x)
  676. {
  677.     struct cell *cell;
  678.  
  679.     cell = pool_alloc (cells->cell_pool.base, sizeof (struct cell));
  680.     cell->next = tail->next;
  681.     tail->next = cell;
  682.     cell->x = x;
  683.     *(uint32_t *)&cell->uncovered_area = 0;
  684.  
  685.     return cell;
  686. }
  687.  
  688. /* Find a cell at the given x-coordinate, allocating it if it does not
  689.  * exist yet (allocation failure longjmps).  Cells must be found with
  690.  * non-decreasing x-coordinate until the cell list is rewound using
  691.  * cell_list_rewind(). Ownership of the returned cell is retained by
  692.  * the cell list. */
  693. inline static struct cell *
  694. cell_list_find (struct cell_list *cells, int x)
  695. {
  696.     struct cell *tail = cells->cursor;
  697.  
  698.     if (tail->x == x)
  699.         return tail;
  700.  
  701.     while (1) {
  702.         UNROLL3({
  703.                 if (tail->next->x > x)
  704.                         break;
  705.                 tail = tail->next;
  706.         });
  707.     }
  708.  
  709.     if (tail->x != x)
  710.         tail = cell_list_alloc (cells, tail, x);
  711.     return cells->cursor = tail;
  712.  
  713. }
  714.  
  715. /* Find two cells at x1 and x2.  This is exactly equivalent
  716.  * to
  717.  *
  718.  *   pair.cell1 = cell_list_find(cells, x1);
  719.  *   pair.cell2 = cell_list_find(cells, x2);
  720.  *
  721.  * except with less function call overhead. */
  722. inline static struct cell_pair
  723. cell_list_find_pair(struct cell_list *cells, int x1, int x2)
  724. {
  725.     struct cell_pair pair;
  726.  
  727.     pair.cell1 = cells->cursor;
  728.     while (1) {
  729.         UNROLL3({
  730.                 if (pair.cell1->next->x > x1)
  731.                         break;
  732.                 pair.cell1 = pair.cell1->next;
  733.         });
  734.     }
  735.     if (pair.cell1->x != x1)
  736.         pair.cell1 = cell_list_alloc (cells, pair.cell1, x1);
  737.  
  738.     pair.cell2 = pair.cell1;
  739.     while (1) {
  740.         UNROLL3({
  741.                 if (pair.cell2->next->x > x2)
  742.                         break;
  743.                 pair.cell2 = pair.cell2->next;
  744.         });
  745.     }
  746.     if (pair.cell2->x != x2)
  747.         pair.cell2 = cell_list_alloc (cells, pair.cell2, x2);
  748.  
  749.     cells->cursor = pair.cell2;
  750.     return pair;
  751. }
  752.  
  753. /* Add a subpixel span covering [x1, x2) to the coverage cells. */
  754. inline static void
  755. cell_list_add_subspan(struct cell_list *cells,
  756.                       grid_scaled_x_t x1,
  757.                       grid_scaled_x_t x2)
  758. {
  759.     int ix1, fx1;
  760.     int ix2, fx2;
  761.  
  762.     if (x1 == x2)
  763.         return;
  764.  
  765.     GRID_X_TO_INT_FRAC(x1, ix1, fx1);
  766.     GRID_X_TO_INT_FRAC(x2, ix2, fx2);
  767.  
  768.     if (ix1 != ix2) {
  769.         struct cell_pair p;
  770.         p = cell_list_find_pair(cells, ix1, ix2);
  771.         p.cell1->uncovered_area += 2*fx1;
  772.         ++p.cell1->covered_height;
  773.         p.cell2->uncovered_area -= 2*fx2;
  774.         --p.cell2->covered_height;
  775.     } else {
  776.         struct cell *cell = cell_list_find(cells, ix1);
  777.         cell->uncovered_area += 2*(fx1-fx2);
  778.     }
  779. }
  780.  
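/* Example: with GRID_X == 256, a subspan from x1 == 1*256 + 64 to
 * x2 == 3*256 + 128 splits into ix1 == 1, fx1 == 64 and ix2 == 3,
 * fx2 == 128, so cell_list_add_subspan records
 *
 *   cell 1: uncovered_area += 128,  covered_height += 1
 *   cell 3: uncovered_area -= 256,  covered_height -= 1
 *
 * and when the deltas are summed left to right, pixel 1 is partially
 * covered, pixel 2 is fully covered for this subsample row, and the
 * coverage ends partway into pixel 3. */
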
  781. /* Adds the analytical coverage of an edge crossing the current pixel
  782.  * row to the coverage cells and advances the edge's x position to the
  783.  * following row.
  784.  *
  785.  * This function is only called when we know that during this pixel row:
  786.  *
  787.  * 1) The relative order of all edges on the active list doesn't
  788.  * change.  In particular, no edges intersect within this row to pixel
  789.  * precision.
  790.  *
  791.  * 2) No new edges start in this row.
  792.  *
  793.  * 3) No existing edges end mid-row.
  794.  *
  795.  * This function depends on being called with all edges from the
  796.  * active list in the order they appear on the list (i.e. with
  797.  * non-decreasing x-coordinate.)  */
  798. static void
  799. cell_list_render_edge(struct cell_list *cells,
  800.                       struct edge *edge,
  801.                       int sign)
  802. {
  803.     grid_scaled_y_t y1, y2, dy;
  804.     grid_scaled_x_t dx;
  805.     int ix1, ix2;
  806.     grid_scaled_x_t fx1, fx2;
  807.  
  808.     struct quorem x1 = edge->x;
  809.     struct quorem x2 = x1;
  810.  
  811.     if (! edge->vertical) {
  812.         x2.quo += edge->dxdy_full.quo;
  813.         x2.rem += edge->dxdy_full.rem;
  814.         if (x2.rem >= 0) {
  815.             ++x2.quo;
  816.             x2.rem -= edge->dy;
  817.         }
  818.  
  819.         edge->x = x2;
  820.     }
  821.  
  822.     GRID_X_TO_INT_FRAC(x1.quo, ix1, fx1);
  823.     GRID_X_TO_INT_FRAC(x2.quo, ix2, fx2);
  824.  
  825.     /* Edge is entirely within a column? */
  826.     if (ix1 == ix2) {
  827.         /* We always know that ix1 is >= the cell list cursor in this
  828.          * case due to the no-intersections precondition.  */
  829.         struct cell *cell = cell_list_find(cells, ix1);
  830.         cell->covered_height += sign*GRID_Y;
  831.         cell->uncovered_area += sign*(fx1 + fx2)*GRID_Y;
  832.         return;
  833.     }
  834.  
  835.     /* Orient the edge left-to-right. */
  836.     dx = x2.quo - x1.quo;
  837.     if (dx >= 0) {
  838.         y1 = 0;
  839.         y2 = GRID_Y;
  840.     } else {
  841.         int tmp;
  842.         tmp = ix1; ix1 = ix2; ix2 = tmp;
  843.         tmp = fx1; fx1 = fx2; fx2 = tmp;
  844.         dx = -dx;
  845.         sign = -sign;
  846.         y1 = GRID_Y;
  847.         y2 = 0;
  848.     }
  849.     dy = y2 - y1;
  850.  
  851.     /* Add coverage for all pixels [ix1,ix2] on this row crossed
  852.      * by the edge. */
  853.     {
  854.         struct cell_pair pair;
  855.         struct quorem y = floored_divrem((GRID_X - fx1)*dy, dx);
  856.  
  857.         /* When rendering a previous edge on the active list we may
  858.          * advance the cell list cursor past the leftmost pixel of the
  859.          * current edge even though the two edges don't intersect.
  860.          * e.g. consider two edges going down and rightwards:
  861.          *
  862.          *  --\_+---\_+-----+-----+----
  863.          *      \_    \_    |     |
  864.          *      | \_  | \_  |     |
  865.          *      |   \_|   \_|     |
  866.          *      |     \_    \_    |
  867.          *  ----+-----+-\---+-\---+----
  868.          *
  869.          * The left edge touches cells past the starting cell of the
  870.          * right edge.  Fortunately such cases are rare.
  871.          *
  872.          * The rewinding is never necessary if the current edge stays
  873.          * within a single column because we've checked before calling
  874.          * this function that the active list order won't change. */
  875.         cell_list_maybe_rewind(cells, ix1);
  876.  
  877.         pair = cell_list_find_pair(cells, ix1, ix1+1);
  878.         pair.cell1->uncovered_area += sign*y.quo*(GRID_X + fx1);
  879.         pair.cell1->covered_height += sign*y.quo;
  880.         y.quo += y1;
  881.  
  882.         if (ix1+1 < ix2) {
  883.             struct quorem dydx_full = floored_divrem(GRID_X*dy, dx);
  884.             struct cell *cell = pair.cell2;
  885.  
  886.             ++ix1;
  887.             do {
  888.                 grid_scaled_y_t y_skip = dydx_full.quo;
  889.                 y.rem += dydx_full.rem;
  890.                 if (y.rem >= dx) {
  891.                     ++y_skip;
  892.                     y.rem -= dx;
  893.                 }
  894.  
  895.                 y.quo += y_skip;
  896.  
  897.                 y_skip *= sign;
  898.                 cell->uncovered_area += y_skip*GRID_X;
  899.                 cell->covered_height += y_skip;
  900.  
  901.                 ++ix1;
  902.                 cell = cell_list_find(cells, ix1);
  903.             } while (ix1 != ix2);
  904.  
  905.             pair.cell2 = cell;
  906.         }
  907.         pair.cell2->uncovered_area += sign*(y2 - y.quo)*fx2;
  908.         pair.cell2->covered_height += sign*(y2 - y.quo);
  909.     }
  910. }
  911.  
  912. static void
  913. polygon_init (struct polygon *polygon, jmp_buf *jmp)
  914. {
  915.     polygon->ymin = polygon->ymax = 0;
  916.     polygon->y_buckets = polygon->y_buckets_embedded;
  917.     pool_init (polygon->edge_pool.base, jmp,
  918.                8192 - sizeof (struct _pool_chunk),
  919.                sizeof (polygon->edge_pool.embedded));
  920. }
  921.  
  922. static void
  923. polygon_fini (struct polygon *polygon)
  924. {
  925.     if (polygon->y_buckets != polygon->y_buckets_embedded)
  926.         free (polygon->y_buckets);
  927.  
  928.     pool_fini (polygon->edge_pool.base);
  929. }
  930.  
  931. /* Empties the polygon of all edges. The polygon is then prepared to
  932.  * receive new edges and clip them to the vertical range
  933.  * [ymin,ymax). */
  934. static glitter_status_t
  935. polygon_reset (struct polygon *polygon,
  936.                grid_scaled_y_t ymin,
  937.                grid_scaled_y_t ymax)
  938. {
  939.     unsigned h = ymax - ymin;
  940.     unsigned num_buckets = EDGE_Y_BUCKET_INDEX(ymax + GRID_Y-1, ymin);
  941.  
  942.     pool_reset(polygon->edge_pool.base);
  943.  
  944.     if (unlikely (h > 0x7FFFFFFFU - GRID_Y))
  945.         goto bail_no_mem; /* even if you could, you wouldn't want to. */
  946.  
  947.     if (polygon->y_buckets != polygon->y_buckets_embedded)
  948.         free (polygon->y_buckets);
  949.  
  950.     polygon->y_buckets =  polygon->y_buckets_embedded;
  951.     if (num_buckets > ARRAY_LENGTH (polygon->y_buckets_embedded)) {
  952.         polygon->y_buckets = _cairo_malloc_ab (num_buckets,
  953.                                                sizeof (struct edge *));
  954.         if (unlikely (NULL == polygon->y_buckets))
  955.             goto bail_no_mem;
  956.     }
  957.     memset (polygon->y_buckets, 0, num_buckets * sizeof (struct edge *));
  958.  
  959.     polygon->ymin = ymin;
  960.     polygon->ymax = ymax;
  961.     return GLITTER_STATUS_SUCCESS;
  962.  
  963. bail_no_mem:
  964.     polygon->ymin = 0;
  965.     polygon->ymax = 0;
  966.     return GLITTER_STATUS_NO_MEMORY;
  967. }
  968.  
  969. static void
  970. _polygon_insert_edge_into_its_y_bucket(struct polygon *polygon,
  971.                                        struct edge *e)
  972. {
  973.     unsigned ix = EDGE_Y_BUCKET_INDEX(e->ytop, polygon->ymin);
  974.     struct edge **ptail = &polygon->y_buckets[ix];
  975.     e->next = *ptail;
  976.     *ptail = e;
  977. }
  978.  
  979. inline static void
  980. polygon_add_edge (struct polygon *polygon,
  981.                   const cairo_edge_t *edge)
  982. {
  983.     struct edge *e;
  984.     grid_scaled_x_t dx;
  985.     grid_scaled_y_t dy;
  986.     grid_scaled_y_t ytop, ybot;
  987.     grid_scaled_y_t ymin = polygon->ymin;
  988.     grid_scaled_y_t ymax = polygon->ymax;
  989.  
  990.     if (unlikely (edge->top >= ymax || edge->bottom <= ymin))
  991.         return;
  992.  
  993.     e = pool_alloc (polygon->edge_pool.base, sizeof (struct edge));
  994.  
  995.     dx = edge->line.p2.x - edge->line.p1.x;
  996.     dy = edge->line.p2.y - edge->line.p1.y;
  997.     e->dy = dy;
  998.     e->dir = edge->dir;
  999.  
  1000.     ytop = edge->top >= ymin ? edge->top : ymin;
  1001.     ybot = edge->bottom <= ymax ? edge->bottom : ymax;
  1002.     e->ytop = ytop;
  1003.     e->height_left = ybot - ytop;
  1004.  
  1005.     if (dx == 0) {
  1006.         e->vertical = TRUE;
  1007.         e->x.quo = edge->line.p1.x;
  1008.         e->x.rem = 0;
  1009.         e->dxdy.quo = 0;
  1010.         e->dxdy.rem = 0;
  1011.         e->dxdy_full.quo = 0;
  1012.         e->dxdy_full.rem = 0;
  1013.     } else {
  1014.         e->vertical = FALSE;
  1015.         e->dxdy = floored_divrem (dx, dy);
  1016.         if (ytop == edge->line.p1.y) {
  1017.             e->x.quo = edge->line.p1.x;
  1018.             e->x.rem = 0;
  1019.         } else {
  1020.             e->x = floored_muldivrem (ytop - edge->line.p1.y, dx, dy);
  1021.             e->x.quo += edge->line.p1.x;
  1022.         }
  1023.  
  1024.         if (e->height_left >= GRID_Y) {
  1025.             e->dxdy_full = floored_muldivrem (GRID_Y, dx, dy);
  1026.         } else {
  1027.             e->dxdy_full.quo = 0;
  1028.             e->dxdy_full.rem = 0;
  1029.         }
  1030.     }
  1031.  
  1032.     _polygon_insert_edge_into_its_y_bucket (polygon, e);
  1033.  
  1034.     e->x.rem -= dy;             /* Bias the remainder for faster
  1035.                                  * edge advancement. */
  1036. }
  1037.  
  1038. static void
  1039. active_list_reset (struct active_list *active)
  1040. {
  1041.     active->head.vertical = 1;
  1042.     active->head.height_left = INT_MAX;
  1043.     active->head.x.quo = INT_MIN;
  1044.     active->head.prev = NULL;
  1045.     active->head.next = &active->tail;
  1046.     active->tail.prev = &active->head;
  1047.     active->tail.next = NULL;
  1048.     active->tail.x.quo = INT_MAX;
  1049.     active->tail.height_left = INT_MAX;
  1050.     active->tail.vertical = 1;
  1051.     active->min_height = 0;
  1052.     active->is_vertical = 1;
  1053. }
  1054.  
  1055. static void
  1056. active_list_init(struct active_list *active)
  1057. {
  1058.     active_list_reset(active);
  1059. }
  1060.  
  1061. /*
  1062.  * Merge two sorted edge lists.
  1063.  * Input:
  1064.  *  - head_a: The head of the first list.
  1065.  *  - head_b: The head of the second list; head_b cannot be NULL.
  1066.  * Output:
  1067.  * Returns the head of the merged list.
  1068.  *
  1069.  * Implementation notes:
  1070.  * To make it fast (in particular, to reduce to an insertion sort whenever
  1071.  * one of the two input lists only has a single element) we iterate through
  1072.  * a list until its head becomes greater than the head of the other list,
  1073.  * then we switch their roles. As soon as one of the two lists is empty, we
  1074.  * just attach the other one to the current list and exit.
  1075.  * Writes to memory are only needed to "switch" lists (as it also requires
  1076.  * attaching to the output list the list which we will be iterating next) and
  1077.  * to attach the last non-empty list.
  1078.  */
  1079. static struct edge *
  1080. merge_sorted_edges (struct edge *head_a, struct edge *head_b)
  1081. {
  1082.     struct edge *head, **next, *prev;
  1083.     int32_t x;
  1084.  
  1085.     prev = head_a->prev;
  1086.     next = &head;
  1087.     if (head_a->x.quo <= head_b->x.quo) {
  1088.         head = head_a;
  1089.     } else {
  1090.         head = head_b;
  1091.         head_b->prev = prev;
  1092.         goto start_with_b;
  1093.     }
  1094.  
  1095.     do {
  1096.         x = head_b->x.quo;
  1097.         while (head_a != NULL && head_a->x.quo <= x) {
  1098.             prev = head_a;
  1099.             next = &head_a->next;
  1100.             head_a = head_a->next;
  1101.         }
  1102.  
  1103.         head_b->prev = prev;
  1104.         *next = head_b;
  1105.         if (head_a == NULL)
  1106.             return head;
  1107.  
  1108. start_with_b:
  1109.         x = head_a->x.quo;
  1110.         while (head_b != NULL && head_b->x.quo <= x) {
  1111.             prev = head_b;
  1112.             next = &head_b->next;
  1113.             head_b = head_b->next;
  1114.         }
  1115.  
  1116.         head_a->prev = prev;
  1117.         *next = head_a;
  1118.         if (head_b == NULL)
  1119.             return head;
  1120.     } while (1);
  1121. }
  1122.  
  1123. /*
  1124.  * Sort (part of) a list.
  1125.  * Input:
  1126.  *  - list: The list to be sorted; list cannot be NULL.
  1127.  *  - limit: Recursion limit.
  1128.  * Output:
  1129.  *  - head_out: The head of the sorted list containing the first 2^(level+1) elements of the
  1130.  *              input list; if the input list has fewer elements, head_out be a sorted list
  1131.  *              containing all the elements of the input list.
  1132.  * Returns the head of the list of unprocessed elements (NULL if the sorted list contains
  1133.  * all the elements of the input list).
  1134.  *
  1135.  * Implementation notes:
  1136.  * Special case single element list, unroll/inline the sorting of the first two elements.
  1137.  * Some tail recursion is used since we iterate on the bottom-up solution of the problem
  1138.  * (we start with a small sorted list and keep merging other lists of the same size to it).
  1139.  */
  1140. static struct edge *
  1141. sort_edges (struct edge *list,
  1142.             unsigned int level,
  1143.             struct edge **head_out)
  1144. {
  1145.     struct edge *head_other, *remaining;
  1146.     unsigned int i;
  1147.  
  1148.     head_other = list->next;
  1149.  
  1150.     if (head_other == NULL) {
  1151.         *head_out = list;
  1152.         return NULL;
  1153.     }
  1154.  
  1155.     remaining = head_other->next;
  1156.     if (list->x.quo <= head_other->x.quo) {
  1157.         *head_out = list;
  1158.         head_other->next = NULL;
  1159.     } else {
  1160.         *head_out = head_other;
  1161.         head_other->prev = list->prev;
  1162.         head_other->next = list;
  1163.         list->prev = head_other;
  1164.         list->next = NULL;
  1165.     }
  1166.  
  1167.     for (i = 0; i < level && remaining; i++) {
  1168.         remaining = sort_edges (remaining, i, &head_other);
  1169.         *head_out = merge_sorted_edges (*head_out, head_other);
  1170.     }
  1171.  
  1172.     return remaining;
  1173. }
  1174.  
  1175. static struct edge *
  1176. merge_unsorted_edges (struct edge *head, struct edge *unsorted)
  1177. {
  1178.     sort_edges (unsorted, UINT_MAX, &unsorted);
  1179.     return merge_sorted_edges (head, unsorted);
  1180. }
  1181.  
  1182. /* Test if the edges on the active list can be safely advanced by a
  1183.  * full row without intersections or any edges ending. */
  1184. inline static int
  1185. can_do_full_row (struct active_list *active)
  1186. {
  1187.     const struct edge *e;
  1188.     int prev_x = INT_MIN;
  1189.  
  1190.     /* Recomputes the minimum height of all edges on the active
  1191.      * list if we have been dropping edges. */
  1192.     if (active->min_height <= 0) {
  1193.         int min_height = INT_MAX;
  1194.         int is_vertical = 1;
  1195.  
  1196.         e = active->head.next;
  1197.         while (NULL != e) {
  1198.             if (e->height_left < min_height)
  1199.                 min_height = e->height_left;
  1200.             is_vertical &= e->vertical;
  1201.             e = e->next;
  1202.         }
  1203.  
  1204.         active->is_vertical = is_vertical;
  1205.         active->min_height = min_height;
  1206.     }
  1207.  
  1208.     if (active->min_height < GRID_Y)
  1209.         return 0;
  1210.  
  1211.     /* Check for intersections as no edges end during the next row. */
  1212.     for (e = active->head.next; e != &active->tail; e = e->next) {
  1213.         struct quorem x = e->x;
  1214.  
  1215.         if (! e->vertical) {
  1216.             x.quo += e->dxdy_full.quo;
  1217.             x.rem += e->dxdy_full.rem;
  1218.             if (x.rem >= 0)
  1219.                 ++x.quo;
  1220.         }
  1221.  
  1222.         if (x.quo < prev_x)
  1223.             return 0;
  1224.  
  1225.         prev_x = x.quo;
  1226.     }
  1227.  
  1228.     return 1;
  1229. }
  1230.  
  1231. /* Merges edges on the given subpixel row from the polygon to the
  1232.  * active_list. */
  1233. inline static void
  1234. active_list_merge_edges_from_bucket(struct active_list *active,
  1235.                                     struct edge *edges)
  1236. {
  1237.     active->head.next = merge_unsorted_edges (active->head.next, edges);
  1238. }
  1239.  
  1240. inline static void
  1241. polygon_fill_buckets (struct active_list *active,
  1242.                       struct edge *edge,
  1243.                       int y,
  1244.                       struct edge **buckets)
  1245. {
  1246.     grid_scaled_y_t min_height = active->min_height;
  1247.     int is_vertical = active->is_vertical;
  1248.  
  1249.     while (edge) {
  1250.         struct edge *next = edge->next;
  1251.         int suby = edge->ytop - y;
  1252.         if (buckets[suby])
  1253.             buckets[suby]->prev = edge;
  1254.         edge->next = buckets[suby];
  1255.         edge->prev = NULL;
  1256.         buckets[suby] = edge;
  1257.         if (edge->height_left < min_height)
  1258.             min_height = edge->height_left;
  1259.         is_vertical &= edge->vertical;
  1260.         edge = next;
  1261.     }
  1262.  
  1263.     active->is_vertical = is_vertical;
  1264.     active->min_height = min_height;
  1265. }
  1266.  
  1267. inline static void
  1268. sub_row (struct active_list *active,
  1269.          struct cell_list *coverages,
  1270.          unsigned int mask)
  1271. {
  1272.     struct edge *edge = active->head.next;
  1273.     int xstart = INT_MIN, prev_x = INT_MIN;
  1274.     int winding = 0;
  1275.  
  1276.     cell_list_rewind (coverages);
  1277.  
  1278.     while (&active->tail != edge) {
  1279.         struct edge *next = edge->next;
  1280.         int xend = edge->x.quo;
  1281.  
  1282.         if (--edge->height_left) {
  1283.             edge->x.quo += edge->dxdy.quo;
  1284.             edge->x.rem += edge->dxdy.rem;
  1285.             if (edge->x.rem >= 0) {
  1286.                 ++edge->x.quo;
  1287.                 edge->x.rem -= edge->dy;
  1288.             }
  1289.  
  1290.             if (edge->x.quo < prev_x) {
  1291.                 struct edge *pos = edge->prev;
  1292.                 pos->next = next;
  1293.                 next->prev = pos;
  1294.                 do {
  1295.                     pos = pos->prev;
  1296.                 } while (edge->x.quo < pos->x.quo);
  1297.                 pos->next->prev = edge;
  1298.                 edge->next = pos->next;
  1299.                 edge->prev = pos;
  1300.                 pos->next = edge;
  1301.             } else
  1302.                 prev_x = edge->x.quo;
  1303.             active->min_height = -1;
  1304.         } else {
  1305.             edge->prev->next = next;
  1306.             next->prev = edge->prev;
  1307.         }
  1308.  
  1309.         winding += edge->dir;
  1310.         if ((winding & mask) == 0) {
  1311.             if (next->x.quo != xend) {
  1312.                 cell_list_add_subspan (coverages, xstart, xend);
  1313.                 xstart = INT_MIN;
  1314.             }
  1315.         } else if (xstart == INT_MIN)
  1316.             xstart = xend;
  1317.  
  1318.         edge = next;
  1319.     }
  1320. }
  1321.  
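/* Note on the winding mask (a sketch of the caller's convention, going
 * by the fill-rule description at the top of the file): a mask of ~0
 * implements the non-zero rule, since (winding & ~0) == 0 only when the
 * winding count is zero, while a mask of 1 implements the even-odd rule,
 * closing the span whenever the count is even. */
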
  1322. inline static void dec (struct active_list *a, struct edge *e, int h)
  1323. {
  1324.     e->height_left -= h;
  1325.     if (e->height_left == 0) {
  1326.         e->prev->next = e->next;
  1327.         e->next->prev = e->prev;
  1328.         a->min_height = -1;
  1329.     }
  1330. }
  1331.  
  1332. inline static void full_step (struct edge *e)
  1333. {
  1334.     if (! e->vertical) {
  1335.         e->x.quo += e->dxdy_full.quo;
  1336.         e->x.rem += e->dxdy_full.rem;
  1337.         if (e->x.rem >= 0) {
  1338.             ++e->x.quo;
  1339.             e->x.rem -= e->dy;
  1340.         }
  1341.     }
  1342. }
  1343.  
  1344. static void
  1345. full_row (struct active_list *active,
  1346.           struct cell_list *coverages,
  1347.           unsigned int mask)
  1348. {
  1349.     struct edge *left = active->head.next;
  1350.  
  1351.     while (&active->tail != left) {
  1352.         struct edge *right;
  1353.         int winding;
  1354.  
  1355.         dec (active, left, GRID_Y);
  1356.  
  1357.         winding = left->dir;
  1358.         right = left->next;
  1359.         do {
  1360.             dec (active, right, GRID_Y);
  1361.  
  1362.             winding += right->dir;
  1363.             if ((winding & mask) == 0 && right->next->x.quo != right->x.quo)
  1364.                 break;
  1365.  
  1366.             full_step (right);
  1367.  
  1368.             right = right->next;
  1369.         } while (1);
  1370.  
  1371.         cell_list_set_rewind (coverages);
  1372.         cell_list_render_edge (coverages, left, +1);
  1373.         cell_list_render_edge (coverages, right, -1);
  1374.  
  1375.         left = right->next;
  1376.     }
  1377. }
  1378.  
  1379. static void
  1380. _glitter_scan_converter_init(glitter_scan_converter_t *converter, jmp_buf *jmp)
  1381. {
  1382.     polygon_init(converter->polygon, jmp);
  1383.     active_list_init(converter->active);
  1384.     cell_list_init(converter->coverages, jmp);
  1385.     converter->xmin=0;
  1386.     converter->ymin=0;
  1387.     converter->xmax=0;
  1388.     converter->ymax=0;
  1389. }
  1390.  
  1391. static void
  1392. _glitter_scan_converter_fini(glitter_scan_converter_t *self)
  1393. {
  1394.     if (self->spans != self->spans_embedded)
  1395.         free (self->spans);
  1396.  
  1397.     polygon_fini(self->polygon);
  1398.     cell_list_fini(self->coverages);
  1399.  
  1400.     self->xmin=0;
  1401.     self->ymin=0;
  1402.     self->xmax=0;
  1403.     self->ymax=0;
  1404. }
  1405.  
  1406. static grid_scaled_t
  1407. int_to_grid_scaled(int i, int scale)
  1408. {
  1409.     /* Clamp to max/min representable scaled number. */
  1410.     if (i >= 0) {
  1411.         if (i >= INT_MAX/scale)
  1412.             i = INT_MAX/scale;
  1413.     }
  1414.     else {
  1415.         if (i <= INT_MIN/scale)
  1416.             i = INT_MIN/scale;
  1417.     }
  1418.     return i*scale;
  1419. }
  1420.  
  1421. #define int_to_grid_scaled_x(x) int_to_grid_scaled((x), GRID_X)
  1422. #define int_to_grid_scaled_y(x) int_to_grid_scaled((x), GRID_Y)
  1423.  
  1424. I glitter_status_t
  1425. glitter_scan_converter_reset(
  1426.                              glitter_scan_converter_t *converter,
  1427.                              int xmin, int ymin,
  1428.                              int xmax, int ymax)
  1429. {
  1430.     glitter_status_t status;
  1431.     int max_num_spans;
  1432.  
  1433.     converter->xmin = 0; converter->xmax = 0;
  1434.     converter->ymin = 0; converter->ymax = 0;
  1435.  
  1436.     max_num_spans = xmax - xmin + 1;
  1437.  
  1438.     if (max_num_spans > ARRAY_LENGTH(converter->spans_embedded)) {
  1439.         converter->spans = _cairo_malloc_ab (max_num_spans,
  1440.                                              sizeof (cairo_half_open_span_t));
  1441.         if (unlikely (converter->spans == NULL))
  1442.             return _cairo_error (CAIRO_STATUS_NO_MEMORY);
  1443.     } else
  1444.         converter->spans = converter->spans_embedded;
  1445.  
  1446.     xmin = int_to_grid_scaled_x(xmin);
  1447.     ymin = int_to_grid_scaled_y(ymin);
  1448.     xmax = int_to_grid_scaled_x(xmax);
  1449.     ymax = int_to_grid_scaled_y(ymax);
  1450.  
  1451.     active_list_reset(converter->active);
  1452.     cell_list_reset(converter->coverages);
  1453.     status = polygon_reset(converter->polygon, ymin, ymax);
  1454.     if (status)
  1455.         return status;
  1456.  
  1457.     converter->xmin = xmin;
  1458.     converter->xmax = xmax;
  1459.     converter->ymin = ymin;
  1460.     converter->ymax = ymax;
  1461.     return GLITTER_STATUS_SUCCESS;
  1462. }
  1463.  
  1464. /* INPUT_TO_GRID_X/Y (in_coord, out_grid_scaled, grid_scale)
  1465.  *   These macros convert an input coordinate in the client's
  1466.  *   device space to the rasterisation grid.
  1467.  */
  1468. /* Gah.. this bit of ugly defines INPUT_TO_GRID_X/Y so as to use
  1469.  * shifts if possible, and something saneish if not.
  1470.  */
  1471. #if !defined(INPUT_TO_GRID_Y) && defined(GRID_Y_BITS) && GRID_Y_BITS <= GLITTER_INPUT_BITS
  1472. #  define INPUT_TO_GRID_Y(in, out) (out) = (in) >> (GLITTER_INPUT_BITS - GRID_Y_BITS)
  1473. #else
  1474. #  define INPUT_TO_GRID_Y(in, out) INPUT_TO_GRID_general(in, out, GRID_Y)
  1475. #endif
  1476.  
  1477. #if !defined(INPUT_TO_GRID_X) && defined(GRID_X_BITS) && GRID_X_BITS <= GLITTER_INPUT_BITS
  1478. #  define INPUT_TO_GRID_X(in, out) (out) = (in) >> (GLITTER_INPUT_BITS - GRID_X_BITS)
  1479. #else
  1480. #  define INPUT_TO_GRID_X(in, out) INPUT_TO_GRID_general(in, out, GRID_X)
  1481. #endif
  1482.  
  1483. #define INPUT_TO_GRID_general(in, out, grid_scale) do {         \
  1484.     long long tmp__ = (long long)(grid_scale) * (in);   \
  1485.     tmp__ >>= GLITTER_INPUT_BITS;                               \
  1486.     (out) = tmp__;                                              \
  1487. } while (0)
  1488.  
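/* Worked example (assuming CAIRO_FIXED_FRAC_BITS == 8): GRID_X_BITS ==
 * GLITTER_INPUT_BITS, so INPUT_TO_GRID_X reduces to a shift by zero and
 * x coordinates pass through unchanged, while GRID_Y == 15 is not a
 * power of two, so INPUT_TO_GRID_Y takes the general form:
 *
 *   in == 640 (2.5 pixels)  ->  out == (15 * 640) >> 8 == 37
 *
 * i.e. 2.5 pixels lands on subsample row 37 of the 15-rows-per-pixel
 * grid. */
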
  1489. /* Add a new polygon edge to the scan converter.  The edge's
  1490.  * coordinates represent pixel positions scaled by
  1491.  * 2**GLITTER_INPUT_BITS.  If this function fails then the scan
  1492.  * converter should be reset or destroyed.  Dir must be +1 or -1,
  1493.  * with the latter reversing the orientation of the edge. */
  1494. I void
  1495. glitter_scan_converter_add_edge (glitter_scan_converter_t *converter,
  1496.                                  const cairo_edge_t *edge)
  1497. {
  1498.     cairo_edge_t e;
  1499.  
  1500.     INPUT_TO_GRID_Y (edge->top, e.top);
  1501.     INPUT_TO_GRID_Y (edge->bottom, e.bottom);
  1502.     if (e.top >= e.bottom)
  1503.         return;
  1504.  
  1505.     /* XXX: possible overflows if GRID_X/Y > 2**GLITTER_INPUT_BITS */
  1506.     INPUT_TO_GRID_Y (edge->line.p1.y, e.line.p1.y);
  1507.     INPUT_TO_GRID_Y (edge->line.p2.y, e.line.p2.y);
  1508.     if (e.line.p1.y == e.line.p2.y)
  1509.         e.line.p2.y++; /* little fudge to prevent a div-by-zero */
  1510.  
  1511.     INPUT_TO_GRID_X (edge->line.p1.x, e.line.p1.x);
  1512.     INPUT_TO_GRID_X (edge->line.p2.x, e.line.p2.x);
  1513.  
  1514.     e.dir = edge->dir;
  1515.  
  1516.     polygon_add_edge (converter->polygon, &e);
  1517. }
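/* Illustrative sketch (not from the library): filling in a cairo_edge_t
 * by hand and feeding it to the converter.  Coordinates are assumed to
 * be in 24.8 fixed point (GLITTER_INPUT_BITS == 8); the fields set here
 * are exactly those read by glitter_scan_converter_add_edge above. */
#if 0
static void
example_add_one_edge (glitter_scan_converter_t *converter)
{
    cairo_edge_t edge;

    edge.line.p1.x = 10 << 8;   /* from (10, 2) ... */
    edge.line.p1.y =  2 << 8;
    edge.line.p2.x = 10 << 8;   /* ... to (10, 7): a vertical edge */
    edge.line.p2.y =  7 << 8;
    edge.top    = edge.line.p1.y;   /* vertical extent to rasterise */
    edge.bottom = edge.line.p2.y;
    edge.dir    = 1;                /* +1; -1 would reverse the orientation */

    glitter_scan_converter_add_edge (converter, &edge);
}
#endif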
  1518.  
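/* Step every edge on the active list forward by 'count' whole pixel
 * rows, unlinking edges whose remaining height is used up. */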
  1519. static void
  1520. step_edges (struct active_list *active, int count)
  1521. {
  1522.     struct edge *edge;
  1523.  
  1524.     count *= GRID_Y;
  1525.     for (edge = active->head.next; edge != &active->tail; edge = edge->next) {
  1526.         edge->height_left -= count;
  1527.         if (! edge->height_left) {
  1528.             edge->prev->next = edge->next;
  1529.             edge->next->prev = edge->prev;
  1530.             active->min_height = -1;
  1531.         }
  1532.     }
  1533. }
  1534.  
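/* Convert the coverage cells accumulated for one pixel row into
 * half-open spans and pass them to the renderer as 8-bit alpha. */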
  1535. static glitter_status_t
  1536. blit_a8 (struct cell_list *cells,
  1537.          cairo_span_renderer_t *renderer,
  1538.          cairo_half_open_span_t *spans,
  1539.          int y, int height,
  1540.          int xmin, int xmax)
  1541. {
  1542.     struct cell *cell = cells->head.next;
  1543.     int prev_x = xmin, last_x = -1;
  1544.     int16_t cover = 0, last_cover = 0;
  1545.     unsigned num_spans;
  1546.  
  1547.     if (cell == &cells->tail)
  1548.         return CAIRO_STATUS_SUCCESS;
  1549.  
  1550.     /* Skip cells to the left of the clip region. */
  1551.     while (cell->x < xmin) {
  1552.         cover += cell->covered_height;
  1553.         cell = cell->next;
  1554.     }
  1555.     cover *= GRID_X*2;
  1556.  
  1557.     /* Form the spans from the coverages and areas. */
  1558.     num_spans = 0;
  1559.     for (; cell->x < xmax; cell = cell->next) {
  1560.         int x = cell->x;
  1561.         int16_t area;
  1562.  
  1563.         if (x > prev_x && cover != last_cover) {
  1564.             spans[num_spans].x = prev_x;
  1565.             spans[num_spans].coverage = GRID_AREA_TO_ALPHA (cover);
  1566.             last_cover = cover;
  1567.             last_x = prev_x;
  1568.             ++num_spans;
  1569.         }
  1570.  
  1571.         cover += cell->covered_height*GRID_X*2;
  1572.         area = cover - cell->uncovered_area;
  1573.  
  1574.         if (area != last_cover) {
  1575.             spans[num_spans].x = x;
  1576.             spans[num_spans].coverage = GRID_AREA_TO_ALPHA (area);
  1577.             last_cover = area;
  1578.             last_x = x;
  1579.             ++num_spans;
  1580.         }
  1581.  
  1582.         prev_x = x+1;
  1583.     }
  1584.  
  1585.     if (prev_x <= xmax && cover != last_cover) {
  1586.         spans[num_spans].x = prev_x;
  1587.         spans[num_spans].coverage = GRID_AREA_TO_ALPHA (cover);
  1588.         last_cover = cover;
  1589.         last_x = prev_x;
  1590.         ++num_spans;
  1591.     }
  1592.  
  1593.     if (last_x < xmax && last_cover) {
  1594.         spans[num_spans].x = xmax;
  1595.         spans[num_spans].coverage = 0;
  1596.         ++num_spans;
  1597.     }
  1598.  
  1599.     /* Dump them into the renderer. */
  1600.     return renderer->render_rows (renderer, y, height, spans, num_spans);
  1601. }
  1602.  
  1603. #define GRID_AREA_TO_A1(A)  ((GRID_AREA_TO_ALPHA (A) > 127) ? 255 : 0)
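/* As blit_a8, but coverage is thresholded to 0 or 255 to produce
 * unantialiased (1-bit) spans. */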
  1604. static glitter_status_t
  1605. blit_a1 (struct cell_list *cells,
  1606.          cairo_span_renderer_t *renderer,
  1607.          cairo_half_open_span_t *spans,
  1608.          int y, int height,
  1609.          int xmin, int xmax)
  1610. {
  1611.     struct cell *cell = cells->head.next;
  1612.     int prev_x = xmin, last_x = -1;
  1613.     int16_t cover = 0;
  1614.     uint8_t coverage, last_cover = 0;
  1615.     unsigned num_spans;
  1616.  
  1617.     if (cell == &cells->tail)
  1618.         return CAIRO_STATUS_SUCCESS;
  1619.  
  1620.     /* Skip cells to the left of the clip region. */
  1621.     while (cell->x < xmin) {
  1622.         cover += cell->covered_height;
  1623.         cell = cell->next;
  1624.     }
  1625.     cover *= GRID_X*2;
  1626.  
  1627.     /* Form the spans from the coverages and areas. */
  1628.     num_spans = 0;
  1629.     for (; cell->x < xmax; cell = cell->next) {
  1630.         int x = cell->x;
  1631.         int16_t area;
  1632.  
  1633.         coverage = GRID_AREA_TO_A1 (cover);
  1634.         if (x > prev_x && coverage != last_cover) {
  1635.             last_x = spans[num_spans].x = prev_x;
  1636.             last_cover = spans[num_spans].coverage = coverage;
  1637.             ++num_spans;
  1638.         }
  1639.  
  1640.         cover += cell->covered_height*GRID_X*2;
  1641.         area = cover - cell->uncovered_area;
  1642.  
  1643.         coverage = GRID_AREA_TO_A1 (area);
  1644.         if (coverage != last_cover) {
  1645.             last_x = spans[num_spans].x = x;
  1646.             last_cover = spans[num_spans].coverage = coverage;
  1647.             ++num_spans;
  1648.         }
  1649.  
  1650.         prev_x = x+1;
  1651.     }
  1652.  
  1653.     coverage = GRID_AREA_TO_A1 (cover);
  1654.     if (prev_x <= xmax && coverage != last_cover) {
  1655.         last_x = spans[num_spans].x = prev_x;
  1656.         last_cover = spans[num_spans].coverage = coverage;
  1657.         ++num_spans;
  1658.     }
  1659.  
  1660.     if (last_x < xmax && last_cover) {
  1661.         spans[num_spans].x = xmax;
  1662.         spans[num_spans].coverage = 0;
  1663.         ++num_spans;
  1664.     }
  1665.     if (num_spans == 1)
  1666.         return CAIRO_STATUS_SUCCESS;
  1667.  
  1668.     /* Dump them into the renderer. */
  1669.     return renderer->render_rows (renderer, y, height, spans, num_spans);
  1670. }
  1671.  
  1672.  
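/* Sweep the polygon from top to bottom one pixel row at a time.  A row
 * that starts no new edges may be handled by the analytical full-row
 * stepper (or skipped entirely if the active list is empty); otherwise
 * the row is subsampled GRID_Y times.  Each finished row is emitted
 * through blit_a8 or blit_a1 depending on the antialias flag. */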
  1673. I void
  1674. glitter_scan_converter_render(glitter_scan_converter_t *converter,
  1675.                               unsigned int winding_mask,
  1676.                               int antialias,
  1677.                               cairo_span_renderer_t *renderer)
  1678. {
  1679.     int i, j;
  1680.     int ymax_i = converter->ymax / GRID_Y;
  1681.     int ymin_i = converter->ymin / GRID_Y;
  1682.     int xmin_i, xmax_i;
  1683.     int h = ymax_i - ymin_i;
  1684.     struct polygon *polygon = converter->polygon;
  1685.     struct cell_list *coverages = converter->coverages;
  1686.     struct active_list *active = converter->active;
  1687.     struct edge *buckets[GRID_Y] = { 0 };
  1688.  
  1689.     xmin_i = converter->xmin / GRID_X;
  1690.     xmax_i = converter->xmax / GRID_X;
  1691.     if (xmin_i >= xmax_i)
  1692.         return;
  1693.  
  1694.     /* Render each pixel row. */
  1695.     for (i = 0; i < h; i = j) {
  1696.         int do_full_row = 0;
  1697.  
  1698.         j = i + 1;
  1699.  
  1700.         /* Determine if we can ignore this row or use the full pixel
  1701.          * stepper. */
  1702.         if (! polygon->y_buckets[i]) {
  1703.             if (active->head.next == &active->tail) {
  1704.                 active->min_height = INT_MAX;
  1705.                 active->is_vertical = 1;
  1706.                 for (; j < h && ! polygon->y_buckets[j]; j++)
  1707.                     ;
  1708.                 continue;
  1709.             }
  1710.  
  1711.             do_full_row = can_do_full_row (active);
  1712.         }
  1713.  
  1714.         if (do_full_row) {
  1715.             /* Step by a full pixel row's worth. */
  1716.             full_row (active, coverages, winding_mask);
  1717.  
  1718.             if (active->is_vertical) {
  1719.                 while (j < h &&
  1720.                        polygon->y_buckets[j] == NULL &&
  1721.                        active->min_height >= 2*GRID_Y)
  1722.                 {
  1723.                     active->min_height -= GRID_Y;
  1724.                     j++;
  1725.                 }
  1726.                 if (j != i + 1)
  1727.                     step_edges (active, j - (i + 1));
  1728.             }
  1729.         } else {
  1730.             int sub;
  1731.  
  1732.             polygon_fill_buckets (active,
  1733.                                   polygon->y_buckets[i],
  1734.                                   (i+ymin_i)*GRID_Y,
  1735.                                   buckets);
  1736.  
  1737.             /* Subsample this row. */
  1738.             for (sub = 0; sub < GRID_Y; sub++) {
  1739.                 if (buckets[sub]) {
  1740.                     active_list_merge_edges_from_bucket (active, buckets[sub]);
  1741.                     buckets[sub] = NULL;
  1742.                 }
  1743.  
  1744.                 sub_row (active, coverages, winding_mask);
  1745.             }
  1746.         }
  1747.  
  1748.         if (antialias)
  1749.             blit_a8 (coverages, renderer, converter->spans,
  1750.                      i+ymin_i, j-i, xmin_i, xmax_i);
  1751.         else
  1752.             blit_a1 (coverages, renderer, converter->spans,
  1753.                      i+ymin_i, j-i, xmin_i, xmax_i);
  1754.         cell_list_reset (coverages);
  1755.  
  1756.         active->min_height -= GRID_Y;
  1757.     }
  1758. }
  1759.  
  1760. struct _cairo_tor_scan_converter {
  1761.     cairo_scan_converter_t base;
  1762.  
  1763.     glitter_scan_converter_t converter[1];
  1764.     cairo_fill_rule_t fill_rule;
  1765.     cairo_antialias_t antialias;
  1766.  
  1767.     jmp_buf jmp;
  1768. };
  1769.  
  1770. typedef struct _cairo_tor_scan_converter cairo_tor_scan_converter_t;
  1771.  
  1772. static void
  1773. _cairo_tor_scan_converter_destroy (void *converter)
  1774. {
  1775.     cairo_tor_scan_converter_t *self = converter;
  1776.     if (self == NULL) {
  1777.         return;
  1778.     }
  1779.     _glitter_scan_converter_fini (self->converter);
  1780.     free(self);
  1781. }
  1782.  
  1783. cairo_status_t
  1784. _cairo_tor_scan_converter_add_polygon (void             *converter,
  1785.                                        const cairo_polygon_t *polygon)
  1786. {
  1787.     cairo_tor_scan_converter_t *self = converter;
  1788.     int i;
  1789.  
  1790. #if 0
  1791.     FILE *file = fopen ("polygon.txt", "w");
  1792.     _cairo_debug_print_polygon (file, polygon);
  1793.     fclose (file);
  1794. #endif
  1795.  
  1796.     for (i = 0; i < polygon->num_edges; i++)
  1797.          glitter_scan_converter_add_edge (self->converter, &polygon->edges[i]);
  1798.  
  1799.     return CAIRO_STATUS_SUCCESS;
  1800. }
  1801.  
  1802. static cairo_status_t
  1803. _cairo_tor_scan_converter_generate (void                        *converter,
  1804.                                     cairo_span_renderer_t       *renderer)
  1805. {
  1806.     cairo_tor_scan_converter_t *self = converter;
  1807.     cairo_status_t status;
  1808.  
  1809.     if ((status = setjmp (self->jmp)))
  1810.         return _cairo_scan_converter_set_error (self, _cairo_error (status));
  1811.  
  1812.     glitter_scan_converter_render (self->converter,
  1813.                                    self->fill_rule == CAIRO_FILL_RULE_WINDING ? ~0 : 1,
  1814.                                    self->antialias != CAIRO_ANTIALIAS_NONE,
  1815.                                    renderer);
  1816.     return CAIRO_STATUS_SUCCESS;
  1817. }
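/* Note on the winding mask above: the mask is applied to the
 * accumulated winding count, so ~0 treats any non-zero count as inside
 * (winding fill rule), while 1 keeps only the parity bit (even-odd
 * fill rule). */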
  1818.  
  1819. cairo_scan_converter_t *
  1820. _cairo_tor_scan_converter_create (int                   xmin,
  1821.                                   int                   ymin,
  1822.                                   int                   xmax,
  1823.                                   int                   ymax,
  1824.                                   cairo_fill_rule_t     fill_rule,
  1825.                                   cairo_antialias_t     antialias)
  1826. {
  1827.     cairo_tor_scan_converter_t *self;
  1828.     cairo_status_t status;
  1829.  
  1830.     self = malloc (sizeof(struct _cairo_tor_scan_converter));
  1831.     if (unlikely (self == NULL)) {
  1832.         status = _cairo_error (CAIRO_STATUS_NO_MEMORY);
  1833.         goto bail_nomem;
  1834.     }
  1835.  
  1836.     self->base.destroy = _cairo_tor_scan_converter_destroy;
  1837.     self->base.generate = _cairo_tor_scan_converter_generate;
  1838.  
  1839.     _glitter_scan_converter_init (self->converter, &self->jmp);
  1840.     status = glitter_scan_converter_reset (self->converter,
  1841.                                            xmin, ymin, xmax, ymax);
  1842.     if (unlikely (status))
  1843.         goto bail;
  1844.  
  1845.     self->fill_rule = fill_rule;
  1846.     self->antialias = antialias;
  1847.  
  1848.     return &self->base;
  1849.  
  1850.  bail:
  1851.     self->base.destroy(&self->base);
  1852.  bail_nomem:
  1853.     return _cairo_scan_converter_create_in_error (status);
  1854. }
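/* Illustrative sketch (not part of this file): the typical lifecycle of
 * the converter as a hypothetical span-rendering caller might use it.
 * 'extents', 'polygon' and 'renderer' are assumed to be supplied by the
 * caller; error handling for a failed create is elided. */
#if 0
static cairo_status_t
example_rasterize (const cairo_rectangle_int_t *extents,
                   const cairo_polygon_t *polygon,
                   cairo_span_renderer_t *renderer)
{
    cairo_scan_converter_t *converter;
    cairo_status_t status;

    converter = _cairo_tor_scan_converter_create (extents->x,
                                                  extents->y,
                                                  extents->x + extents->width,
                                                  extents->y + extents->height,
                                                  CAIRO_FILL_RULE_WINDING,
                                                  CAIRO_ANTIALIAS_DEFAULT);

    status = _cairo_tor_scan_converter_add_polygon (converter, polygon);
    if (status == CAIRO_STATUS_SUCCESS)
        status = converter->generate (converter, renderer);

    converter->destroy (converter);
    return status;
}
#endif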
  1855.