  1. /*
  2.  * Copyright © 2004 Carl Worth
  3.  * Copyright © 2006 Red Hat, Inc.
  4.  * Copyright © 2007 David Turner
  5.  * Copyright © 2008 M Joonas Pihlaja
  6.  * Copyright © 2008 Chris Wilson
  7.  * Copyright © 2009 Intel Corporation
  8.  *
  9.  * This library is free software; you can redistribute it and/or
  10.  * modify it either under the terms of the GNU Lesser General Public
  11.  * License version 2.1 as published by the Free Software Foundation
  12.  * (the "LGPL") or, at your option, under the terms of the Mozilla
  13.  * Public License Version 1.1 (the "MPL"). If you do not alter this
  14.  * notice, a recipient may use your version of this file under either
  15.  * the MPL or the LGPL.
  16.  *
  17.  * You should have received a copy of the LGPL along with this library
  18.  * in the file COPYING-LGPL-2.1; if not, write to the Free Software
  19.  * Foundation, Inc., 51 Franklin Street, Suite 500, Boston, MA 02110-1335, USA
  20.  * You should have received a copy of the MPL along with this library
  21.  * in the file COPYING-MPL-1.1
  22.  *
  23.  * The contents of this file are subject to the Mozilla Public License
  24.  * Version 1.1 (the "License"); you may not use this file except in
  25.  * compliance with the License. You may obtain a copy of the License at
  26.  * http://www.mozilla.org/MPL/
  27.  *
  28.  * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY
  29.  * OF ANY KIND, either express or implied. See the LGPL or the MPL for
  30.  * the specific language governing rights and limitations.
  31.  *
  32.  * The Original Code is the cairo graphics library.
  33.  *
  34.  * The Initial Developer of the Original Code is Carl Worth
  35.  *
  36.  * Contributor(s):
  37.  *      Carl D. Worth <cworth@cworth.org>
  38.  *      M Joonas Pihlaja <jpihlaja@cc.helsinki.fi>
  39.  *      Chris Wilson <chris@chris-wilson.co.uk>
  40.  */
  41.  
  42. /* Provide definitions for standalone compilation */
  43. #include "cairoint.h"
  44.  
  45. #include "cairo-error-private.h"
  46. #include "cairo-list-inline.h"
  47. #include "cairo-freelist-private.h"
  48. #include "cairo-combsort-inline.h"
  49.  
  50. #include <setjmp.h>
  51.  
  52. #define STEP_X CAIRO_FIXED_ONE
  53. #define STEP_Y CAIRO_FIXED_ONE
  54. #define UNROLL3(x) x x x
  55.  
  56. #define STEP_XY (2*STEP_X*STEP_Y) /* Unit area in the step. */
  57. #define AREA_TO_ALPHA(c)  (((c)*255 + STEP_XY/2) / STEP_XY)
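/* A quick worked example of the coverage-to-alpha conversion above, assuming
 * the usual 24.8 fixed-point format (CAIRO_FIXED_ONE == 256), which makes
 * STEP_XY == 2*256*256 == 131072, the full area one pixel can accumulate
 * within a single scan row:
 *
 *   AREA_TO_ALPHA (131072) == (131072*255 + 65536) / 131072 == 255   full cover
 *   AREA_TO_ALPHA ( 65536) == ( 65536*255 + 65536) / 131072 == 128   half cover
 *   AREA_TO_ALPHA (     0) == 0                                      no cover
 *
 * i.e. an accumulated area in [0, STEP_XY] is rescaled to an alpha in
 * [0, 255], rounding to nearest.
 */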
  58.  
  59. typedef struct _cairo_bo_intersect_ordinate {
  60.     int32_t ordinate;
  61.     enum { EXACT, INEXACT } exactness;
  62. } cairo_bo_intersect_ordinate_t;
  63.  
  64. typedef struct _cairo_bo_intersect_point {
  65.     cairo_bo_intersect_ordinate_t x;
  66.     cairo_bo_intersect_ordinate_t y;
  67. } cairo_bo_intersect_point_t;
  68.  
  69. struct quorem {
  70.     cairo_fixed_t quo;
  71.     cairo_fixed_t rem;
  72. };
  73.  
  74. struct run {
  75.     struct run *next;
  76.     int sign;
  77.     cairo_fixed_t y;
  78. };
  79.  
  80. typedef struct edge {
  81.     cairo_list_t link;
  82.  
  83.     cairo_edge_t edge;
  84.  
  85.     /* Current x coordinate and advancement.
  86.      * Initialised to the x coordinate of the top of the
  87.      * edge. The quotient is in cairo_fixed_t units and the
  88.      * remainder is mod dy in cairo_fixed_t units.
  89.      */
  90.     cairo_fixed_t dy;
  91.     struct quorem x;
  92.     struct quorem dxdy;
  93.     struct quorem dxdy_full;
  94.  
  95.     cairo_bool_t vertical;
  96.     unsigned int flags;
  97.  
  98.     int current_sign;
  99.     struct run *runs;
  100. } edge_t;
  101.  
  102. enum {
  103.     START = 0x1,
  104.     STOP = 0x2,
  105. };
  106.  
  107. /* the parent is always given by index/2 */
  108. #define PQ_PARENT_INDEX(i) ((i) >> 1)
  109. #define PQ_FIRST_ENTRY 1
  110.  
  111. /* left and right children are index * 2 and (index * 2) +1 respectively */
  112. #define PQ_LEFT_CHILD_INDEX(i) ((i) << 1)
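/* Illustration of the implicit binary heap layout: with 1-based indexing the
 * root (the smallest event, per event_compare()) lives at PQ_FIRST_ENTRY == 1,
 * the children of the element at index 5 live at indices 10 and 11, and its
 * parent lives at index 2.
 */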
  113.  
  114. typedef enum {
  115.     EVENT_TYPE_STOP,
  116.     EVENT_TYPE_INTERSECTION,
  117.     EVENT_TYPE_START
  118. } event_type_t;
  119.  
  120. typedef struct _event {
  121.     cairo_fixed_t y;
  122.     event_type_t type;
  123. } event_t;
  124.  
  125. typedef struct _start_event {
  126.     cairo_fixed_t y;
  127.     event_type_t type;
  128.     edge_t *edge;
  129. } start_event_t;
  130.  
  131. typedef struct _queue_event {
  132.     cairo_fixed_t y;
  133.     event_type_t type;
  134.     edge_t *e1;
  135.     edge_t *e2;
  136. } queue_event_t;
  137.  
  138. typedef struct _pqueue {
  139.     int size, max_size;
  140.  
  141.     event_t **elements;
  142.     event_t *elements_embedded[1024];
  143. } pqueue_t;
  144.  
  145. struct cell {
  146.     struct cell *prev;
  147.     struct cell *next;
  148.     int          x;
  149.     int          uncovered_area;
  150.     int          covered_height;
  151. };
  152.  
  153. typedef struct _sweep_line {
  154.     cairo_list_t active;
  155.     cairo_list_t stopped;
  156.     cairo_list_t *insert_cursor;
  157.     cairo_bool_t is_vertical;
  158.  
  159.     cairo_fixed_t current_row;
  160.     cairo_fixed_t current_subrow;
  161.  
  162.     struct coverage {
  163.         struct cell head;
  164.         struct cell tail;
  165.  
  166.         struct cell *cursor;
  167.         int count;
  168.  
  169.         cairo_freepool_t pool;
  170.     } coverage;
  171.  
  172.     struct event_queue {
  173.         pqueue_t pq;
  174.         event_t **start_events;
  175.  
  176.         cairo_freepool_t pool;
  177.     } queue;
  178.  
  179.     cairo_freepool_t runs;
  180.  
  181.     jmp_buf unwind;
  182. } sweep_line_t;
  183.  
  184. cairo_always_inline static struct quorem
  185. floored_divrem (int a, int b)
  186. {
  187.     struct quorem qr;
  188.     qr.quo = a/b;
  189.     qr.rem = a%b;
  190.     if ((a^b)<0 && qr.rem) {
  191.         qr.quo--;
  192.         qr.rem += b;
  193.     }
  194.     return qr;
  195. }
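/* Worked example: C's '/' and '%' truncate towards zero, so for a == -7,
 * b == 3 they give -2 and -1; the signs differ and the remainder is non-zero,
 * so the correction above yields quo == -3, rem == 2, i.e. -7 == 3*(-3) + 2
 * with 0 <= rem < b, which is the floored result the DDA stepping relies on.
 * Same-sign inputs, e.g. a == 7, b == 3, pass through unchanged as {2, 1}.
 */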
  196.  
  197. static struct quorem
  198. floored_muldivrem(int x, int a, int b)
  199. {
  200.     struct quorem qr;
  201.     long long xa = (long long)x*a;
  202.     qr.quo = xa/b;
  203.     qr.rem = xa%b;
  204.     if ((xa>=0) != (b>=0) && qr.rem) {
  205.         qr.quo--;
  206.         qr.rem += b;
  207.     }
  208.     return qr;
  209. }
  210.  
  211. static cairo_fixed_t
  212. line_compute_intersection_x_for_y (const cairo_line_t *line,
  213.                                    cairo_fixed_t y)
  214. {
  215.     cairo_fixed_t x, dy;
  216.  
  217.     if (y == line->p1.y)
  218.         return line->p1.x;
  219.     if (y == line->p2.y)
  220.         return line->p2.x;
  221.  
  222.     x = line->p1.x;
  223.     dy = line->p2.y - line->p1.y;
  224.     if (dy != 0) {
  225.         x += _cairo_fixed_mul_div_floor (y - line->p1.y,
  226.                                          line->p2.x - line->p1.x,
  227.                                          dy);
  228.     }
  229.  
  230.     return x;
  231. }
  232.  
  233. /*
  234.  * We need to compare the x-coordinates of a pair of lines for a particular y,
  235.  * without loss of precision.
  236.  *
  237.  * The x-coordinate along an edge for a given y is:
  238.  *   X = A_x + (Y - A_y) * A_dx / A_dy
  239.  *
  240.  * So the inequality we wish to test is:
  241.  *   A_x + (Y - A_y) * A_dx / A_dy ∘ B_x + (Y - B_y) * B_dx / B_dy,
  242.  * where ∘ is our inequality operator.
  243.  *
  244.  * By construction, we know that A_dy and B_dy (and (Y - A_y), (Y - B_y)) are
  245.  * all positive, so we can rearrange it thus without causing a sign change:
  246.  *   A_dy * B_dy * (A_x - B_x) ∘ (Y - B_y) * B_dx * A_dy
  247.  *                                 - (Y - A_y) * A_dx * B_dy
  248.  *
  249.  * Given the assumption that all the deltas fit within 32 bits, we can compute
  250.  * this comparison directly using 128 bit arithmetic. For certain, but common,
  251.  * input we can reduce this down to a single 32 bit compare by inspecting the
  252.  * deltas.
  253.  *
  254.  * (And put the burden of the work on developing fast 128 bit ops, which are
  255.  * required throughout the tessellator.)
  256.  *
  257.  * See the similar discussion for _slope_compare().
  258.  */
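/* A numeric check of the rearrangement above: take A from (0, 0) to (10, 100)
 * and B from (5, 0) to (0, 100), with Y = 50. The true abscissae are X_A = 5
 * and X_B = 2.5, so A lies to the right of B. The rearranged form agrees
 * without performing any division:
 *   A_dy * B_dy * (A_x - B_x)                         = 100*100*(0-5)  = -50000
 *   (Y - B_y) * B_dx * A_dy - (Y - A_y) * A_dx * B_dy = -25000 - 50000 = -75000
 * and -50000 > -75000, matching X_A > X_B.
 */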
  259. static int
  260. edges_compare_x_for_y_general (const cairo_edge_t *a,
  261.                                const cairo_edge_t *b,
  262.                                int32_t y)
  263. {
  264.     /* XXX: We're assuming here that dx and dy will still fit in 32
  265.      * bits. That's not true in general as there could be overflow. We
  266.      * should prevent that before the tessellation algorithm
  267.      * begins.
  268.      */
  269.     int32_t dx;
  270.     int32_t adx, ady;
  271.     int32_t bdx, bdy;
  272.     enum {
  273.        HAVE_NONE    = 0x0,
  274.        HAVE_DX      = 0x1,
  275.        HAVE_ADX     = 0x2,
  276.        HAVE_DX_ADX  = HAVE_DX | HAVE_ADX,
  277.        HAVE_BDX     = 0x4,
  278.        HAVE_DX_BDX  = HAVE_DX | HAVE_BDX,
  279.        HAVE_ADX_BDX = HAVE_ADX | HAVE_BDX,
  280.        HAVE_ALL     = HAVE_DX | HAVE_ADX | HAVE_BDX
  281.     } have_dx_adx_bdx = HAVE_ALL;
  282.  
  283.     /* don't bother solving for abscissa if the edges' bounding boxes
  284.      * can be used to order them. */
  285.     {
  286.            int32_t amin, amax;
  287.            int32_t bmin, bmax;
  288.            if (a->line.p1.x < a->line.p2.x) {
  289.                    amin = a->line.p1.x;
  290.                    amax = a->line.p2.x;
  291.            } else {
  292.                    amin = a->line.p2.x;
  293.                    amax = a->line.p1.x;
  294.            }
  295.            if (b->line.p1.x < b->line.p2.x) {
  296.                    bmin = b->line.p1.x;
  297.                    bmax = b->line.p2.x;
  298.            } else {
  299.                    bmin = b->line.p2.x;
  300.                    bmax = b->line.p1.x;
  301.            }
  302.            if (amax < bmin) return -1;
  303.            if (amin > bmax) return +1;
  304.     }
  305.  
  306.     ady = a->line.p2.y - a->line.p1.y;
  307.     adx = a->line.p2.x - a->line.p1.x;
  308.     if (adx == 0)
  309.         have_dx_adx_bdx &= ~HAVE_ADX;
  310.  
  311.     bdy = b->line.p2.y - b->line.p1.y;
  312.     bdx = b->line.p2.x - b->line.p1.x;
  313.     if (bdx == 0)
  314.         have_dx_adx_bdx &= ~HAVE_BDX;
  315.  
  316.     dx = a->line.p1.x - b->line.p1.x;
  317.     if (dx == 0)
  318.         have_dx_adx_bdx &= ~HAVE_DX;
  319.  
  320. #define L _cairo_int64x32_128_mul (_cairo_int32x32_64_mul (ady, bdy), dx)
  321. #define A _cairo_int64x32_128_mul (_cairo_int32x32_64_mul (adx, bdy), y - a->line.p1.y)
  322. #define B _cairo_int64x32_128_mul (_cairo_int32x32_64_mul (bdx, ady), y - b->line.p1.y)
  323.     switch (have_dx_adx_bdx) {
  324.     default:
  325.     case HAVE_NONE:
  326.         return 0;
  327.     case HAVE_DX:
  328.         /* A_dy * B_dy * (A_x - B_x) ∘ 0 */
  329.         return dx; /* ady * bdy is positive definite */
  330.     case HAVE_ADX:
  331.         /* 0 ∘  - (Y - A_y) * A_dx * B_dy */
  332.         return adx; /* bdy * (y - a->top.y) is positive definite */
  333.     case HAVE_BDX:
  334.         /* 0 ∘ (Y - B_y) * B_dx * A_dy */
  335.         return -bdx; /* ady * (y - b->top.y) is positive definite */
  336.     case HAVE_ADX_BDX:
  337.         /*  0 ∘ (Y - B_y) * B_dx * A_dy - (Y - A_y) * A_dx * B_dy */
  338.         if ((adx ^ bdx) < 0) {
  339.             return adx;
  340.         } else if (a->line.p1.y == b->line.p1.y) { /* common origin */
  341.             cairo_int64_t adx_bdy, bdx_ady;
  342.  
  343.             /* ∴ A_dx * B_dy ∘ B_dx * A_dy */
  344.  
  345.             adx_bdy = _cairo_int32x32_64_mul (adx, bdy);
  346.             bdx_ady = _cairo_int32x32_64_mul (bdx, ady);
  347.  
  348.             return _cairo_int64_cmp (adx_bdy, bdx_ady);
  349.         } else
  350.             return _cairo_int128_cmp (A, B);
  351.     case HAVE_DX_ADX:
  352.         /* A_dy * (A_x - B_x) ∘ - (Y - A_y) * A_dx */
  353.         if ((-adx ^ dx) < 0) {
  354.             return dx;
  355.         } else {
  356.             cairo_int64_t ady_dx, dy_adx;
  357.  
  358.             ady_dx = _cairo_int32x32_64_mul (ady, dx);
  359.             dy_adx = _cairo_int32x32_64_mul (a->line.p1.y - y, adx);
  360.  
  361.             return _cairo_int64_cmp (ady_dx, dy_adx);
  362.         }
  363.     case HAVE_DX_BDX:
  364.         /* B_dy * (A_x - B_x) ∘ (Y - B_y) * B_dx */
  365.         if ((bdx ^ dx) < 0) {
  366.             return dx;
  367.         } else {
  368.             cairo_int64_t bdy_dx, dy_bdx;
  369.  
  370.             bdy_dx = _cairo_int32x32_64_mul (bdy, dx);
  371.             dy_bdx = _cairo_int32x32_64_mul (y - b->line.p1.y, bdx);
  372.  
  373.             return _cairo_int64_cmp (bdy_dx, dy_bdx);
  374.         }
  375.     case HAVE_ALL:
  376.         /* XXX try comparing (a->line.p2.x - b->line.p2.x) et al */
  377.         return _cairo_int128_cmp (L, _cairo_int128_sub (B, A));
  378.     }
  379. #undef B
  380. #undef A
  381. #undef L
  382. }
  383.  
  384. /*
  385.  * We need to compare the x-coordinate of a line for a particular y against
  386.  * a given x, without loss of precision.
  387.  *
  388.  * The x-coordinate along an edge for a given y is:
  389.  *   X = A_x + (Y - A_y) * A_dx / A_dy
  390.  *
  391.  * So the inequality we wish to test is:
  392.  *   A_x + (Y - A_y) * A_dx / A_dy ∘ X
  393.  * where ∘ is our inequality operator.
  394.  *
  395.  * By construction, we know that A_dy and (Y - A_y) are
  396.  * both positive, so we can rearrange it thus without causing a sign change:
  397.  *   (Y - A_y) * A_dx ∘ (X - A_x) * A_dy
  398.  *
  399.  * Given the assumption that all the deltas fit within 32 bits, we can compute
  400.  * this comparison directly using 64 bit arithmetic.
  401.  *
  402.  * See the similar discussion for _slope_compare() and
  403.  * edges_compare_x_for_y_general().
  404.  */
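/* Numeric check: for an edge from (0, 0) to (10, 100) with Y = 50 and X = 4,
 * the edge's abscissa at Y is 5, i.e. the edge lies to the right of X. The
 * rearranged test agrees: (Y - A_y) * A_dx = 50*10 = 500 versus
 * (X - A_x) * A_dy = 4*100 = 400, and 500 > 400, so the comparison below
 * returns a positive value.
 */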
  405. static int
  406. edge_compare_for_y_against_x (const cairo_edge_t *a,
  407.                               int32_t y,
  408.                               int32_t x)
  409. {
  410.     int32_t adx, ady;
  411.     int32_t dx, dy;
  412.     cairo_int64_t L, R;
  413.  
  414.     if (a->line.p1.x <= a->line.p2.x) {
  415.         if (x < a->line.p1.x)
  416.             return 1;
  417.         if (x > a->line.p2.x)
  418.             return -1;
  419.     } else {
  420.         if (x < a->line.p2.x)
  421.             return 1;
  422.         if (x > a->line.p1.x)
  423.             return -1;
  424.     }
  425.  
  426.     adx = a->line.p2.x - a->line.p1.x;
  427.     dx = x - a->line.p1.x;
  428.  
  429.     if (adx == 0)
  430.         return -dx;
  431.     if (dx == 0 || (adx ^ dx) < 0)
  432.         return adx;
  433.  
  434.     dy = y - a->line.p1.y;
  435.     ady = a->line.p2.y - a->line.p1.y;
  436.  
  437.     L = _cairo_int32x32_64_mul (dy, adx);
  438.     R = _cairo_int32x32_64_mul (dx, ady);
  439.  
  440.     return _cairo_int64_cmp (L, R);
  441. }
  442.  
  443. static int
  444. edges_compare_x_for_y (const cairo_edge_t *a,
  445.                        const cairo_edge_t *b,
  446.                        int32_t y)
  447. {
  448.     /* If the sweep-line is currently on an end-point of a line,
  449.      * then we know its precise x value (and considering that we often need to
  450.      * compare events at end-points, this happens frequently enough to warrant
  451.      * special casing).
  452.      */
  453.     enum {
  454.        HAVE_NEITHER = 0x0,
  455.        HAVE_AX      = 0x1,
  456.        HAVE_BX      = 0x2,
  457.        HAVE_BOTH    = HAVE_AX | HAVE_BX
  458.     } have_ax_bx = HAVE_BOTH;
  459.     int32_t ax, bx;
  460.  
  461.     /* XXX given we have x and dx? */
  462.  
  463.     if (y == a->line.p1.y)
  464.         ax = a->line.p1.x;
  465.     else if (y == a->line.p2.y)
  466.         ax = a->line.p2.x;
  467.     else
  468.         have_ax_bx &= ~HAVE_AX;
  469.  
  470.     if (y == b->line.p1.y)
  471.         bx = b->line.p1.x;
  472.     else if (y == b->line.p2.y)
  473.         bx = b->line.p2.x;
  474.     else
  475.         have_ax_bx &= ~HAVE_BX;
  476.  
  477.     switch (have_ax_bx) {
  478.     default:
  479.     case HAVE_NEITHER:
  480.         return edges_compare_x_for_y_general (a, b, y);
  481.     case HAVE_AX:
  482.         return -edge_compare_for_y_against_x (b, y, ax);
  483.     case HAVE_BX:
  484.         return edge_compare_for_y_against_x (a, y, bx);
  485.     case HAVE_BOTH:
  486.         return ax - bx;
  487.     }
  488. }
  489.  
  490. static inline int
  491. slope_compare (const edge_t *a,
  492.                const edge_t *b)
  493. {
  494.     cairo_int64_t L, R;
  495.     int cmp;
  496.  
  497.     cmp = a->dxdy.quo - b->dxdy.quo;
  498.     if (cmp)
  499.         return cmp;
  500.  
  501.     if (a->dxdy.rem == 0)
  502.         return -b->dxdy.rem;
  503.     if (b->dxdy.rem == 0)
  504.         return a->dxdy.rem;
  505.  
  506.     L = _cairo_int32x32_64_mul (b->dy, a->dxdy.rem);
  507.     R = _cairo_int32x32_64_mul (a->dy, b->dxdy.rem);
  508.     return _cairo_int64_cmp (L, R);
  509. }
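/* Example of the quotient/remainder slope comparison above: an edge with
 * dxdy = {quo 0, rem 1} over dy = 3 (slope 1/3) against one with
 * dxdy = {quo 0, rem 2} over dy = 5 (slope 2/5). The quotients tie and both
 * remainders are non-zero, so the cross-multiplication decides:
 * L = 5*1 = 5, R = 3*2 = 6, and L < R gives a negative result, matching
 * 1/3 < 2/5.
 */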
  510.  
  511. static inline int
  512. line_equal (const cairo_line_t *a, const cairo_line_t *b)
  513. {
  514.     return a->p1.x == b->p1.x && a->p1.y == b->p1.y &&
  515.            a->p2.x == b->p2.x && a->p2.y == b->p2.y;
  516. }
  517.  
  518. static inline int
  519. sweep_line_compare_edges (const edge_t  *a,
  520.                           const edge_t  *b,
  521.                           cairo_fixed_t y)
  522. {
  523.     int cmp;
  524.  
  525.     if (line_equal (&a->edge.line, &b->edge.line))
  526.         return 0;
  527.  
  528.     cmp = edges_compare_x_for_y (&a->edge, &b->edge, y);
  529.     if (cmp)
  530.         return cmp;
  531.  
  532.     return slope_compare (a, b);
  533. }
  534.  
  535. static inline cairo_int64_t
  536. det32_64 (int32_t a, int32_t b,
  537.           int32_t c, int32_t d)
  538. {
  539.     /* det = a * d - b * c */
  540.     return _cairo_int64_sub (_cairo_int32x32_64_mul (a, d),
  541.                              _cairo_int32x32_64_mul (b, c));
  542. }
  543.  
  544. static inline cairo_int128_t
  545. det64x32_128 (cairo_int64_t a, int32_t       b,
  546.               cairo_int64_t c, int32_t       d)
  547. {
  548.     /* det = a * d - b * c */
  549.     return _cairo_int128_sub (_cairo_int64x32_128_mul (a, d),
  550.                               _cairo_int64x32_128_mul (c, b));
  551. }
  552.  
  553. /* Compute the intersection of the two lines defined by the given edges.
  554.  * The result is a coordinate pair of 32-bit ordinates, each flagged as
  555.  * EXACT or INEXACT depending on whether it had to be rounded.
  556.  *
  557.  * Returns TRUE if a valid intersection is found, FALSE if the lines are
  558.  * parallel or the intersection lies outside the edges' range. */
  559. static cairo_bool_t
  560. intersect_lines (const edge_t *a, const edge_t *b,
  561.                  cairo_bo_intersect_point_t     *intersection)
  562. {
  563.     cairo_int64_t a_det, b_det;
  564.  
  565.     /* XXX: We're assuming here that dx and dy will still fit in 32
  566.      * bits. That's not true in general as there could be overflow. We
  567.      * should prevent that before the tessellation algorithm begins.
  568.      * What we're doing to mitigate this is to perform clamping in
  569.      * cairo_bo_tessellate_polygon().
  570.      */
  571.     int32_t dx1 = a->edge.line.p1.x - a->edge.line.p2.x;
  572.     int32_t dy1 = a->edge.line.p1.y - a->edge.line.p2.y;
  573.  
  574.     int32_t dx2 = b->edge.line.p1.x - b->edge.line.p2.x;
  575.     int32_t dy2 = b->edge.line.p1.y - b->edge.line.p2.y;
  576.  
  577.     cairo_int64_t den_det;
  578.     cairo_int64_t R;
  579.     cairo_quorem64_t qr;
  580.  
  581.     den_det = det32_64 (dx1, dy1, dx2, dy2);
  582.  
  583.      /* Q: Can we determine that the lines do not intersect (within range)
  584.       * much more cheaply than computing the intersection point i.e. by
  585.       * avoiding the division?
  586.       *
  587.       *   X = ax + t * adx = bx + s * bdx;
  588.       *   Y = ay + t * ady = by + s * bdy;
  589.       *   ∴ t * (ady*bdx - bdy*adx) = bdx * (by - ay) + bdy * (ax - bx)
  590.       *   => t * L = R
  591.       *
  592.       * Therefore we can reject any intersection (under the criteria for
  593.       * valid intersection events) if:
  594.       *   L^R < 0 => t < 0, or
  595.       *   L<R => t > 1
  596.       *
  597.       * (where top/bottom must at least extend to the line endpoints).
  598.       *
  599.       * A similar substitution can be performed for s, yielding:
  600.       *   s * (ady*bdx - bdy*adx) = ady * (ax - bx) - adx * (ay - by)
  601.       */
  602.     R = det32_64 (dx2, dy2,
  603.                   b->edge.line.p1.x - a->edge.line.p1.x,
  604.                   b->edge.line.p1.y - a->edge.line.p1.y);
  605.     if (_cairo_int64_negative (den_det)) {
  606.         if (_cairo_int64_ge (den_det, R))
  607.             return FALSE;
  608.     } else {
  609.         if (_cairo_int64_le (den_det, R))
  610.             return FALSE;
  611.     }
  612.  
  613.     R = det32_64 (dy1, dx1,
  614.                   a->edge.line.p1.y - b->edge.line.p1.y,
  615.                   a->edge.line.p1.x - b->edge.line.p1.x);
  616.     if (_cairo_int64_negative (den_det)) {
  617.         if (_cairo_int64_ge (den_det, R))
  618.             return FALSE;
  619.     } else {
  620.         if (_cairo_int64_le (den_det, R))
  621.             return FALSE;
  622.     }
  623.  
  624.     /* We now know that the two lines should intersect within range. */
  625.  
  626.     a_det = det32_64 (a->edge.line.p1.x, a->edge.line.p1.y,
  627.                       a->edge.line.p2.x, a->edge.line.p2.y);
  628.     b_det = det32_64 (b->edge.line.p1.x, b->edge.line.p1.y,
  629.                       b->edge.line.p2.x, b->edge.line.p2.y);
  630.  
  631.     /* x = det (a_det, dx1, b_det, dx2) / den_det */
  632.     qr = _cairo_int_96by64_32x64_divrem (det64x32_128 (a_det, dx1,
  633.                                                        b_det, dx2),
  634.                                          den_det);
  635.     if (_cairo_int64_eq (qr.rem, den_det))
  636.         return FALSE;
  637. #if 0
  638.     intersection->x.exactness = _cairo_int64_is_zero (qr.rem) ? EXACT : INEXACT;
  639. #else
  640.     intersection->x.exactness = EXACT;
  641.     if (! _cairo_int64_is_zero (qr.rem)) {
  642.         if (_cairo_int64_negative (den_det) ^ _cairo_int64_negative (qr.rem))
  643.             qr.rem = _cairo_int64_negate (qr.rem);
  644.         qr.rem = _cairo_int64_mul (qr.rem, _cairo_int32_to_int64 (2));
  645.         if (_cairo_int64_ge (qr.rem, den_det)) {
  646.             qr.quo = _cairo_int64_add (qr.quo,
  647.                                        _cairo_int32_to_int64 (_cairo_int64_negative (qr.quo) ? -1 : 1));
  648.         } else
  649.             intersection->x.exactness = INEXACT;
  650.     }
  651. #endif
  652.     intersection->x.ordinate = _cairo_int64_to_int32 (qr.quo);
  653.  
  654.     /* y = det (a_det, dy1, b_det, dy2) / den_det */
  655.     qr = _cairo_int_96by64_32x64_divrem (det64x32_128 (a_det, dy1,
  656.                                                        b_det, dy2),
  657.                                          den_det);
  658.     if (_cairo_int64_eq (qr.rem, den_det))
  659.         return FALSE;
  660. #if 0
  661.     intersection->y.exactness = _cairo_int64_is_zero (qr.rem) ? EXACT : INEXACT;
  662. #else
  663.     intersection->y.exactness = EXACT;
  664.     if (! _cairo_int64_is_zero (qr.rem)) {
  665.         /* compute ceiling away from zero */
  666.         qr.quo = _cairo_int64_add (qr.quo,
  667.                                    _cairo_int32_to_int64 (_cairo_int64_negative (qr.quo) ? -1 : 1));
  668.         intersection->y.exactness = INEXACT;
  669.     }
  670. #endif
  671.     intersection->y.ordinate = _cairo_int64_to_int32 (qr.quo);
  672.  
  673.     return TRUE;
  674. }
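/* Worked example of the determinants used above, with edge A along
 * (0, 0)-(100, 100) and edge B along (100, 0)-(0, 100), all values in
 * cairo_fixed_t units:
 *   dx1 = -100, dy1 = -100, dx2 = 100, dy2 = -100
 *   den_det = dx1*dy2 - dy1*dx2           = 20000
 *   a_det   = 0*100 - 0*100               =     0
 *   b_det   = 100*100 - 0*0               = 10000
 *   x = (a_det*dx2 - dx1*b_det) / den_det = 1000000 / 20000 = 50
 *   y = (a_det*dy2 - dy1*b_det) / den_det = 1000000 / 20000 = 50
 * so the reported intersection is (50, 50), with both ordinates EXACT since
 * the divisions leave no remainder.
 */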
  675.  
  676. static int
  677. bo_intersect_ordinate_32_compare (int32_t a, int32_t b, int exactness)
  678. {
  679.     int cmp;
  680.  
  681.     /* First compare the quotient */
  682.     cmp = a - b;
  683.     if (cmp)
  684.         return cmp;
  685.  
  686.     /* With quotient identical, if remainder is 0 then compare equal */
  687.     /* Otherwise, the inexact (rounded) value a compares as less than b */
  688.     return -(INEXACT == exactness);
  689. }
  690.  
  691. /* Does the given edge contain the given point? The point must already
  692.  * be known to be contained within the line determined by the edge,
  693.  * (most likely the point results from an intersection of this edge
  694.  * with another).
  695.  *
  696.  * If we had exact arithmetic, then this function would simply be a
  697.  * matter of examining whether the y value of the point lies within
  698.  * the range of y values of the edge. But since intersection points
  699.  * are not exact due to being rounded to the nearest integer within
  700.  * the available precision, we must also examine the x value of the
  701.  * point.
  702.  *
  703.  * The definition of "contains" here is that the given intersection
  704.  * point will be seen by the sweep line after the start event for the
  705.  * given edge and before the stop event for the edge. See the comments
  706.  * in the implementation for more details.
  707.  */
  708. static cairo_bool_t
  709. bo_edge_contains_intersect_point (const edge_t                  *edge,
  710.                                   cairo_bo_intersect_point_t    *point)
  711. {
  712.     int cmp_top, cmp_bottom;
  713.  
  714.     /* XXX: When running the actual algorithm, we don't actually need to
  715.      * compare against edge->top at all here, since any intersection above
  716.      * top is eliminated early via a slope comparison. We're leaving these
  717.      * here for now only for the sake of the quadratic-time intersection
  718.      * finder which needs them.
  719.      */
  720.  
  721.     cmp_top = bo_intersect_ordinate_32_compare (point->y.ordinate,
  722.                                                 edge->edge.top,
  723.                                                 point->y.exactness);
  724.     if (cmp_top < 0)
  725.         return FALSE;
  726.  
  727.     cmp_bottom = bo_intersect_ordinate_32_compare (point->y.ordinate,
  728.                                                    edge->edge.bottom,
  729.                                                    point->y.exactness);
  730.     if (cmp_bottom > 0)
  731.         return FALSE;
  732.  
  733.     if (cmp_top > 0 && cmp_bottom < 0)
  734.         return TRUE;
  735.  
  736.     /* At this stage, the point lies on the same y value as either
  737.      * edge->top or edge->bottom, so we have to examine the x value in
  738.      * order to properly determine containment. */
  739.  
  740.     /* If the y value of the point is the same as the y value of the
  741.      * top of the edge, then the x value of the point must be greater
  742.      * to be considered as inside the edge. Similarly, if the y value
  743.      * of the point is the same as the y value of the bottom of the
  744.      * edge, then the x value of the point must be less to be
  745.      * considered as inside. */
  746.  
  747.     if (cmp_top == 0) {
  748.         cairo_fixed_t top_x;
  749.  
  750.         top_x = line_compute_intersection_x_for_y (&edge->edge.line,
  751.                                                    edge->edge.top);
  752.         return bo_intersect_ordinate_32_compare (top_x, point->x.ordinate, point->x.exactness) < 0;
  753.     } else { /* cmp_bottom == 0 */
  754.         cairo_fixed_t bot_x;
  755.  
  756.         bot_x = line_compute_intersection_x_for_y (&edge->edge.line,
  757.                                                    edge->edge.bottom);
  758.         return bo_intersect_ordinate_32_compare (point->x.ordinate, bot_x, point->x.exactness) < 0;
  759.     }
  760. }
  761.  
  762. static cairo_bool_t
  763. edge_intersect (const edge_t            *a,
  764.                 const edge_t            *b,
  765.                 cairo_point_t   *intersection)
  766. {
  767.     cairo_bo_intersect_point_t quorem;
  768.  
  769.     if (! intersect_lines (a, b, &quorem))
  770.         return FALSE;
  771.  
  772.     if (a->edge.top != a->edge.line.p1.y || a->edge.bottom != a->edge.line.p2.y) {
  773.         if (! bo_edge_contains_intersect_point (a, &quorem))
  774.             return FALSE;
  775.     }
  776.  
  777.     if (b->edge.top != b->edge.line.p1.y || b->edge.bottom != b->edge.line.p2.y) {
  778.         if (! bo_edge_contains_intersect_point (b, &quorem))
  779.             return FALSE;
  780.     }
  781.  
  782.     /* Now that we've correctly compared the intersection point and
  783.      * determined that it lies within the edge, then we know that we
  784.      * no longer need any more bits of storage for the intersection
  785.      * than we do for our edge coordinates. We also no longer need the
  786.      * remainder from the division. */
  787.     intersection->x = quorem.x.ordinate;
  788.     intersection->y = quorem.y.ordinate;
  789.  
  790.     return TRUE;
  791. }
  792.  
  793. static inline int
  794. event_compare (const event_t *a, const event_t *b)
  795. {
  796.     return a->y - b->y;
  797. }
  798.  
  799. static void
  800. pqueue_init (pqueue_t *pq)
  801. {
  802.     pq->max_size = ARRAY_LENGTH (pq->elements_embedded);
  803.     pq->size = 0;
  804.  
  805.     pq->elements = pq->elements_embedded;
  806. }
  807.  
  808. static void
  809. pqueue_fini (pqueue_t *pq)
  810. {
  811.     if (pq->elements != pq->elements_embedded)
  812.         free (pq->elements);
  813. }
  814.  
  815. static cairo_bool_t
  816. pqueue_grow (pqueue_t *pq)
  817. {
  818.     event_t **new_elements;
  819.     pq->max_size *= 2;
  820.  
  821.     if (pq->elements == pq->elements_embedded) {
  822.         new_elements = _cairo_malloc_ab (pq->max_size,
  823.                                          sizeof (event_t *));
  824.         if (unlikely (new_elements == NULL))
  825.             return FALSE;
  826.  
  827.         memcpy (new_elements, pq->elements_embedded,
  828.                 sizeof (pq->elements_embedded));
  829.     } else {
  830.         new_elements = _cairo_realloc_ab (pq->elements,
  831.                                           pq->max_size,
  832.                                           sizeof (event_t *));
  833.         if (unlikely (new_elements == NULL))
  834.             return FALSE;
  835.     }
  836.  
  837.     pq->elements = new_elements;
  838.     return TRUE;
  839. }
  840.  
  841. static inline void
  842. pqueue_push (sweep_line_t *sweep_line, event_t *event)
  843. {
  844.     event_t **elements;
  845.     int i, parent;
  846.  
  847.     if (unlikely (sweep_line->queue.pq.size + 1 == sweep_line->queue.pq.max_size)) {
  848.         if (unlikely (! pqueue_grow (&sweep_line->queue.pq))) {
  849.             longjmp (sweep_line->unwind,
  850.                      _cairo_error (CAIRO_STATUS_NO_MEMORY));
  851.         }
  852.     }
  853.  
  854.     elements = sweep_line->queue.pq.elements;
  855.     for (i = ++sweep_line->queue.pq.size;
  856.          i != PQ_FIRST_ENTRY &&
  857.          event_compare (event,
  858.                         elements[parent = PQ_PARENT_INDEX (i)]) < 0;
  859.          i = parent)
  860.     {
  861.         elements[i] = elements[parent];
  862.     }
  863.  
  864.     elements[i] = event;
  865. }
  866.  
  867. static inline void
  868. pqueue_pop (pqueue_t *pq)
  869. {
  870.     event_t **elements = pq->elements;
  871.     event_t *tail;
  872.     int child, i;
  873.  
  874.     tail = elements[pq->size--];
  875.     if (pq->size == 0) {
  876.         elements[PQ_FIRST_ENTRY] = NULL;
  877.         return;
  878.     }
  879.  
  880.     for (i = PQ_FIRST_ENTRY;
  881.          (child = PQ_LEFT_CHILD_INDEX (i)) <= pq->size;
  882.          i = child)
  883.     {
  884.         if (child != pq->size &&
  885.             event_compare (elements[child+1],
  886.                            elements[child]) < 0)
  887.         {
  888.             child++;
  889.         }
  890.  
  891.         if (event_compare (elements[child], tail) >= 0)
  892.             break;
  893.  
  894.         elements[i] = elements[child];
  895.     }
  896.     elements[i] = tail;
  897. }
  898.  
  899. static inline void
  900. event_insert (sweep_line_t      *sweep_line,
  901.               event_type_t       type,
  902.               edge_t            *e1,
  903.               edge_t            *e2,
  904.               cairo_fixed_t      y)
  905. {
  906.     queue_event_t *event;
  907.  
  908.     event = _cairo_freepool_alloc (&sweep_line->queue.pool);
  909.     if (unlikely (event == NULL)) {
  910.         longjmp (sweep_line->unwind,
  911.                  _cairo_error (CAIRO_STATUS_NO_MEMORY));
  912.     }
  913.  
  914.     event->y = y;
  915.     event->type = type;
  916.     event->e1 = e1;
  917.     event->e2 = e2;
  918.  
  919.     pqueue_push (sweep_line, (event_t *) event);
  920. }
  921.  
  922. static void
  923. event_delete (sweep_line_t      *sweep_line,
  924.               event_t           *event)
  925. {
  926.     _cairo_freepool_free (&sweep_line->queue.pool, event);
  927. }
  928.  
  929. static inline event_t *
  930. event_next (sweep_line_t *sweep_line)
  931. {
  932.     event_t *event, *cmp;
  933.  
  934.     event = sweep_line->queue.pq.elements[PQ_FIRST_ENTRY];
  935.     cmp = *sweep_line->queue.start_events;
  936.     if (event == NULL ||
  937.         (cmp != NULL && event_compare (cmp, event) < 0))
  938.     {
  939.         event = cmp;
  940.         sweep_line->queue.start_events++;
  941.     }
  942.     else
  943.     {
  944.         pqueue_pop (&sweep_line->queue.pq);
  945.     }
  946.  
  947.     return event;
  948. }
  949.  
  950. CAIRO_COMBSORT_DECLARE (start_event_sort, event_t *, event_compare)
  951.  
  952. static inline void
  953. event_insert_stop (sweep_line_t *sweep_line,
  954.                    edge_t       *edge)
  955. {
  956.     event_insert (sweep_line,
  957.                   EVENT_TYPE_STOP,
  958.                   edge, NULL,
  959.                   edge->edge.bottom);
  960. }
  961.  
  962. static inline void
  963. event_insert_if_intersect_below_current_y (sweep_line_t *sweep_line,
  964.                                            edge_t       *left,
  965.                                            edge_t       *right)
  966. {
  967.     cairo_point_t intersection;
  968.  
  969.     /* start points intersect */
  970.     if (left->edge.line.p1.x == right->edge.line.p1.x &&
  971.         left->edge.line.p1.y == right->edge.line.p1.y)
  972.     {
  973.         return;
  974.     }
  975.  
  976.     /* end points intersect, process DELETE events first */
  977.     if (left->edge.line.p2.x == right->edge.line.p2.x &&
  978.         left->edge.line.p2.y == right->edge.line.p2.y)
  979.     {
  980.         return;
  981.     }
  982.  
  983.     if (slope_compare (left, right) <= 0)
  984.         return;
  985.  
  986.     if (! edge_intersect (left, right, &intersection))
  987.         return;
  988.  
  989.     event_insert (sweep_line,
  990.                   EVENT_TYPE_INTERSECTION,
  991.                   left, right,
  992.                   intersection.y);
  993. }
  994.  
  995. static inline edge_t *
  996. link_to_edge (cairo_list_t *link)
  997. {
  998.     return (edge_t *) link;
  999. }
  1000.  
  1001. static void
  1002. sweep_line_insert (sweep_line_t *sweep_line,
  1003.                    edge_t       *edge)
  1004. {
  1005.     cairo_list_t *pos;
  1006.     cairo_fixed_t y = sweep_line->current_subrow;
  1007.  
  1008.     pos = sweep_line->insert_cursor;
  1009.     if (pos == &sweep_line->active)
  1010.         pos = sweep_line->active.next;
  1011.     if (pos != &sweep_line->active) {
  1012.         int cmp;
  1013.  
  1014.         cmp = sweep_line_compare_edges (link_to_edge (pos),
  1015.                                         edge,
  1016.                                         y);
  1017.         if (cmp < 0) {
  1018.             while (pos->next != &sweep_line->active &&
  1019.                    sweep_line_compare_edges (link_to_edge (pos->next),
  1020.                                              edge,
  1021.                                              y) < 0)
  1022.             {
  1023.                 pos = pos->next;
  1024.             }
  1025.         } else if (cmp > 0) {
  1026.             do {
  1027.                 pos = pos->prev;
  1028.             } while (pos != &sweep_line->active &&
  1029.                      sweep_line_compare_edges (link_to_edge (pos),
  1030.                                                edge,
  1031.                                                y) > 0);
  1032.         }
  1033.     }
  1034.     cairo_list_add (&edge->link, pos);
  1035.     sweep_line->insert_cursor = &edge->link;
  1036. }
  1037.  
  1038. inline static void
  1039. coverage_rewind (struct coverage *cells)
  1040. {
  1041.     cells->cursor = &cells->head;
  1042. }
  1043.  
  1044. static void
  1045. coverage_init (struct coverage *cells)
  1046. {
  1047.     _cairo_freepool_init (&cells->pool,
  1048.                           sizeof (struct cell));
  1049.     cells->head.prev = NULL;
  1050.     cells->head.next = &cells->tail;
  1051.     cells->head.x = INT_MIN;
  1052.     cells->tail.prev = &cells->head;
  1053.     cells->tail.next = NULL;
  1054.     cells->tail.x = INT_MAX;
  1055.     cells->count = 0;
  1056.     coverage_rewind (cells);
  1057. }
  1058.  
  1059. static void
  1060. coverage_fini (struct coverage *cells)
  1061. {
  1062.     _cairo_freepool_fini (&cells->pool);
  1063. }
  1064.  
  1065. inline static void
  1066. coverage_reset (struct coverage *cells)
  1067. {
  1068.     cells->head.next = &cells->tail;
  1069.     cells->tail.prev = &cells->head;
  1070.     cells->count = 0;
  1071.     _cairo_freepool_reset (&cells->pool);
  1072.     coverage_rewind (cells);
  1073. }
  1074.  
  1075. static struct cell *
  1076. coverage_alloc (sweep_line_t *sweep_line,
  1077.                 struct cell *tail,
  1078.                 int x)
  1079. {
  1080.     struct cell *cell;
  1081.  
  1082.     cell = _cairo_freepool_alloc (&sweep_line->coverage.pool);
  1083.     if (unlikely (NULL == cell)) {
  1084.         longjmp (sweep_line->unwind,
  1085.                  _cairo_error (CAIRO_STATUS_NO_MEMORY));
  1086.     }
  1087.  
  1088.     tail->prev->next = cell;
  1089.     cell->prev = tail->prev;
  1090.     cell->next = tail;
  1091.     tail->prev = cell;
  1092.     cell->x = x;
  1093.     cell->uncovered_area = 0;
  1094.     cell->covered_height = 0;
  1095.     sweep_line->coverage.count++;
  1096.     return cell;
  1097. }
  1098.  
  1099. inline static struct cell *
  1100. coverage_find (sweep_line_t *sweep_line, int x)
  1101. {
  1102.     struct cell *cell;
  1103.  
  1104.     cell = sweep_line->coverage.cursor;
  1105.     if (unlikely (cell->x > x)) {
  1106.         do {
  1107.             if (cell->prev->x < x)
  1108.                 break;
  1109.             cell = cell->prev;
  1110.         } while (TRUE);
  1111.     } else {
  1112.         if (cell->x == x)
  1113.             return cell;
  1114.  
  1115.         do {
  1116.             UNROLL3({
  1117.                     cell = cell->next;
  1118.                     if (cell->x >= x)
  1119.                         break;
  1120.                     });
  1121.         } while (TRUE);
  1122.     }
  1123.  
  1124.     if (cell->x != x)
  1125.         cell = coverage_alloc (sweep_line, cell, x);
  1126.  
  1127.     return sweep_line->coverage.cursor = cell;
  1128. }
  1129.  
  1130. static void
  1131. coverage_render_cells (sweep_line_t *sweep_line,
  1132.                        cairo_fixed_t left, cairo_fixed_t right,
  1133.                        cairo_fixed_t y1, cairo_fixed_t y2,
  1134.                        int sign)
  1135. {
  1136.     int fx1, fx2;
  1137.     int ix1, ix2;
  1138.     int dx, dy;
  1139.  
  1140.     /* Orient the edge left-to-right. */
  1141.     dx = right - left;
  1142.     if (dx >= 0) {
  1143.         ix1 = _cairo_fixed_integer_part (left);
  1144.         fx1 = _cairo_fixed_fractional_part (left);
  1145.  
  1146.         ix2 = _cairo_fixed_integer_part (right);
  1147.         fx2 = _cairo_fixed_fractional_part (right);
  1148.  
  1149.         dy = y2 - y1;
  1150.     } else {
  1151.         ix1 = _cairo_fixed_integer_part (right);
  1152.         fx1 = _cairo_fixed_fractional_part (right);
  1153.  
  1154.         ix2 = _cairo_fixed_integer_part (left);
  1155.         fx2 = _cairo_fixed_fractional_part (left);
  1156.  
  1157.         dx = -dx;
  1158.         sign = -sign;
  1159.         dy = y1 - y2;
  1160.         y1 = y2 - dy;
  1161.         y2 = y1 + dy;
  1162.     }
  1163.  
  1164.     /* Add coverage for all pixels [ix1,ix2] on this row crossed
  1165.      * by the edge. */
  1166.     {
  1167.         struct quorem y = floored_divrem ((STEP_X - fx1)*dy, dx);
  1168.         struct cell *cell;
  1169.  
  1170.         cell = sweep_line->coverage.cursor;
  1171.         if (cell->x != ix1) {
  1172.             if (unlikely (cell->x > ix1)) {
  1173.                 do {
  1174.                     if (cell->prev->x < ix1)
  1175.                         break;
  1176.                     cell = cell->prev;
  1177.                 } while (TRUE);
  1178.             } else do {
  1179.                 UNROLL3({
  1180.                         if (cell->x >= ix1)
  1181.                             break;
  1182.                         cell = cell->next;
  1183.                         });
  1184.             } while (TRUE);
  1185.  
  1186.             if (cell->x != ix1)
  1187.                 cell = coverage_alloc (sweep_line, cell, ix1);
  1188.         }
  1189.  
  1190.         cell->uncovered_area += sign * y.quo * (STEP_X + fx1);
  1191.         cell->covered_height += sign * y.quo;
  1192.         y.quo += y1;
  1193.  
  1194.         cell = cell->next;
  1195.         if (cell->x != ++ix1)
  1196.             cell = coverage_alloc (sweep_line, cell, ix1);
  1197.         if (ix1 < ix2) {
  1198.             struct quorem dydx_full = floored_divrem (STEP_X*dy, dx);
  1199.  
  1200.             do {
  1201.                 cairo_fixed_t y_skip = dydx_full.quo;
  1202.                 y.rem += dydx_full.rem;
  1203.                 if (y.rem >= dx) {
  1204.                     ++y_skip;
  1205.                     y.rem -= dx;
  1206.                 }
  1207.  
  1208.                 y.quo += y_skip;
  1209.  
  1210.                 y_skip *= sign;
  1211.                 cell->covered_height += y_skip;
  1212.                 cell->uncovered_area += y_skip*STEP_X;
  1213.  
  1214.                 cell = cell->next;
  1215.                 if (cell->x != ++ix1)
  1216.                     cell = coverage_alloc (sweep_line, cell, ix1);
  1217.             } while (ix1 != ix2);
  1218.         }
  1219.         cell->uncovered_area += sign*(y2 - y.quo)*fx2;
  1220.         cell->covered_height += sign*(y2 - y.quo);
  1221.         sweep_line->coverage.cursor = cell;
  1222.     }
  1223. }
  1224.  
  1225. inline static void
  1226. full_inc_edge (edge_t *edge)
  1227. {
  1228.     edge->x.quo += edge->dxdy_full.quo;
  1229.     edge->x.rem += edge->dxdy_full.rem;
  1230.     if (edge->x.rem >= 0) {
  1231.         ++edge->x.quo;
  1232.         edge->x.rem -= edge->dy;
  1233.     }
  1234. }
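/* Note: the single carry test above assumes the invariant (established by the
 * edge initialisation outside this excerpt) that x.rem stays in [-dy, 0) while
 * dxdy_full.rem lies in [0, dy); the sum is then in [-dy, dy), so at most one
 * increment of x.quo is ever required per step.
 */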
  1235.  
  1236. static void
  1237. full_add_edge (sweep_line_t *sweep_line, edge_t *edge, int sign)
  1238. {
  1239.     struct cell *cell;
  1240.     cairo_fixed_t x1, x2;
  1241.     int ix1, ix2;
  1242.     int frac;
  1243.  
  1244.     edge->current_sign = sign;
  1245.  
  1246.     ix1 = _cairo_fixed_integer_part (edge->x.quo);
  1247.  
  1248.     if (edge->vertical) {
  1249.         frac = _cairo_fixed_fractional_part (edge->x.quo);
  1250.         cell = coverage_find (sweep_line, ix1);
  1251.         cell->covered_height += sign * STEP_Y;
  1252.         cell->uncovered_area += sign * 2 * frac * STEP_Y;
  1253.         return;
  1254.     }
  1255.  
  1256.     x1 = edge->x.quo;
  1257.     full_inc_edge (edge);
  1258.     x2 = edge->x.quo;
  1259.  
  1260.     ix2 = _cairo_fixed_integer_part (edge->x.quo);
  1261.  
  1262.     /* Edge is entirely within a column? */
  1263.     if (likely (ix1 == ix2)) {
  1264.         frac = _cairo_fixed_fractional_part (x1) +
  1265.                _cairo_fixed_fractional_part (x2);
  1266.         cell = coverage_find (sweep_line, ix1);
  1267.         cell->covered_height += sign * STEP_Y;
  1268.         cell->uncovered_area += sign * frac * STEP_Y;
  1269.         return;
  1270.     }
  1271.  
  1272.     coverage_render_cells (sweep_line, x1, x2, 0, STEP_Y, sign);
  1273. }
  1274.  
  1275. static void
  1276. full_nonzero (sweep_line_t *sweep_line)
  1277. {
  1278.     cairo_list_t *pos;
  1279.  
  1280.     sweep_line->is_vertical = TRUE;
  1281.     pos = sweep_line->active.next;
  1282.     do {
  1283.         edge_t *left = link_to_edge (pos), *right;
  1284.         int winding = left->edge.dir;
  1285.  
  1286.         sweep_line->is_vertical &= left->vertical;
  1287.  
  1288.         pos = left->link.next;
  1289.         do {
  1290.             if (unlikely (pos == &sweep_line->active)) {
  1291.                 full_add_edge (sweep_line, left, +1);
  1292.                 return;
  1293.             }
  1294.  
  1295.             right = link_to_edge (pos);
  1296.             pos = pos->next;
  1297.             sweep_line->is_vertical &= right->vertical;
  1298.  
  1299.             winding += right->edge.dir;
  1300.             if (0 == winding) {
  1301.                 if (pos == &sweep_line->active ||
  1302.                     link_to_edge (pos)->x.quo != right->x.quo)
  1303.                 {
  1304.                     break;
  1305.                 }
  1306.             }
  1307.  
  1308.             if (! right->vertical)
  1309.                 full_inc_edge (right);
  1310.         } while (TRUE);
  1311.  
  1312.         full_add_edge (sweep_line, left,  +1);
  1313.         full_add_edge (sweep_line, right, -1);
  1314.     } while (pos != &sweep_line->active);
  1315. }
  1316.  
  1317. static void
  1318. full_evenodd (sweep_line_t *sweep_line)
  1319. {
  1320.     cairo_list_t *pos;
  1321.  
  1322.     sweep_line->is_vertical = TRUE;
  1323.     pos = sweep_line->active.next;
  1324.     do {
  1325.         edge_t *left = link_to_edge (pos), *right;
  1326.         int winding = 0;
  1327.  
  1328.         sweep_line->is_vertical &= left->vertical;
  1329.  
  1330.         pos = left->link.next;
  1331.         do {
  1332.             if (pos == &sweep_line->active) {
  1333.                 full_add_edge (sweep_line, left, +1);
  1334.                 return;
  1335.             }
  1336.  
  1337.             right = link_to_edge (pos);
  1338.             pos = pos->next;
  1339.             sweep_line->is_vertical &= right->vertical;
  1340.  
  1341.             if (++winding & 1) {
  1342.                 if (pos == &sweep_line->active ||
  1343.                     link_to_edge (pos)->x.quo != right->x.quo)
  1344.                 {
  1345.                     break;
  1346.                 }
  1347.             }
  1348.  
  1349.             if (! right->vertical)
  1350.                 full_inc_edge (right);
  1351.         } while (TRUE);
  1352.  
  1353.         full_add_edge (sweep_line, left,  +1);
  1354.         full_add_edge (sweep_line, right, -1);
  1355.     } while (pos != &sweep_line->active);
  1356. }
  1357.  
  1358. static void
  1359. render_rows (cairo_botor_scan_converter_t *self,
  1360.              sweep_line_t *sweep_line,
  1361.              int y, int height,
  1362.              cairo_span_renderer_t *renderer)
  1363. {
  1364.     cairo_half_open_span_t spans_stack[CAIRO_STACK_ARRAY_LENGTH (cairo_half_open_span_t)];
  1365.     cairo_half_open_span_t *spans = spans_stack;
  1366.     struct cell *cell;
  1367.     int prev_x, cover;
  1368.     int num_spans;
  1369.     cairo_status_t status;
  1370.  
  1371.     if (unlikely (sweep_line->coverage.count == 0)) {
  1372.         status = renderer->render_rows (renderer, y, height, NULL, 0);
  1373.         if (unlikely (status))
  1374.             longjmp (sweep_line->unwind, status);
  1375.         return;
  1376.     }
  1377.  
  1378.     /* Allocate enough spans for the row. */
  1379.  
  1380.     num_spans = 2*sweep_line->coverage.count+2;
  1381.     if (unlikely (num_spans > ARRAY_LENGTH (spans_stack))) {
  1382.         spans = _cairo_malloc_ab (num_spans, sizeof (cairo_half_open_span_t));
  1383.         if (unlikely (spans == NULL)) {
  1384.             longjmp (sweep_line->unwind,
  1385.                      _cairo_error (CAIRO_STATUS_NO_MEMORY));
  1386.         }
  1387.     }
  1388.  
  1389.     /* Form the spans from the coverage and areas. */
  1390.     num_spans = 0;
  1391.     prev_x = self->xmin;
  1392.     cover = 0;
  1393.     cell = sweep_line->coverage.head.next;
  1394.     do {
  1395.         int x = cell->x;
  1396.         int area;
  1397.  
  1398.         if (x > prev_x) {
  1399.             spans[num_spans].x = prev_x;
  1400.             spans[num_spans].inverse = 0;
  1401.             spans[num_spans].coverage = AREA_TO_ALPHA (cover);
  1402.             ++num_spans;
  1403.         }
  1404.  
  1405.         cover += cell->covered_height*STEP_X*2;
  1406.         area = cover - cell->uncovered_area;
  1407.  
  1408.         spans[num_spans].x = x;
  1409.         spans[num_spans].coverage = AREA_TO_ALPHA (area);
  1410.         ++num_spans;
  1411.  
  1412.         prev_x = x + 1;
  1413.     } while ((cell = cell->next) != &sweep_line->coverage.tail);
  1414.  
  1415.     if (prev_x <= self->xmax) {
  1416.         spans[num_spans].x = prev_x;
  1417.         spans[num_spans].inverse = 0;
  1418.         spans[num_spans].coverage = AREA_TO_ALPHA (cover);
  1419.         ++num_spans;
  1420.     }
  1421.  
  1422.     if (cover && prev_x < self->xmax) {
  1423.         spans[num_spans].x = self->xmax;
  1424.         spans[num_spans].inverse = 1;
  1425.         spans[num_spans].coverage = 0;
  1426.         ++num_spans;
  1427.     }
  1428.  
  1429.     status = renderer->render_rows (renderer, y, height, spans, num_spans);
  1430.  
  1431.     if (unlikely (spans != spans_stack))
  1432.         free (spans);
  1433.  
  1434.     coverage_reset (&sweep_line->coverage);
  1435.  
  1436.     if (unlikely (status))
  1437.         longjmp (sweep_line->unwind, status);
  1438. }
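/* Example of the span encoding produced above (again assuming
 * CAIRO_FIXED_ONE == 256): a fully covered run of pixels [5, 10) leaves two
 * cells, {x = 5, covered_height = +256} and {x = 10, covered_height = -256},
 * both with zero uncovered_area. With self->xmin < 5 and self->xmax == 10 the
 * loop emits the half-open spans
 *   { x = xmin, coverage =   0 }
 *   { x = 5,    coverage = 255 }
 *   { x = 6,    coverage = 255 }
 *   { x = 10,   coverage =   0 }
 * where each span holds from its x up to the next span's x.
 */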
  1439.  
  1440. static void
  1441. full_repeat (sweep_line_t *sweep)
  1442. {
  1443.     edge_t *edge;
  1444.  
  1445.     cairo_list_foreach_entry (edge, edge_t, &sweep->active, link) {
  1446.         if (edge->current_sign)
  1447.             full_add_edge (sweep, edge, edge->current_sign);
  1448.         else if (! edge->vertical)
  1449.             full_inc_edge (edge);
  1450.     }
  1451. }
  1452.  
  1453. static void
  1454. full_reset (sweep_line_t *sweep)
  1455. {
  1456.     edge_t *edge;
  1457.  
  1458.     cairo_list_foreach_entry (edge, edge_t, &sweep->active, link)
  1459.         edge->current_sign = 0;
  1460. }
  1461.  
  1462. static void
  1463. full_step (cairo_botor_scan_converter_t *self,
  1464.            sweep_line_t *sweep_line,
  1465.            cairo_fixed_t row,
  1466.            cairo_span_renderer_t *renderer)
  1467. {
  1468.     int top, bottom;
  1469.  
  1470.     top = _cairo_fixed_integer_part (sweep_line->current_row);
  1471.     bottom = _cairo_fixed_integer_part (row);
  1472.     if (cairo_list_is_empty (&sweep_line->active)) {
  1473.         cairo_status_t  status;
  1474.  
  1475.         status = renderer->render_rows (renderer, top, bottom - top, NULL, 0);
  1476.         if (unlikely (status))
  1477.             longjmp (sweep_line->unwind, status);
  1478.  
  1479.         return;
  1480.     }
  1481.  
  1482.     if (self->fill_rule == CAIRO_FILL_RULE_WINDING)
  1483.         full_nonzero (sweep_line);
  1484.     else
  1485.         full_evenodd (sweep_line);
  1486.  
  1487.     if (sweep_line->is_vertical || bottom == top + 1) {
  1488.         render_rows (self, sweep_line, top, bottom - top, renderer);
  1489.         full_reset (sweep_line);
  1490.         return;
  1491.     }
  1492.  
  1493.     render_rows (self, sweep_line, top++, 1, renderer);
  1494.     do {
  1495.         full_repeat (sweep_line);
  1496.         render_rows (self, sweep_line, top, 1, renderer);
  1497.     } while (++top != bottom);
  1498.  
  1499.     full_reset (sweep_line);
  1500. }
  1501.  
  1502. cairo_always_inline static void
  1503. sub_inc_edge (edge_t *edge,
  1504.               cairo_fixed_t height)
  1505. {
  1506.     if (height == 1) {
  1507.         edge->x.quo += edge->dxdy.quo;
  1508.         edge->x.rem += edge->dxdy.rem;
  1509.         if (edge->x.rem >= 0) {
  1510.             ++edge->x.quo;
  1511.             edge->x.rem -= edge->dy;
  1512.         }
  1513.     } else {
  1514.         edge->x.quo += height * edge->dxdy.quo;
  1515.         edge->x.rem += height * edge->dxdy.rem;
  1516.         if (edge->x.rem >= 0) {
  1517.             int carry = edge->x.rem / edge->dy + 1;
  1518.             edge->x.quo += carry;
  1519.             edge->x.rem -= carry * edge->dy;
  1520.         }
  1521.     }
  1522. }
  1523.  
  1524. static void
  1525. sub_add_run (sweep_line_t *sweep_line, edge_t *edge, int y, int sign)
  1526. {
  1527.     struct run *run;
  1528.  
  1529.     run = _cairo_freepool_alloc (&sweep_line->runs);
  1530.     if (unlikely (run == NULL))
  1531.         longjmp (sweep_line->unwind, _cairo_error (CAIRO_STATUS_NO_MEMORY));
  1532.  
  1533.     run->y = y;
  1534.     run->sign = sign;
  1535.     run->next = edge->runs;
  1536.     edge->runs = run;
  1537.  
  1538.     edge->current_sign = sign;
  1539. }
  1540.  
  1541. inline static cairo_bool_t
  1542. edges_coincident (edge_t *left, edge_t *right, cairo_fixed_t y)
  1543. {
  1544.     /* XXX is compare_x_for_y() worth executing during sub steps? */
  1545.     return line_equal (&left->edge.line, &right->edge.line);
  1546.     //edges_compare_x_for_y (&left->edge, &right->edge, y) >= 0;
  1547. }
  1548.  
  1549. static void
  1550. sub_nonzero (sweep_line_t *sweep_line)
  1551. {
  1552.     cairo_fixed_t y = sweep_line->current_subrow;
  1553.     cairo_fixed_t fy = _cairo_fixed_fractional_part (y);
  1554.     cairo_list_t *pos;
  1555.  
  1556.     pos = sweep_line->active.next;
  1557.     do {
  1558.         edge_t *left = link_to_edge (pos), *right;
  1559.         int winding = left->edge.dir;
  1560.  
  1561.         pos = left->link.next;
  1562.         do {
  1563.             if (unlikely (pos == &sweep_line->active)) {
  1564.                 if (left->current_sign != +1)
  1565.                     sub_add_run (sweep_line, left, fy, +1);
  1566.                 return;
  1567.             }
  1568.  
  1569.             right = link_to_edge (pos);
  1570.             pos = pos->next;
  1571.  
  1572.             winding += right->edge.dir;
  1573.             if (0 == winding) {
  1574.                 if (pos == &sweep_line->active ||
  1575.                     ! edges_coincident (right, link_to_edge (pos), y))
  1576.                 {
  1577.                     break;
  1578.                 }
  1579.             }
  1580.  
  1581.             if (right->current_sign)
  1582.                 sub_add_run (sweep_line, right, fy, 0);
  1583.         } while (TRUE);
  1584.  
  1585.         if (left->current_sign != +1)
  1586.             sub_add_run (sweep_line, left, fy, +1);
  1587.         if (right->current_sign != -1)
  1588.             sub_add_run (sweep_line, right, fy, -1);
  1589.     } while (pos != &sweep_line->active);
  1590. }
  1591.  
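/* Sub-row pass for the even-odd fill rule: identical in structure to
 * sub_nonzero() above, but a span closes after an odd number of
 * crossings following the opening edge rather than when the signed
 * winding count returns to zero.
 */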
  1592. static void
  1593. sub_evenodd (sweep_line_t *sweep_line)
  1594. {
  1595.     cairo_fixed_t y = sweep_line->current_subrow;
  1596.     cairo_fixed_t fy = _cairo_fixed_fractional_part (y);
  1597.     cairo_list_t *pos;
  1598.  
  1599.     pos = sweep_line->active.next;
  1600.     do {
  1601.         edge_t *left = link_to_edge (pos), *right;
  1602.         int winding = 0;
  1603.  
  1604.         pos = left->link.next;
  1605.         do {
  1606.             if (unlikely (pos == &sweep_line->active)) {
  1607.                 if (left->current_sign != +1)
  1608.                     sub_add_run (sweep_line, left, fy, +1);
  1609.                 return;
  1610.             }
  1611.  
  1612.             right = link_to_edge (pos);
  1613.             pos = pos->next;
  1614.  
  1615.             if (++winding & 1) {
  1616.                 if (pos == &sweep_line->active ||
  1617.                     ! edges_coincident (right, link_to_edge (pos), y))
  1618.                 {
  1619.                     break;
  1620.                 }
  1621.             }
  1622.  
  1623.             if (right->current_sign)
  1624.                 sub_add_run (sweep_line, right, fy, 0);
  1625.         } while (TRUE);
  1626.  
  1627.         if (left->current_sign != +1)
  1628.             sub_add_run (sweep_line, left, fy, +1);
  1629.         if (right->current_sign != -1)
  1630.             sub_add_run (sweep_line, right, fy, -1);
  1631.     } while (pos != &sweep_line->active);
  1632. }
  1633.  
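/* Emit sub-row runs for the current sweep position: nothing to do if
 * the active list is empty, otherwise dispatch on the converter's fill
 * rule.
 */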
  1634. cairo_always_inline static void
  1635. sub_step (cairo_botor_scan_converter_t *self,
  1636.           sweep_line_t *sweep_line)
  1637. {
  1638.     if (cairo_list_is_empty (&sweep_line->active))
  1639.         return;
  1640.  
  1641.     if (self->fill_rule == CAIRO_FILL_RULE_WINDING)
  1642.         sub_nonzero (sweep_line);
  1643.     else
  1644.         sub_evenodd (sweep_line);
  1645. }
  1646.  
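/* Convert the run list accumulated on `edge' between y1 and y2 (both
 * fractional offsets within the current pixel row) into coverage
 * cells.  The runs were pushed most-recent-first, so they are first
 * reversed into top-to-bottom order; the edge's x is then stepped
 * through each run and the signed height/area contributions are added
 * either to a single cell (when the run stays within one column) or
 * via coverage_render_cells() when it crosses columns.
 */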
  1647. static void
  1648. coverage_render_runs (sweep_line_t *sweep, edge_t *edge,
  1649.                       cairo_fixed_t y1, cairo_fixed_t y2)
  1650. {
  1651.     struct run tail;
  1652.     struct run *run = &tail;
  1653.  
  1654.     tail.next = NULL;
  1655.     tail.y = y2;
  1656.  
  1657.     /* Order the runs top->bottom */
  1658.     while (edge->runs) {
  1659.         struct run *r;
  1660.  
  1661.         r = edge->runs;
  1662.         edge->runs = r->next;
  1663.         r->next = run;
  1664.         run = r;
  1665.     }
  1666.  
  1667.     if (run->y > y1)
  1668.         sub_inc_edge (edge, run->y - y1);
  1669.  
  1670.     do {
  1671.         cairo_fixed_t x1, x2;
  1672.  
  1673.         y1 = run->y;
  1674.         y2 = run->next->y;
  1675.  
  1676.         x1 = edge->x.quo;
  1677.         if (y2 - y1 == STEP_Y)
  1678.             full_inc_edge (edge);
  1679.         else
  1680.             sub_inc_edge (edge, y2 - y1);
  1681.         x2 = edge->x.quo;
  1682.  
  1683.         if (run->sign) {
  1684.             int ix1, ix2;
  1685.  
  1686.             ix1 = _cairo_fixed_integer_part (x1);
  1687.             ix2 = _cairo_fixed_integer_part (x2);
  1688.  
  1689.             /* Edge is entirely within a column? */
  1690.             if (likely (ix1 == ix2)) {
  1691.                 struct cell *cell;
  1692.                 int frac;
  1693.  
  1694.                 frac = _cairo_fixed_fractional_part (x1) +
  1695.                        _cairo_fixed_fractional_part (x2);
  1696.                 cell = coverage_find (sweep, ix1);
  1697.                 cell->covered_height += run->sign * (y2 - y1);
  1698.                 cell->uncovered_area += run->sign * (y2 - y1) * frac;
  1699.             } else {
  1700.                 coverage_render_cells (sweep, x1, x2, y1, y2, run->sign);
  1701.             }
  1702.         }
  1703.  
  1704.         run = run->next;
  1705.     } while (run->next != NULL);
  1706. }
  1707.  
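/* A vertical edge never changes x, so its run list collapses into a
 * single signed height: each run contributes sign * (distance from its
 * y down to the boundary below it), and the total is credited to the
 * one cell containing the edge.
 */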
  1708. static void
  1709. coverage_render_vertical_runs (sweep_line_t *sweep, edge_t *edge, cairo_fixed_t y2)
  1710. {
  1711.     struct cell *cell;
  1712.     struct run *run;
  1713.     int height = 0;
  1714.  
  1715.     for (run = edge->runs; run != NULL; run = run->next) {
  1716.         if (run->sign)
  1717.             height += run->sign * (y2 - run->y);
  1718.         y2 = run->y;
  1719.     }
  1720.  
  1721.     cell = coverage_find (sweep, _cairo_fixed_integer_part (edge->x.quo));
  1722.     cell->covered_height += height;
  1723.     cell->uncovered_area += 2 * _cairo_fixed_fractional_part (edge->x.quo) * height;
  1724. }
  1725.  
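/* Flush one pixel row that required sub-row stepping: take a final
 * sub_step() at the current position, convert the runs of every active
 * and stopped edge into cell coverage (edges with no runs merely have
 * their x advanced to the bottom of the row), recycle the run pool and
 * the stopped list, and hand the finished row to the renderer.
 */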
  1726. cairo_always_inline static void
  1727. sub_emit (cairo_botor_scan_converter_t *self,
  1728.           sweep_line_t *sweep,
  1729.           cairo_span_renderer_t *renderer)
  1730. {
  1731.     edge_t *edge;
  1732.  
  1733.     sub_step (self, sweep);
  1734.  
  1735.     /* convert the runs into coverages */
  1736.  
  1737.     cairo_list_foreach_entry (edge, edge_t, &sweep->active, link) {
  1738.         if (edge->runs == NULL) {
  1739.             if (! edge->vertical) {
  1740.                 if (edge->flags & START) {
  1741.                     sub_inc_edge (edge,
  1742.                                   STEP_Y - _cairo_fixed_fractional_part (edge->edge.top));
  1743.                     edge->flags &= ~START;
  1744.                 } else
  1745.                     full_inc_edge (edge);
  1746.             }
  1747.         } else {
  1748.             if (edge->vertical) {
  1749.                 coverage_render_vertical_runs (sweep, edge, STEP_Y);
  1750.             } else {
  1751.                 int y1 = 0;
  1752.                 if (edge->flags & START) {
  1753.                     y1 = _cairo_fixed_fractional_part (edge->edge.top);
  1754.                     edge->flags &= ~START;
  1755.                 }
  1756.                 coverage_render_runs (sweep, edge, y1, STEP_Y);
  1757.             }
  1758.         }
  1759.         edge->current_sign = 0;
  1760.         edge->runs = NULL;
  1761.     }
  1762.  
  1763.     cairo_list_foreach_entry (edge, edge_t, &sweep->stopped, link) {
  1764.         int y2 = _cairo_fixed_fractional_part (edge->edge.bottom);
  1765.         if (edge->vertical) {
  1766.             coverage_render_vertical_runs (sweep, edge, y2);
  1767.         } else {
  1768.             int y1 = 0;
  1769.             if (edge->flags & START)
  1770.                 y1 = _cairo_fixed_fractional_part (edge->edge.top);
  1771.             coverage_render_runs (sweep, edge, y1, y2);
  1772.         }
  1773.     }
  1774.     cairo_list_init (&sweep->stopped);
  1775.  
  1776.     _cairo_freepool_reset (&sweep->runs);
  1777.  
  1778.     render_rows (self, sweep,
  1779.                  _cairo_fixed_integer_part (sweep->current_row), 1,
  1780.                  renderer);
  1781. }
  1782.  
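/* Prepare the sweep for a new polygon: empty active/stopped lists,
 * coverage cells and run pool, the sorted, NULL-terminated start-event
 * array, and the priority queue used for stop and intersection events.
 */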
  1783. static void
  1784. sweep_line_init (sweep_line_t    *sweep_line,
  1785.                  event_t        **start_events,
  1786.                  int              num_events)
  1787. {
  1788.     cairo_list_init (&sweep_line->active);
  1789.     cairo_list_init (&sweep_line->stopped);
  1790.     sweep_line->insert_cursor = &sweep_line->active;
  1791.  
  1792.     sweep_line->current_row = INT32_MIN;
  1793.     sweep_line->current_subrow = INT32_MIN;
  1794.  
  1795.     coverage_init (&sweep_line->coverage);
  1796.     _cairo_freepool_init (&sweep_line->runs, sizeof (struct run));
  1797.  
  1798.     start_event_sort (start_events, num_events);
  1799.     start_events[num_events] = NULL;
  1800.  
  1801.     sweep_line->queue.start_events = start_events;
  1802.  
  1803.     _cairo_freepool_init (&sweep_line->queue.pool,
  1804.                           sizeof (queue_event_t));
  1805.     pqueue_init (&sweep_line->queue.pq);
  1806.     sweep_line->queue.pq.elements[PQ_FIRST_ENTRY] = NULL;
  1807. }
  1808.  
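/* Remove a finished edge from the active list.  If it still carries
 * unemitted runs it is parked on the stopped list so that sub_emit()
 * can account for its partial row before the edge is discarded.
 */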
  1809. static void
  1810. sweep_line_delete (sweep_line_t *sweep_line,
  1811.                    edge_t       *edge)
  1812. {
  1813.     if (sweep_line->insert_cursor == &edge->link)
  1814.         sweep_line->insert_cursor = edge->link.prev;
  1815.  
  1816.     cairo_list_del (&edge->link);
  1817.     if (edge->runs)
  1818.         cairo_list_add_tail (&edge->link, &sweep_line->stopped);
  1819.     edge->flags |= STOP;
  1820. }
  1821.  
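/* Exchange two adjacent edges in the active list; this is the list
 * surgery performed when an intersection event shows that their
 * x-order flips below the current y.
 */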
  1822. static void
  1823. sweep_line_swap (sweep_line_t   *sweep_line,
  1824.                  edge_t *left,
  1825.                  edge_t *right)
  1826. {
  1827.     right->link.prev = left->link.prev;
  1828.     left->link.next = right->link.next;
  1829.     right->link.next = &left->link;
  1830.     left->link.prev = &right->link;
  1831.     left->link.next->prev = &left->link;
  1832.     right->link.prev->next = &right->link;
  1833. }
  1834.  
  1835. static void
  1836. sweep_line_fini (sweep_line_t *sweep_line)
  1837. {
  1838.     pqueue_fini (&sweep_line->queue.pq);
  1839.     _cairo_freepool_fini (&sweep_line->queue.pool);
  1840.     coverage_fini (&sweep_line->coverage);
  1841.     _cairo_freepool_fini (&sweep_line->runs);
  1842. }
  1843.  
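/* The core sweep: events are pulled in y order, the active list is
 * kept sorted in x by handling START/STOP/INTERSECTION events
 * (Bentley-Ottmann), and coverage is emitted either a full pixel row
 * at a time via full_step() or, when edges start, stop or cross inside
 * a row, sub-row by sub-row via sub_step()/sub_emit().  Out-of-memory
 * conditions longjmp back to the setjmp below so cleanup happens in
 * one place.
 */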
  1844. static cairo_status_t
  1845. botor_generate (cairo_botor_scan_converter_t     *self,
  1846.                 event_t                         **start_events,
  1847.                 cairo_span_renderer_t            *renderer)
  1848. {
  1849.     cairo_status_t status;
  1850.     sweep_line_t sweep_line;
  1851.     cairo_fixed_t ybot;
  1852.     event_t *event;
  1853.     cairo_list_t *left, *right;
  1854.     edge_t *e1, *e2;
  1855.     int bottom;
  1856.  
  1857.     sweep_line_init (&sweep_line, start_events, self->num_edges);
  1858.     if ((status = setjmp (sweep_line.unwind)))
  1859.         goto unwind;
  1860.  
  1861.     ybot = self->extents.p2.y;
  1862.     sweep_line.current_subrow = self->extents.p1.y;
  1863.     sweep_line.current_row = _cairo_fixed_floor (self->extents.p1.y);
  1864.     event = *sweep_line.queue.start_events++;
  1865.     do {
  1866.         /* Can we process a full step in one go? */
  1867.         if (event->y >= sweep_line.current_row + STEP_Y) {
  1868.             bottom = _cairo_fixed_floor (event->y);
  1869.             full_step (self, &sweep_line, bottom, renderer);
  1870.             sweep_line.current_row = bottom;
  1871.             sweep_line.current_subrow = bottom;
  1872.         }
  1873.  
  1874.         do {
  1875.             if (event->y > sweep_line.current_subrow) {
  1876.                 sub_step (self, &sweep_line);
  1877.                 sweep_line.current_subrow = event->y;
  1878.             }
  1879.  
  1880.             do {
  1881.                 /* Update the active list using Bentley-Ottmann */
  1882.                 switch (event->type) {
  1883.                 case EVENT_TYPE_START:
  1884.                     e1 = ((start_event_t *) event)->edge;
  1885.  
  1886.                     sweep_line_insert (&sweep_line, e1);
  1887.                     event_insert_stop (&sweep_line, e1);
  1888.  
  1889.                     left = e1->link.prev;
  1890.                     right = e1->link.next;
  1891.  
  1892.                     if (left != &sweep_line.active) {
  1893.                         event_insert_if_intersect_below_current_y (&sweep_line,
  1894.                                                                    link_to_edge (left), e1);
  1895.                     }
  1896.  
  1897.                     if (right != &sweep_line.active) {
  1898.                         event_insert_if_intersect_below_current_y (&sweep_line,
  1899.                                                                    e1, link_to_edge (right));
  1900.                     }
  1901.  
  1902.                     break;
  1903.  
  1904.                 case EVENT_TYPE_STOP:
  1905.                     e1 = ((queue_event_t *) event)->e1;
  1906.                     event_delete (&sweep_line, event);
  1907.  
  1908.                     left = e1->link.prev;
  1909.                     right = e1->link.next;
  1910.  
  1911.                     sweep_line_delete (&sweep_line, e1);
  1912.  
  1913.                     if (left != &sweep_line.active &&
  1914.                         right != &sweep_line.active)
  1915.                     {
  1916.                          event_insert_if_intersect_below_current_y (&sweep_line,
  1917.                                                                     link_to_edge (left),
  1918.                                                                     link_to_edge (right));
  1919.                     }
  1920.  
  1921.                     break;
  1922.  
  1923.                 case EVENT_TYPE_INTERSECTION:
  1924.                     e1 = ((queue_event_t *) event)->e1;
  1925.                     e2 = ((queue_event_t *) event)->e2;
  1926.  
  1927.                     event_delete (&sweep_line, event);
  1928.                     if (e1->flags & STOP)
  1929.                         break;
  1930.                     if (e2->flags & STOP)
  1931.                         break;
  1932.  
  1933.                     /* skip this intersection if its edges are not adjacent */
  1934.                     if (&e2->link != e1->link.next)
  1935.                         break;
  1936.  
  1937.                     left = e1->link.prev;
  1938.                     right = e2->link.next;
  1939.  
  1940.                     sweep_line_swap (&sweep_line, e1, e2);
  1941.  
  1942.                     /* after the swap e2 is left of e1 */
  1943.                     if (left != &sweep_line.active) {
  1944.                         event_insert_if_intersect_below_current_y (&sweep_line,
  1945.                                                                    link_to_edge (left), e2);
  1946.                     }
  1947.  
  1948.                     if (right != &sweep_line.active) {
  1949.                         event_insert_if_intersect_below_current_y (&sweep_line,
  1950.                                                                    e1, link_to_edge (right));
  1951.                     }
  1952.  
  1953.                     break;
  1954.                 }
  1955.  
  1956.                 event = event_next (&sweep_line);
  1957.                 if (event == NULL)
  1958.                     goto end;
  1959.             } while (event->y == sweep_line.current_subrow);
  1960.         } while (event->y < sweep_line.current_row + STEP_Y);
  1961.  
  1962.         bottom = sweep_line.current_row + STEP_Y;
  1963.         sub_emit (self, &sweep_line, renderer);
  1964.         sweep_line.current_subrow = bottom;
  1965.         sweep_line.current_row = sweep_line.current_subrow;
  1966.     } while (TRUE);
  1967.  
  1968.   end:
  1969.     /* flush any partial spans */
  1970.     if (sweep_line.current_subrow != sweep_line.current_row) {
  1971.         sub_emit (self, &sweep_line, renderer);
  1972.         sweep_line.current_row += STEP_Y;
  1973.         sweep_line.current_subrow = sweep_line.current_row;
  1974.     }
  1975.     /* clear the rest */
  1976.     if (sweep_line.current_subrow < ybot) {
  1977.         bottom = _cairo_fixed_integer_part (sweep_line.current_row);
  1978.         status = renderer->render_rows (renderer,
  1979.                                         bottom, _cairo_fixed_integer_ceil (ybot) - bottom,
  1980.                                         NULL, 0);
  1981.     }
  1982.  
  1983.  unwind:
  1984.     sweep_line_fini (&sweep_line);
  1985.  
  1986.     return status;
  1987. }
  1988.  
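/* Scan-converter entry point: build one START event per edge (using a
 * stack buffer when the polygon is small enough, a single heap block
 * otherwise), run the sweep, and fall back to an empty render_rows()
 * call when there are no edges at all.
 */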
  1989. static cairo_status_t
  1990. _cairo_botor_scan_converter_generate (void                      *converter,
  1991.                                       cairo_span_renderer_t     *renderer)
  1992. {
  1993.     cairo_botor_scan_converter_t *self = converter;
  1994.     start_event_t stack_events[CAIRO_STACK_ARRAY_LENGTH (start_event_t)];
  1995.     start_event_t *events;
  1996.     event_t *stack_event_ptrs[ARRAY_LENGTH (stack_events) + 1];
  1997.     event_t **event_ptrs;
  1998.     struct _cairo_botor_scan_converter_chunk *chunk;
  1999.     cairo_status_t status;
  2000.     int num_events;
  2001.     int i, j;
  2002.  
  2003.     num_events = self->num_edges;
  2004.     if (unlikely (0 == num_events)) {
  2005.         return renderer->render_rows (renderer,
  2006.                                       _cairo_fixed_integer_floor (self->extents.p1.y),
  2007.                                       _cairo_fixed_integer_ceil (self->extents.p2.y) -
  2008.                                       _cairo_fixed_integer_floor (self->extents.p1.y),
  2009.                                       NULL, 0);
  2010.     }
  2011.  
  2012.     events = stack_events;
  2013.     event_ptrs = stack_event_ptrs;
  2014.     if (unlikely (num_events >= ARRAY_LENGTH (stack_events))) {
  2015.         events = _cairo_malloc_ab_plus_c (num_events,
  2016.                                           sizeof (start_event_t) + sizeof (event_t *),
  2017.                                           sizeof (event_t *));
  2018.         if (unlikely (events == NULL))
  2019.             return _cairo_error (CAIRO_STATUS_NO_MEMORY);
  2020.  
  2021.         event_ptrs = (event_t **) (events + num_events);
  2022.     }
  2023.  
  2024.     j = 0;
  2025.     for (chunk = &self->chunks; chunk != NULL; chunk = chunk->next) {
  2026.         edge_t *edge;
  2027.  
  2028.         edge = chunk->base;
  2029.         for (i = 0; i < chunk->count; i++) {
  2030.             event_ptrs[j] = (event_t *) &events[j];
  2031.  
  2032.             events[j].y = edge->edge.top;
  2033.             events[j].type = EVENT_TYPE_START;
  2034.             events[j].edge = edge;
  2035.  
  2036.             edge++, j++;
  2037.         }
  2038.     }
  2039.  
  2040.     status = botor_generate (self, event_ptrs, renderer);
  2041.  
  2042.     if (events != stack_events)
  2043.         free (events);
  2044.  
  2045.     return status;
  2046. }
  2047.  
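/* Edges are carved out of chunks that double in size as they fill up;
 * each chunk header and its edge array share a single allocation, with
 * the edges placed immediately after the header (chunk + 1).
 */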
  2048. static edge_t *
  2049. botor_allocate_edge (cairo_botor_scan_converter_t *self)
  2050. {
  2051.     struct _cairo_botor_scan_converter_chunk *chunk;
  2052.  
  2053.     chunk = self->tail;
  2054.     if (chunk->count == chunk->size) {
  2055.         int size;
  2056.  
  2057.         size = chunk->size * 2;
  2058.         chunk->next = _cairo_malloc_ab_plus_c (size,
  2059.                                                sizeof (edge_t),
  2060.                                                sizeof (struct _cairo_botor_scan_converter_chunk));
  2061.         if (unlikely (chunk->next == NULL))
  2062.             return NULL;
  2063.  
  2064.         chunk = chunk->next;
  2065.         chunk->next = NULL;
  2066.         chunk->count = 0;
  2067.         chunk->size = size;
  2068.         chunk->base = chunk + 1;
  2069.         self->tail = chunk;
  2070.     }
  2071.  
  2072.     return (edge_t *) chunk->base + chunk->count++;
  2073. }
  2074.  
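/* Convert a device-space cairo_edge_t into the incremental form used
 * by the sweep: dxdy is the per-quantum x advance as a floored
 * quotient/remainder pair, x is the edge's position at its top y
 * (clipped tops need the floored_muldivrem() correction), and
 * dxdy_full is the precomputed advance for a whole STEP_Y row, only
 * filled in for edges tall enough to need full-row steps.
 */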
  2075. static cairo_status_t
  2076. botor_add_edge (cairo_botor_scan_converter_t *self,
  2077.                 const cairo_edge_t *edge)
  2078. {
  2079.     edge_t *e;
  2080.     cairo_fixed_t dx, dy;
  2081.  
  2082.     e = botor_allocate_edge (self);
  2083.     if (unlikely (e == NULL))
  2084.         return _cairo_error (CAIRO_STATUS_NO_MEMORY);
  2085.  
  2086.     cairo_list_init (&e->link);
  2087.     e->edge = *edge;
  2088.  
  2089.     dx = edge->line.p2.x - edge->line.p1.x;
  2090.     dy = edge->line.p2.y - edge->line.p1.y;
  2091.     e->dy = dy;
  2092.  
  2093.     if (dx == 0) {
  2094.         e->vertical = TRUE;
  2095.         e->x.quo = edge->line.p1.x;
  2096.         e->x.rem = 0;
  2097.         e->dxdy.quo = 0;
  2098.         e->dxdy.rem = 0;
  2099.         e->dxdy_full.quo = 0;
  2100.         e->dxdy_full.rem = 0;
  2101.     } else {
  2102.         e->vertical = FALSE;
  2103.         e->dxdy = floored_divrem (dx, dy);
  2104.         if (edge->top == edge->line.p1.y) {
  2105.             e->x.quo = edge->line.p1.x;
  2106.             e->x.rem = 0;
  2107.         } else {
  2108.             e->x = floored_muldivrem (edge->top - edge->line.p1.y,
  2109.                                       dx, dy);
  2110.             e->x.quo += edge->line.p1.x;
  2111.         }
  2112.  
  2113.         if (_cairo_fixed_integer_part (edge->bottom) - _cairo_fixed_integer_part (edge->top) > 1) {
  2114.             e->dxdy_full = floored_muldivrem (STEP_Y, dx, dy);
  2115.         } else {
  2116.             e->dxdy_full.quo = 0;
  2117.             e->dxdy_full.rem = 0;
  2118.         }
  2119.     }
  2120.  
  2121.     e->x.rem -= dy; /* bias the remainder for the carry test in sub_inc_edge() */
  2122.     e->current_sign = 0;
  2123.     e->runs = NULL;
  2124.     e->flags = START;
  2125.  
  2126.     self->num_edges++;
  2127.  
  2128.     return CAIRO_STATUS_SUCCESS;
  2129. }
  2130.  
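/* Tear down the converter's edge storage.  Only overflow chunks were
 * heap allocated (the first chunk is the embedded buffer set up by
 * _cairo_botor_scan_converter_init() below), so freeing starts at
 * chunks.next.
 */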
  2131. static void
  2132. _cairo_botor_scan_converter_destroy (void *converter)
  2133. {
  2134.     cairo_botor_scan_converter_t *self = converter;
  2135.     struct _cairo_botor_scan_converter_chunk *chunk, *next;
  2136.  
  2137.     for (chunk = self->chunks.next; chunk != NULL; chunk = next) {
  2138.         next = chunk->next;
  2139.         free (chunk);
  2140.     }
  2141. }
  2142.  
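/* Initialise a converter for the given extents and fill rule: wire up
 * the destroy/generate entry points, record the integer x bounds, and
 * point the first edge chunk at the converter's embedded buffer.
 */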
  2143. void
  2144. _cairo_botor_scan_converter_init (cairo_botor_scan_converter_t *self,
  2145.                                   const cairo_box_t *extents,
  2146.                                   cairo_fill_rule_t fill_rule)
  2147. {
  2148.     self->base.destroy     = _cairo_botor_scan_converter_destroy;
  2149.     self->base.generate    = _cairo_botor_scan_converter_generate;
  2150.  
  2151.     self->extents   = *extents;
  2152.     self->fill_rule = fill_rule;
  2153.  
  2154.     self->xmin = _cairo_fixed_integer_floor (extents->p1.x);
  2155.     self->xmax = _cairo_fixed_integer_ceil (extents->p2.x);
  2156.  
  2157.     self->chunks.base = self->buf;
  2158.     self->chunks.next = NULL;
  2159.     self->chunks.count = 0;
  2160.     self->chunks.size = sizeof (self->buf) / sizeof (edge_t);
  2161.     self->tail = &self->chunks;
  2162.  
  2163.     self->num_edges = 0;
  2164. }
  2165.