/* cairo - a vector graphics library with display and print output
 *
 * Copyright © 2004 Keith Packard
 *
 * This library is free software; you can redistribute it and/or
 * modify it either under the terms of the GNU Lesser General Public
 * License version 2.1 as published by the Free Software Foundation
 * (the "LGPL") or, at your option, under the terms of the Mozilla
 * Public License Version 1.1 (the "MPL"). If you do not alter this
 * notice, a recipient may use your version of this file under either
 * the MPL or the LGPL.
 *
 * You should have received a copy of the LGPL along with this library
 * in the file COPYING-LGPL-2.1; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Suite 500, Boston, MA 02110-1335, USA
 * You should have received a copy of the MPL along with this library
 * in the file COPYING-MPL-1.1
 *
 * The contents of this file are subject to the Mozilla Public License
 * Version 1.1 (the "License"); you may not use this file except in
 * compliance with the License. You may obtain a copy of the License at
 * http://www.mozilla.org/MPL/
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY
 * OF ANY KIND, either express or implied. See the LGPL or the MPL for
 * the specific language governing rights and limitations.
 *
 * The Original Code is the cairo graphics library.
 *
 * The Initial Developer of the Original Code is Keith Packard
 *
 * Contributor(s):
 *      Keith R. Packard <keithp@keithp.com>
 */

#include "cairoint.h"

#if HAVE_UINT64_T

#define uint64_lo32(i)  ((i) & 0xffffffff)
#define uint64_hi32(i)  ((i) >> 32)
#define uint64_lo(i)    ((i) & 0xffffffff)
#define uint64_hi(i)    ((i) >> 32)
#define uint64_shift32(i)   ((i) << 32)
#define uint64_carry32  (((uint64_t) 1) << 32)

#define _cairo_uint32s_to_uint64(h,l) ((uint64_t) (h) << 32 | (l))
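/* For example, _cairo_uint32s_to_uint64 (0x00000001, 0x00000002) packs its
 * arguments into 0x0000000100000002: the first argument supplies the high
 * 32 bits and the second the low 32 bits, which uint64_hi32() and
 * uint64_lo32() recover. */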

#else

#define uint64_lo32(i)  ((i).lo)
#define uint64_hi32(i)  ((i).hi)

static cairo_uint64_t
uint64_lo (cairo_uint64_t i)
{
    cairo_uint64_t  s;

    s.lo = i.lo;
    s.hi = 0;
    return s;
}

static cairo_uint64_t
uint64_hi (cairo_uint64_t i)
{
    cairo_uint64_t  s;

    s.lo = i.hi;
    s.hi = 0;
    return s;
}

static cairo_uint64_t
uint64_shift32 (cairo_uint64_t i)
{
    cairo_uint64_t  s;

    s.lo = 0;
    s.hi = i.lo;
    return s;
}

static const cairo_uint64_t uint64_carry32 = { 0, 1 };

cairo_uint64_t
_cairo_double_to_uint64 (double i)
{
    cairo_uint64_t      q;

    q.hi = i * (1. / 4294967296.);
    q.lo = i - q.hi * 4294967296.;
    return q;
}

double
_cairo_uint64_to_double (cairo_uint64_t i)
{
    return i.hi * 4294967296. + i.lo;
}
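/* Worked example: converting 4294967301.0 (2^32 + 5, exactly representable
 * as a double) gives q.hi == 1 and q.lo == 5; _cairo_uint64_to_double()
 * reassembles the same value. */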

cairo_int64_t
_cairo_double_to_int64 (double i)
{
    cairo_uint64_t      q;

    q.hi = i * (1. / INT32_MAX);
    q.lo = i - q.hi * (double)INT32_MAX;
    return q;
}

double
_cairo_int64_to_double (cairo_int64_t i)
{
    return i.hi * INT32_MAX + i.lo;
}

cairo_uint64_t
_cairo_uint32_to_uint64 (uint32_t i)
{
    cairo_uint64_t      q;

    q.lo = i;
    q.hi = 0;
    return q;
}

cairo_int64_t
_cairo_int32_to_int64 (int32_t i)
{
    cairo_uint64_t      q;

    q.lo = i;
    q.hi = i < 0 ? -1 : 0;
    return q;
}

static cairo_uint64_t
_cairo_uint32s_to_uint64 (uint32_t h, uint32_t l)
{
    cairo_uint64_t      q;

    q.lo = l;
    q.hi = h;
    return q;
}

cairo_uint64_t
_cairo_uint64_add (cairo_uint64_t a, cairo_uint64_t b)
{
    cairo_uint64_t      s;

    s.hi = a.hi + b.hi;
    s.lo = a.lo + b.lo;
    if (s.lo < a.lo)
        s.hi++;
    return s;
}

cairo_uint64_t
_cairo_uint64_sub (cairo_uint64_t a, cairo_uint64_t b)
{
    cairo_uint64_t      s;

    s.hi = a.hi - b.hi;
    s.lo = a.lo - b.lo;
    if (s.lo > a.lo)
        s.hi--;
    return s;
}

#define uint32_lo(i)    ((i) & 0xffff)
#define uint32_hi(i)    ((i) >> 16)
#define uint32_carry16  ((1) << 16)

cairo_uint64_t
_cairo_uint32x32_64_mul (uint32_t a, uint32_t b)
{
    cairo_uint64_t  s;

    uint16_t    ah, al, bh, bl;
    uint32_t    r0, r1, r2, r3;

    al = uint32_lo (a);
    ah = uint32_hi (a);
    bl = uint32_lo (b);
    bh = uint32_hi (b);

    r0 = (uint32_t) al * bl;
    r1 = (uint32_t) al * bh;
    r2 = (uint32_t) ah * bl;
    r3 = (uint32_t) ah * bh;

    r1 += uint32_hi(r0);    /* no carry possible */
    r1 += r2;               /* but this can carry */
    if (r1 < r2)            /* check */
        r3 += uint32_carry16;

    s.hi = r3 + uint32_hi(r1);
    s.lo = (uint32_lo (r1) << 16) + uint32_lo (r0);
    return s;
}
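/* Worked example for _cairo_uint32x32_64_mul(): 0x00010001 * 0x00010001.
 * The 16 bit halves are ah = al = bh = bl = 1, so r0 = r1 = r2 = r3 = 1;
 * after folding the partial products, s.hi == 0x00000001 and
 * s.lo == 0x00020001, i.e. 65537 * 65537 == 4295098369 == 0x100020001. */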

cairo_int64_t
_cairo_int32x32_64_mul (int32_t a, int32_t b)
{
    cairo_int64_t s;
    s = _cairo_uint32x32_64_mul ((uint32_t) a, (uint32_t) b);
    if (a < 0)
        s.hi -= b;
    if (b < 0)
        s.hi -= a;
    return s;
}
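/* Why the fixups above suffice: casting a negative operand to uint32_t adds
 * 2^32, so the unsigned product computed first is
 *
 *     (a + 2^32*[a<0]) * (b + 2^32*[b<0])
 *        = a*b + 2^32*(b*[a<0] + a*[b<0])   (mod 2^64),
 *
 * i.e. it exceeds the signed product by b (resp. a) in the high 32 bits
 * whenever a (resp. b) is negative, which is exactly what gets subtracted
 * from s.hi. */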

cairo_uint64_t
_cairo_uint64_mul (cairo_uint64_t a, cairo_uint64_t b)
{
    cairo_uint64_t      s;

    s = _cairo_uint32x32_64_mul (a.lo, b.lo);
    s.hi += a.lo * b.hi + a.hi * b.lo;
    return s;
}

cairo_uint64_t
_cairo_uint64_lsl (cairo_uint64_t a, int shift)
{
    if (shift >= 32)
    {
        a.hi = a.lo;
        a.lo = 0;
        shift -= 32;
    }
    if (shift)
    {
        a.hi = a.hi << shift | a.lo >> (32 - shift);
        a.lo = a.lo << shift;
    }
    return a;
}

cairo_uint64_t
_cairo_uint64_rsl (cairo_uint64_t a, int shift)
{
    if (shift >= 32)
    {
        a.lo = a.hi;
        a.hi = 0;
        shift -= 32;
    }
    if (shift)
    {
        a.lo = a.lo >> shift | a.hi << (32 - shift);
        a.hi = a.hi >> shift;
    }
    return a;
}

#define _cairo_uint32_rsa(a,n)  ((uint32_t) (((int32_t) (a)) >> (n)))

cairo_int64_t
_cairo_uint64_rsa (cairo_int64_t a, int shift)
{
    if (shift >= 32)
    {
        a.lo = a.hi;
        a.hi = _cairo_uint32_rsa (a.hi, 31);
        shift -= 32;
    }
    if (shift)
    {
        a.lo = a.lo >> shift | a.hi << (32 - shift);
        a.hi = _cairo_uint32_rsa (a.hi, shift);
    }
    return a;
}

int
_cairo_uint64_lt (cairo_uint64_t a, cairo_uint64_t b)
{
    return (a.hi < b.hi ||
            (a.hi == b.hi && a.lo < b.lo));
}

int
_cairo_uint64_eq (cairo_uint64_t a, cairo_uint64_t b)
{
    return a.hi == b.hi && a.lo == b.lo;
}

int
_cairo_int64_lt (cairo_int64_t a, cairo_int64_t b)
{
    if (_cairo_int64_negative (a) && !_cairo_int64_negative (b))
        return 1;
    if (!_cairo_int64_negative (a) && _cairo_int64_negative (b))
        return 0;
    return _cairo_uint64_lt (a, b);
}

int
_cairo_uint64_cmp (cairo_uint64_t a, cairo_uint64_t b)
{
    if (a.hi < b.hi)
        return -1;
    else if (a.hi > b.hi)
        return 1;
    else if (a.lo < b.lo)
        return -1;
    else if (a.lo > b.lo)
        return 1;
    else
        return 0;
}

int
_cairo_int64_cmp (cairo_int64_t a, cairo_int64_t b)
{
    if (_cairo_int64_negative (a) && !_cairo_int64_negative (b))
        return -1;
    if (!_cairo_int64_negative (a) && _cairo_int64_negative (b))
        return 1;

    return _cairo_uint64_cmp (a, b);
}

cairo_uint64_t
_cairo_uint64_not (cairo_uint64_t a)
{
    a.lo = ~a.lo;
    a.hi = ~a.hi;
    return a;
}

cairo_uint64_t
_cairo_uint64_negate (cairo_uint64_t a)
{
    a.lo = ~a.lo;
    a.hi = ~a.hi;
    if (++a.lo == 0)
        ++a.hi;
    return a;
}

/*
 * Simple bit-at-a-time divide.
 */
cairo_uquorem64_t
_cairo_uint64_divrem (cairo_uint64_t num, cairo_uint64_t den)
{
    cairo_uquorem64_t   qr;
    cairo_uint64_t      bit;
    cairo_uint64_t      quo;

    bit = _cairo_uint32_to_uint64 (1);

    /* normalize to make den >= num, but not overflow */
    while (_cairo_uint64_lt (den, num) && (den.hi & 0x80000000) == 0)
    {
        bit = _cairo_uint64_lsl (bit, 1);
        den = _cairo_uint64_lsl (den, 1);
    }
    quo = _cairo_uint32_to_uint64 (0);

    /* generate quotient, one bit at a time */
    while (bit.hi | bit.lo)
    {
        if (_cairo_uint64_le (den, num))
        {
            num = _cairo_uint64_sub (num, den);
            quo = _cairo_uint64_add (quo, bit);
        }
        bit = _cairo_uint64_rsl (bit, 1);
        den = _cairo_uint64_rsl (den, 1);
    }
    qr.quo = quo;
    qr.rem = num;
    return qr;
}
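/*
 * Illustrative usage sketch for _cairo_uint64_divrem(); the helper name is
 * hypothetical and the block is compiled out.  It relies only on the
 * struct-based helpers defined above in this !HAVE_UINT64_T branch.
 */
#if 0
static void
example_uint64_divrem (void)
{
    cairo_uint64_t    num = _cairo_uint32s_to_uint64 (1, 0);   /* 2^32 */
    cairo_uint64_t    den = _cairo_uint32_to_uint64 (10);
    cairo_uquorem64_t qr  = _cairo_uint64_divrem (num, den);

    /* 2^32 = 10 * 429496729 + 6, so qr.quo == 429496729 and qr.rem == 6. */
}
#endif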

#endif /* !HAVE_UINT64_T */

#if HAVE_UINT128_T
cairo_uquorem128_t
_cairo_uint128_divrem (cairo_uint128_t num, cairo_uint128_t den)
{
    cairo_uquorem128_t  qr;

    qr.quo = num / den;
    qr.rem = num % den;
    return qr;
}

#else

cairo_uint128_t
_cairo_uint32_to_uint128 (uint32_t i)
{
    cairo_uint128_t     q;

    q.lo = _cairo_uint32_to_uint64 (i);
    q.hi = _cairo_uint32_to_uint64 (0);
    return q;
}

cairo_int128_t
_cairo_int32_to_int128 (int32_t i)
{
    cairo_int128_t      q;

    q.lo = _cairo_int32_to_int64 (i);
    q.hi = _cairo_int32_to_int64 (i < 0 ? -1 : 0);
    return q;
}

cairo_uint128_t
_cairo_uint64_to_uint128 (cairo_uint64_t i)
{
    cairo_uint128_t     q;

    q.lo = i;
    q.hi = _cairo_uint32_to_uint64 (0);
    return q;
}

cairo_int128_t
_cairo_int64_to_int128 (cairo_int64_t i)
{
    cairo_int128_t      q;

    q.lo = i;
    q.hi = _cairo_int32_to_int64 (_cairo_int64_negative(i) ? -1 : 0);
    return q;
}

cairo_uint128_t
_cairo_uint128_add (cairo_uint128_t a, cairo_uint128_t b)
{
    cairo_uint128_t     s;

    s.hi = _cairo_uint64_add (a.hi, b.hi);
    s.lo = _cairo_uint64_add (a.lo, b.lo);
    if (_cairo_uint64_lt (s.lo, a.lo))
        s.hi = _cairo_uint64_add (s.hi, _cairo_uint32_to_uint64 (1));
    return s;
}

cairo_uint128_t
_cairo_uint128_sub (cairo_uint128_t a, cairo_uint128_t b)
{
    cairo_uint128_t     s;

    s.hi = _cairo_uint64_sub (a.hi, b.hi);
    s.lo = _cairo_uint64_sub (a.lo, b.lo);
    if (_cairo_uint64_gt (s.lo, a.lo))
        s.hi = _cairo_uint64_sub (s.hi, _cairo_uint32_to_uint64(1));
    return s;
}

cairo_uint128_t
_cairo_uint64x64_128_mul (cairo_uint64_t a, cairo_uint64_t b)
{
    cairo_uint128_t     s;
    uint32_t            ah, al, bh, bl;
    cairo_uint64_t      r0, r1, r2, r3;

    al = uint64_lo32 (a);
    ah = uint64_hi32 (a);
    bl = uint64_lo32 (b);
    bh = uint64_hi32 (b);

    r0 = _cairo_uint32x32_64_mul (al, bl);
    r1 = _cairo_uint32x32_64_mul (al, bh);
    r2 = _cairo_uint32x32_64_mul (ah, bl);
    r3 = _cairo_uint32x32_64_mul (ah, bh);

    r1 = _cairo_uint64_add (r1, uint64_hi (r0));    /* no carry possible */
    r1 = _cairo_uint64_add (r1, r2);                /* but this can carry */
    if (_cairo_uint64_lt (r1, r2))                  /* check */
        r3 = _cairo_uint64_add (r3, uint64_carry32);

    s.hi = _cairo_uint64_add (r3, uint64_hi(r1));
    s.lo = _cairo_uint64_add (uint64_shift32 (r1),
                                uint64_lo (r0));
    return s;
}
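/* The 128 bit product above is assembled from four 64 bit partial products:
 * with a = ah*2^32 + al and b = bh*2^32 + bl,
 *
 *     a*b = (ah*bh)*2^64 + (ah*bl + al*bh)*2^32 + al*bl
 *         =   r3  *2^64 + (  r1  +   r2  )*2^32 +  r0,
 *
 * which is what the carry handling accumulates into s.hi and s.lo. */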

cairo_int128_t
_cairo_int64x64_128_mul (cairo_int64_t a, cairo_int64_t b)
{
    cairo_int128_t  s;
    s = _cairo_uint64x64_128_mul (_cairo_int64_to_uint64(a),
                                  _cairo_int64_to_uint64(b));
    if (_cairo_int64_negative (a))
        s.hi = _cairo_uint64_sub (s.hi,
                                  _cairo_int64_to_uint64 (b));
    if (_cairo_int64_negative (b))
        s.hi = _cairo_uint64_sub (s.hi,
                                  _cairo_int64_to_uint64 (a));
    return s;
}

cairo_uint128_t
_cairo_uint128_mul (cairo_uint128_t a, cairo_uint128_t b)
{
    cairo_uint128_t     s;

    s = _cairo_uint64x64_128_mul (a.lo, b.lo);
    s.hi = _cairo_uint64_add (s.hi,
                                _cairo_uint64_mul (a.lo, b.hi));
    s.hi = _cairo_uint64_add (s.hi,
                                _cairo_uint64_mul (a.hi, b.lo));
    return s;
}

cairo_uint128_t
_cairo_uint128_lsl (cairo_uint128_t a, int shift)
{
    if (shift >= 64)
    {
        a.hi = a.lo;
        a.lo = _cairo_uint32_to_uint64 (0);
        shift -= 64;
    }
    if (shift)
    {
        a.hi = _cairo_uint64_add (_cairo_uint64_lsl (a.hi, shift),
                                    _cairo_uint64_rsl (a.lo, (64 - shift)));
        a.lo = _cairo_uint64_lsl (a.lo, shift);
    }
    return a;
}

cairo_uint128_t
_cairo_uint128_rsl (cairo_uint128_t a, int shift)
{
    if (shift >= 64)
    {
        a.lo = a.hi;
        a.hi = _cairo_uint32_to_uint64 (0);
        shift -= 64;
    }
    if (shift)
    {
        a.lo = _cairo_uint64_add (_cairo_uint64_rsl (a.lo, shift),
                                    _cairo_uint64_lsl (a.hi, (64 - shift)));
        a.hi = _cairo_uint64_rsl (a.hi, shift);
    }
    return a;
}

cairo_uint128_t
_cairo_uint128_rsa (cairo_int128_t a, int shift)
{
    if (shift >= 64)
    {
        a.lo = a.hi;
        a.hi = _cairo_uint64_rsa (a.hi, 64-1);
        shift -= 64;
    }
    if (shift)
    {
        a.lo = _cairo_uint64_add (_cairo_uint64_rsl (a.lo, shift),
                                    _cairo_uint64_lsl (a.hi, (64 - shift)));
        a.hi = _cairo_uint64_rsa (a.hi, shift);
    }
    return a;
}

int
_cairo_uint128_lt (cairo_uint128_t a, cairo_uint128_t b)
{
    return (_cairo_uint64_lt (a.hi, b.hi) ||
            (_cairo_uint64_eq (a.hi, b.hi) &&
             _cairo_uint64_lt (a.lo, b.lo)));
}

int
_cairo_int128_lt (cairo_int128_t a, cairo_int128_t b)
{
    if (_cairo_int128_negative (a) && !_cairo_int128_negative (b))
        return 1;
    if (!_cairo_int128_negative (a) && _cairo_int128_negative (b))
        return 0;
    return _cairo_uint128_lt (a, b);
}

int
_cairo_uint128_cmp (cairo_uint128_t a, cairo_uint128_t b)
{
    int cmp;

    cmp = _cairo_uint64_cmp (a.hi, b.hi);
    if (cmp)
        return cmp;
    return _cairo_uint64_cmp (a.lo, b.lo);
}

int
_cairo_int128_cmp (cairo_int128_t a, cairo_int128_t b)
{
    if (_cairo_int128_negative (a) && !_cairo_int128_negative (b))
        return -1;
    if (!_cairo_int128_negative (a) && _cairo_int128_negative (b))
        return 1;

    return _cairo_uint128_cmp (a, b);
}

int
_cairo_uint128_eq (cairo_uint128_t a, cairo_uint128_t b)
{
    return (_cairo_uint64_eq (a.hi, b.hi) &&
            _cairo_uint64_eq (a.lo, b.lo));
}

#if HAVE_UINT64_T
#define _cairo_msbset64(q)  (q & ((uint64_t) 1 << 63))
#else
#define _cairo_msbset64(q)  (q.hi & ((uint32_t) 1 << 31))
#endif

cairo_uquorem128_t
_cairo_uint128_divrem (cairo_uint128_t num, cairo_uint128_t den)
{
    cairo_uquorem128_t  qr;
    cairo_uint128_t     bit;
    cairo_uint128_t     quo;

    bit = _cairo_uint32_to_uint128 (1);

    /* normalize to make den >= num, but not overflow */
    while (_cairo_uint128_lt (den, num) && !_cairo_msbset64(den.hi))
    {
        bit = _cairo_uint128_lsl (bit, 1);
        den = _cairo_uint128_lsl (den, 1);
    }
    quo = _cairo_uint32_to_uint128 (0);

    /* generate quotient, one bit at a time */
    while (_cairo_uint128_ne (bit, _cairo_uint32_to_uint128(0)))
    {
        if (_cairo_uint128_le (den, num))
        {
            num = _cairo_uint128_sub (num, den);
            quo = _cairo_uint128_add (quo, bit);
        }
        bit = _cairo_uint128_rsl (bit, 1);
        den = _cairo_uint128_rsl (den, 1);
    }
    qr.quo = quo;
    qr.rem = num;
    return qr;
}

cairo_int128_t
_cairo_int128_negate (cairo_int128_t a)
{
    a.lo = _cairo_uint64_not (a.lo);
    a.hi = _cairo_uint64_not (a.hi);
    return _cairo_uint128_add (a, _cairo_uint32_to_uint128 (1));
}

cairo_int128_t
_cairo_int128_not (cairo_int128_t a)
{
    a.lo = _cairo_uint64_not (a.lo);
    a.hi = _cairo_uint64_not (a.hi);
    return a;
}

#endif /* !HAVE_UINT128_T */

cairo_quorem128_t
_cairo_int128_divrem (cairo_int128_t num, cairo_int128_t den)
{
    int                 num_neg = _cairo_int128_negative (num);
    int                 den_neg = _cairo_int128_negative (den);
    cairo_uquorem128_t  uqr;
    cairo_quorem128_t   qr;

    if (num_neg)
        num = _cairo_int128_negate (num);
    if (den_neg)
        den = _cairo_int128_negate (den);
    uqr = _cairo_uint128_divrem (num, den);
    if (num_neg)
        qr.rem = _cairo_int128_negate (uqr.rem);
    else
        qr.rem = uqr.rem;
    if (num_neg != den_neg)
        qr.quo = _cairo_int128_negate (uqr.quo);
    else
        qr.quo = uqr.quo;
    return qr;
}
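/* The signed division truncates toward zero and the remainder takes the
 * sign of the numerator: for example, dividing -7 by 2 yields quo == -3 and
 * rem == -1, since the magnitudes are divided as 7 = 2*3 + 1 and the signs
 * are patched up afterwards. */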

/**
 * _cairo_uint_96by64_32x64_divrem:
 *
 * Compute a 32 bit quotient and 64 bit remainder of a 96 bit unsigned
 * dividend and 64 bit divisor.  If the quotient doesn't fit into 32
 * bits then the returned remainder is equal to the divisor, and the
 * quotient is the largest representable 64 bit integer.  It is an
 * error to call this function with the high 32 bits of @num being
 * non-zero.
 **/
cairo_uquorem64_t
_cairo_uint_96by64_32x64_divrem (cairo_uint128_t num,
                                 cairo_uint64_t den)
{
    cairo_uquorem64_t result;
    cairo_uint64_t B = _cairo_uint32s_to_uint64 (1, 0);

    /* These are the high 64 bits of the *96* bit numerator.  We're
     * going to represent the numerator as xB + y, where x is a 64 bit
     * and y is a 32 bit number. */
    cairo_uint64_t x = _cairo_uint128_to_uint64 (_cairo_uint128_rsl(num, 32));

    /* Initialise the result to indicate overflow. */
    result.quo = _cairo_uint32s_to_uint64 (-1U, -1U);
    result.rem = den;

    /* Don't bother if the quotient is going to overflow. */
    if (_cairo_uint64_ge (x, den)) {
        return /* overflow */ result;
    }

    if (_cairo_uint64_lt (x, B)) {
        /* When the final quotient is known to fit in 32 bits, then
         * num < 2^64 if and only if den < 2^32. */
        return _cairo_uint64_divrem (_cairo_uint128_to_uint64 (num), den);
    }
    else {
        /* The denominator is >= 2^32, the numerator is >= 2^64, and
         * the division won't overflow: we need two divrems.  Write
         * the numerator and denominator as
         *
         *      num = xB + y            x : 64 bits, y : 32 bits
         *      den = uB + v            u, v : 32 bits
         */
        uint32_t y = _cairo_uint128_to_uint32 (num);
        uint32_t u = uint64_hi32 (den);
        uint32_t v = _cairo_uint64_to_uint32 (den);

        /* Compute a lower bound approximate quotient of num/den
         * from x/(u+1).  Then we have
         *
         * x    = q(u+1) + r    ; q : 32 bits, r <= u : 32 bits.
         *
         * xB + y       = q(u+1)B       + (rB+y)
         *              = q(uB + B + v - v) + (rB+y)
         *              = q(uB + v)     + qB - qv + (rB+y)
         *              = q(uB + v)     + q(B-v) + (rB+y)
         *
         * The true quotient of num/den then is q plus the
         * contribution of q(B-v) + (rB+y).  The main contribution
         * comes from the term q(B-v), with the term (rB+y) only
         * contributing at most one part.
         *
         * The term q(B-v) must fit into 64 bits, since q fits into 32
         * bits on account of being a lower bound to the true
         * quotient, and as B-v <= 2^32, we may safely use a single
         * 64/64 bit division to find its contribution. */

        cairo_uquorem64_t quorem;
        cairo_uint64_t remainder; /* will contain final remainder */
        uint32_t quotient;      /* will contain final quotient. */
        uint32_t q;
        uint32_t r;

        /* Approximate quotient by dividing the high 64 bits of num by
         * u+1. Watch out for overflow of u+1. */
        if (u+1) {
            quorem = _cairo_uint64_divrem (x, _cairo_uint32_to_uint64 (u+1));
            q = _cairo_uint64_to_uint32 (quorem.quo);
            r = _cairo_uint64_to_uint32 (quorem.rem);
        }
        else {
            q = uint64_hi32 (x);
            r = _cairo_uint64_to_uint32 (x);
        }
        quotient = q;

        /* Add the main term's contribution to quotient.  Note B-v =
         * -v as a uint32 (unless v = 0) */
        if (v)
            quorem = _cairo_uint64_divrem (_cairo_uint32x32_64_mul (q, -v), den);
        else
            quorem = _cairo_uint64_divrem (_cairo_uint32s_to_uint64 (q, 0), den);
        quotient += _cairo_uint64_to_uint32 (quorem.quo);

        /* Add the contribution of the subterm and start computing the
         * true remainder. */
        remainder = _cairo_uint32s_to_uint64 (r, y);
        if (_cairo_uint64_ge (remainder, den)) {
            remainder = _cairo_uint64_sub (remainder, den);
            quotient++;
        }

        /* Add the contribution of the main term's remainder. The
         * funky test here checks that remainder + main_rem >= den,
         * taking into account overflow of the addition. */
        remainder = _cairo_uint64_add (remainder, quorem.rem);
        if (_cairo_uint64_ge (remainder, den) ||
            _cairo_uint64_lt (remainder, quorem.rem))
        {
            remainder = _cairo_uint64_sub (remainder, den);
            quotient++;
        }

        result.quo = _cairo_uint32_to_uint64 (quotient);
        result.rem = remainder;
    }
    return result;
}
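/* Worked example for _cairo_uint_96by64_32x64_divrem(): with num = 2^80 and
 * den = 2^50 the quotient is 2^30 = 1073741824 (it fits in 32 bits) and the
 * remainder is 0.  With num = 2^80 and den = 2^40 the quotient would need
 * 40 bits, so the function signals overflow by returning
 * quo == 0xffffffffffffffff and rem == den, as documented above. */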

cairo_quorem64_t
_cairo_int_96by64_32x64_divrem (cairo_int128_t num, cairo_int64_t den)
{
    int                 num_neg = _cairo_int128_negative (num);
    int                 den_neg = _cairo_int64_negative (den);
    cairo_uint64_t      nonneg_den;
    cairo_uquorem64_t   uqr;
    cairo_quorem64_t    qr;

    if (num_neg)
        num = _cairo_int128_negate (num);
    if (den_neg)
        nonneg_den = _cairo_int64_negate (den);
    else
        nonneg_den = den;

    uqr = _cairo_uint_96by64_32x64_divrem (num, nonneg_den);
    if (_cairo_uint64_eq (uqr.rem, nonneg_den)) {
        /* bail on overflow. */
        qr.quo = _cairo_uint32s_to_uint64 (0x7FFFFFFF, -1U);
        qr.rem = den;
        return qr;
    }

    if (num_neg)
        qr.rem = _cairo_int64_negate (uqr.rem);
    else
        qr.rem = uqr.rem;
    if (num_neg != den_neg)
        qr.quo = _cairo_int64_negate (uqr.quo);
    else
        qr.quo = uqr.quo;
    return qr;
}
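/* The signed wrapper uses the same overflow convention with a signed
 * sentinel: for example, num = -(2^80) with den = 2^40 makes the unsigned
 * helper report overflow (remainder equal to the divisor), so this function
 * returns quo == 0x7fffffffffffffff and rem == den. */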