/*
 * Mesa 3-D graphics library
 *
 * Copyright 2007-2008 Tungsten Graphics, Inc., Cedar Park, Texas.
 * Copyright (C) 2010 LunarG Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "util/u_math.h"
#include "util/u_memory.h"

#include "draw/draw_context.h"
#include "draw/draw_private.h"
#include "draw/draw_pt.h"

#define SEGMENT_SIZE 1024
#define MAP_SIZE     256

/* The largest possible index within an index buffer */
#define MAX_ELT_IDX 0xffffffff

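/*
 * The vsplit frontend splits a draw call into segments small enough for the
 * middle end: at most segment_size elements per run, bounded both by the
 * local buffers (SEGMENT_SIZE) and by the middle end's max_vertices.  For
 * indexed draws, a small direct-mapped cache (MAP_SIZE entries) maps fetch
 * indices to draw-element slots, so a vertex referenced several times within
 * a segment is fetched only once.
 */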
struct vsplit_frontend {
   struct draw_pt_front_end base;
   struct draw_context *draw;

   unsigned prim;

   struct draw_pt_middle_end *middle;

   unsigned max_vertices;
   ushort segment_size;

   /* buffers for splitting */
   unsigned fetch_elts[SEGMENT_SIZE];
   ushort draw_elts[SEGMENT_SIZE];
   ushort identity_draw_elts[SEGMENT_SIZE];

   struct {
      /* map a fetch element to a draw element */
      unsigned fetches[MAP_SIZE];
      ushort draws[MAP_SIZE];
      boolean has_max_fetch;

      ushort num_fetch_elts;
      ushort num_draw_elts;
   } cache;
};


static void
vsplit_clear_cache(struct vsplit_frontend *vsplit)
{
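   /* Fill the fetch cache with 0xff bytes so that no lookup matches before
    * an entry has been written; cache.has_max_fetch covers the one fetch
    * value (all 0xff bits) that would still collide. */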
   memset(vsplit->cache.fetches, 0xff, sizeof(vsplit->cache.fetches));
   vsplit->cache.has_max_fetch = FALSE;
   vsplit->cache.num_fetch_elts = 0;
   vsplit->cache.num_draw_elts = 0;
}

static void
vsplit_flush_cache(struct vsplit_frontend *vsplit, unsigned flags)
{
   vsplit->middle->run(vsplit->middle,
         vsplit->fetch_elts, vsplit->cache.num_fetch_elts,
         vsplit->draw_elts, vsplit->cache.num_draw_elts, flags);
}

/**
 * Add a fetch element to the cache (unless an equal one is already there)
 * and append the corresponding draw element.
 */
static INLINE void
vsplit_add_cache(struct vsplit_frontend *vsplit, unsigned fetch, unsigned ofbias)
{
   unsigned hash;

   hash = fetch % MAP_SIZE;

   /* If the value isn't in the cache, or it's an overflow due to the
    * element bias */
   if (vsplit->cache.fetches[hash] != fetch || ofbias) {
      /* update cache */
      vsplit->cache.fetches[hash] = fetch;
      vsplit->cache.draws[hash] = vsplit->cache.num_fetch_elts;

      /* add fetch */
      assert(vsplit->cache.num_fetch_elts < vsplit->segment_size);
      vsplit->fetch_elts[vsplit->cache.num_fetch_elts++] = fetch;
   }

   vsplit->draw_elts[vsplit->cache.num_draw_elts++] = vsplit->cache.draws[hash];
}

/**
 * Returns the base index to the elements array.
 * The value is checked for overflows (both integer overflows
 * and the elements array overflow).
 */
static INLINE unsigned
vsplit_get_base_idx(struct vsplit_frontend *vsplit,
                    unsigned start, unsigned fetch, unsigned *ofbit)
{
   struct draw_context *draw = vsplit->draw;
   unsigned elt_idx = draw_overflow_uadd(start, fetch, MAX_ELT_IDX);
   if (ofbit)
      *ofbit = 0;

   /* Indices that overflow wrap back to the first element
    * in the index buffer */
   if (elt_idx >= draw->pt.user.eltMax) {
      if (ofbit)
         *ofbit = 1;
      elt_idx = 0;
   }

   return elt_idx;
}

/**
 * Returns the element index adjusted for the element bias.
 * The final element index is the actual element index plus the
 * element bias, clamped to the maximum element index if that
 * addition overflows.
 */
static INLINE unsigned
vsplit_get_bias_idx(struct vsplit_frontend *vsplit,
                    int idx, int bias, unsigned *ofbias)
{
   int res = idx + bias;

   if (ofbias)
      *ofbias = 0;

   if (idx > 0 && bias > 0) {
      if (res < idx || res < bias) {
         res = DRAW_MAX_FETCH_IDX;
         if (ofbias)
            *ofbias = 1;
      }
   } else if (idx < 0 && bias < 0) {
      if (res > idx || res > bias) {
         res = DRAW_MAX_FETCH_IDX;
         if (ofbias)
            *ofbias = 1;
      }
   }

   return res;
}

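/*
 * Note that VSPLIT_CREATE_IDX deliberately declares local variables
 * (elt_idx, ofbit, ofbias) in the calling function: it looks up the base
 * index (wrapping on overflow), applies the element bias, and leaves the
 * results in elt_idx and ofbias for the vsplit_add_cache() call that follows.
 */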
#define VSPLIT_CREATE_IDX(elts, start, fetch, elt_bias)    \
   unsigned elt_idx;                                       \
   unsigned ofbit;                                         \
   unsigned ofbias;                                        \
   elt_idx = vsplit_get_base_idx(vsplit, start, fetch, &ofbit);          \
   elt_idx = vsplit_get_bias_idx(vsplit, ofbit ? 0 : DRAW_GET_IDX(elts, elt_idx), elt_bias, &ofbias)

static INLINE void
vsplit_add_cache_ubyte(struct vsplit_frontend *vsplit, const ubyte *elts,
                       unsigned start, unsigned fetch, int elt_bias)
{
   struct draw_context *draw = vsplit->draw;
   VSPLIT_CREATE_IDX(elts, start, fetch, elt_bias);
   vsplit_add_cache(vsplit, elt_idx, ofbias);
}

static INLINE void
vsplit_add_cache_ushort(struct vsplit_frontend *vsplit, const ushort *elts,
                       unsigned start, unsigned fetch, int elt_bias)
{
   struct draw_context *draw = vsplit->draw;
   VSPLIT_CREATE_IDX(elts, start, fetch, elt_bias);
   vsplit_add_cache(vsplit, elt_idx, ofbias);
}


/**
 * Add a fetch element to the cache and append the corresponding draw
 * element.  The fetch element is in full range (uint).
 */
static INLINE void
vsplit_add_cache_uint(struct vsplit_frontend *vsplit, const uint *elts,
                      unsigned start, unsigned fetch, int elt_bias)
{
   struct draw_context *draw = vsplit->draw;
   unsigned raw_elem_idx = start + fetch + elt_bias;
   VSPLIT_CREATE_IDX(elts, start, fetch, elt_bias);

   /* Special care for DRAW_MAX_FETCH_IDX: the cache is cleared to all 0xff
    * bytes, so a genuine fetch of DRAW_MAX_FETCH_IDX would look cached on
    * its first use.  Force a cache update the first time it is seen. */
   if (raw_elem_idx == DRAW_MAX_FETCH_IDX && !vsplit->cache.has_max_fetch) {
      unsigned hash = fetch % MAP_SIZE;
      vsplit->cache.fetches[hash] = raw_elem_idx - 1; /* force update */
      vsplit->cache.has_max_fetch = TRUE;
   }

   vsplit_add_cache(vsplit, elt_idx, ofbias);
}


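/*
 * draw_pt_vsplit_tmp.h is a code template: each inclusion below generates a
 * run function named FUNC for one index type; ELT_TYPE and ADD_CACHE tell
 * the template how to read and cache indices.  The linear variant, with
 * neither defined, handles non-indexed draws (eltSize 0 in vsplit_prepare).
 */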
#define FUNC vsplit_run_linear
#include "draw_pt_vsplit_tmp.h"

#define FUNC vsplit_run_ubyte
#define ELT_TYPE ubyte
#define ADD_CACHE(vsplit, ib, start, fetch, bias) vsplit_add_cache_ubyte(vsplit,ib,start,fetch,bias)
#include "draw_pt_vsplit_tmp.h"

#define FUNC vsplit_run_ushort
#define ELT_TYPE ushort
#define ADD_CACHE(vsplit, ib, start, fetch, bias) vsplit_add_cache_ushort(vsplit,ib,start,fetch,bias)
#include "draw_pt_vsplit_tmp.h"

#define FUNC vsplit_run_uint
#define ELT_TYPE uint
#define ADD_CACHE(vsplit, ib, start, fetch, bias) vsplit_add_cache_uint(vsplit, ib, start, fetch, bias)
#include "draw_pt_vsplit_tmp.h"


static void vsplit_prepare(struct draw_pt_front_end *frontend,
                           unsigned in_prim,
                           struct draw_pt_middle_end *middle,
                           unsigned opt)
{
   struct vsplit_frontend *vsplit = (struct vsplit_frontend *) frontend;

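   /* pick the run function matching the index size in bytes
    * (0 means a non-indexed draw) */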
   switch (vsplit->draw->pt.user.eltSize) {
   case 0:
      vsplit->base.run = vsplit_run_linear;
      break;
   case 1:
      vsplit->base.run = vsplit_run_ubyte;
      break;
   case 2:
      vsplit->base.run = vsplit_run_ushort;
      break;
   case 4:
      vsplit->base.run = vsplit_run_uint;
      break;
   default:
      assert(0);
      break;
   }

   /* this frontend only splits; the primitive type is passed through unchanged */
   vsplit->prim = in_prim;

   vsplit->middle = middle;
   middle->prepare(middle, vsplit->prim, opt, &vsplit->max_vertices);

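   /* a segment can hold no more elements than the local buffers
    * (SEGMENT_SIZE) and no more vertices than the middle end accepts */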
   vsplit->segment_size = MIN2(SEGMENT_SIZE, vsplit->max_vertices);
}


static void vsplit_flush(struct draw_pt_front_end *frontend, unsigned flags)
{
   struct vsplit_frontend *vsplit = (struct vsplit_frontend *) frontend;

   if (flags & DRAW_FLUSH_STATE_CHANGE) {
      vsplit->middle->finish(vsplit->middle);
      vsplit->middle = NULL;
   }
}


static void vsplit_destroy(struct draw_pt_front_end *frontend)
{
   FREE(frontend);
}


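/*
 * Create the vsplit frontend.  The caller drives it through the
 * draw_pt_front_end vtable set up below; roughly (a sketch, assuming the
 * usual draw_pt calling sequence, not code from this file):
 *
 *    fe->prepare(fe, prim, middle, opt);
 *    fe->run(fe, start, count);               - repeated per batch
 *    fe->flush(fe, DRAW_FLUSH_STATE_CHANGE);
 *    fe->destroy(fe);
 *
 * base.run is left NULL here and chosen in vsplit_prepare() once the
 * index-element size is known.
 */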
struct draw_pt_front_end *draw_pt_vsplit(struct draw_context *draw)
{
   struct vsplit_frontend *vsplit = CALLOC_STRUCT(vsplit_frontend);
   ushort i;

   if (!vsplit)
      return NULL;

   vsplit->base.prepare = vsplit_prepare;
   vsplit->base.run     = NULL;
   vsplit->base.flush   = vsplit_flush;
   vsplit->base.destroy = vsplit_destroy;
   vsplit->draw = draw;

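   /* Precompute the identity mapping 0, 1, 2, ...; the template code in
    * draw_pt_vsplit_tmp.h can hand this array to the middle end when the
    * draw elements need no remapping. */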
   for (i = 0; i < SEGMENT_SIZE; i++)
      vsplit->identity_draw_elts[i] = i;

   return &vsplit->base;
}