
/**************************************************************************
 *
 * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

/*
 * Authors:
 *   Keith Whitwell <keith@tungstengraphics.com>
 */


#include "pipe/p_context.h"
#include "util/u_memory.h"
#include "util/u_math.h"
#include "util/u_cpu_detect.h"
#include "util/u_inlines.h"
#include "util/u_helpers.h"
#include "util/u_prim.h"
#include "draw_context.h"
#include "draw_vs.h"
#include "draw_gs.h"

#if HAVE_LLVM
#include "gallivm/lp_bld_init.h"
#include "gallivm/lp_bld_limits.h"
#include "draw_llvm.h"

boolean
draw_get_option_use_llvm(void)
{
   static boolean first = TRUE;
   static boolean value;
   if (first) {
      first = FALSE;
      value = debug_get_bool_option("DRAW_USE_LLVM", TRUE);

#ifdef PIPE_ARCH_X86
      util_cpu_detect();
      /* require SSE2 due to LLVM PR6960. XXX Might be fixed by now? */
      if (!util_cpu_caps.has_sse2)
         value = FALSE;
#endif
   }
   return value;
}
#endif


/**
 * Create new draw module context with gallivm state for LLVM JIT.
 */
static struct draw_context *
draw_create_context(struct pipe_context *pipe, boolean try_llvm)
{
   struct draw_context *draw = CALLOC_STRUCT( draw_context );
   if (draw == NULL)
      goto err_out;

   /* we need correct cpu caps for disabling denorms in draw_vbo() */
   util_cpu_detect();

#if HAVE_LLVM
   if (try_llvm && draw_get_option_use_llvm()) {
      draw->llvm = draw_llvm_create(draw);
      if (!draw->llvm)
         goto err_destroy;
   }
#endif

   draw->pipe = pipe;

   if (!draw_init(draw))
      goto err_destroy;

   return draw;

err_destroy:
   draw_destroy( draw );
err_out:
   return NULL;
}


/**
 * Create new draw module context, with LLVM JIT.
 */
struct draw_context *
draw_create(struct pipe_context *pipe)
{
   return draw_create_context(pipe, TRUE);
}


/**
 * Create a new draw context, without LLVM JIT.
 */
struct draw_context *
draw_create_no_llvm(struct pipe_context *pipe)
{
   return draw_create_context(pipe, FALSE);
}

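/*
 * Illustrative sketch (hypothetical driver code, compiled out with #if 0):
 * a driver normally creates the draw context together with its pipe_context
 * and destroys it on context teardown.  "xx_context"/"xx_*" below are
 * placeholder names, not a real driver.
 */
#if 0
struct xx_context {
   struct pipe_context pipe;
   struct draw_context *draw;
};

static struct pipe_context *
xx_context_create(struct pipe_screen *screen, void *priv)
{
   struct xx_context *xx = CALLOC_STRUCT(xx_context);
   if (!xx)
      return NULL;

   /* draw_create_no_llvm() could be used instead to skip the JIT path */
   xx->draw = draw_create(&xx->pipe);
   if (!xx->draw) {
      FREE(xx);
      return NULL;
   }
   return &xx->pipe;
}

static void
xx_context_destroy(struct pipe_context *pipe)
{
   struct xx_context *xx = (struct xx_context *) pipe;
   draw_destroy(xx->draw);
   FREE(xx);
}
#endif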

boolean draw_init(struct draw_context *draw)
{
   /*
    * Note that several functions compute the clipmask of the predefined
    * planes with hardcoded formulas instead of using these. So modifications
    * here must be reflected there too.
    */

   ASSIGN_4V( draw->plane[0], -1,  0,  0, 1 );
   ASSIGN_4V( draw->plane[1],  1,  0,  0, 1 );
   ASSIGN_4V( draw->plane[2],  0, -1,  0, 1 );
   ASSIGN_4V( draw->plane[3],  0,  1,  0, 1 );
   ASSIGN_4V( draw->plane[4],  0,  0,  1, 1 ); /* yes these are correct */
   ASSIGN_4V( draw->plane[5],  0,  0, -1, 1 ); /* mesa's a bit wonky */
   draw->clip_xy = TRUE;
   draw->clip_z = TRUE;

   draw->pt.user.planes = (float (*) [DRAW_TOTAL_CLIP_PLANES][4]) &(draw->plane[0]);
   draw->pt.user.eltMax = ~0;

   if (!draw_pipeline_init( draw ))
      return FALSE;

   if (!draw_pt_init( draw ))
      return FALSE;

   if (!draw_vs_init( draw ))
      return FALSE;

   if (!draw_gs_init( draw ))
      return FALSE;

   draw->quads_always_flatshade_last = !draw->pipe->screen->get_param(
      draw->pipe->screen, PIPE_CAP_QUADS_FOLLOW_PROVOKING_VERTEX_CONVENTION);

   return TRUE;
}

/*
 * Called whenever we're starting to draw a new instance.
 * Some internal structures don't want to reset internal members on
 * each invocation (because their state might have to persist across
 * multiple primitive-restart rendering calls) but do have to reset
 * them for each new instance.
 * This is particularly the case for primitive ids in the geometry shader.
 */
void draw_new_instance(struct draw_context *draw)
{
   draw_geometry_shader_new_instance(draw->gs.geometry_shader);
}


void draw_destroy( struct draw_context *draw )
{
   struct pipe_context *pipe;
   unsigned i, j;

   if (!draw)
      return;

   pipe = draw->pipe;

   /* free any rasterizer CSOs that we may have created.
    */
   for (i = 0; i < 2; i++) {
      for (j = 0; j < 2; j++) {
         if (draw->rasterizer_no_cull[i][j]) {
            pipe->delete_rasterizer_state(pipe, draw->rasterizer_no_cull[i][j]);
         }
      }
   }

   for (i = 0; i < draw->pt.nr_vertex_buffers; i++) {
      pipe_resource_reference(&draw->pt.vertex_buffer[i].buffer, NULL);
   }

   /* Not so fast -- we're just borrowing this at the moment.
    *
   if (draw->render)
      draw->render->destroy( draw->render );
   */

   draw_pipeline_destroy( draw );
   draw_pt_destroy( draw );
   draw_vs_destroy( draw );
   draw_gs_destroy( draw );
#ifdef HAVE_LLVM
   if (draw->llvm)
      draw_llvm_destroy( draw->llvm );
#endif

   FREE( draw );
}



void draw_flush( struct draw_context *draw )
{
   draw_do_flush( draw, DRAW_FLUSH_BACKEND );
}


/**
 * Specify the Minimum Resolvable Depth factor for polygon offset.
 * This factor potentially depends on the number of Z buffer bits,
 * the rasterization algorithm and the arithmetic performed on Z
 * values between vertex shading and rasterization.  It will vary
 * from one driver to another.
 */
void draw_set_mrd(struct draw_context *draw, double mrd)
{
   draw->mrd = mrd;
}

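/*
 * Illustrative sketch (hypothetical, compiled out): the MRD value is driver
 * specific, but for a plain fixed-point depth buffer a driver might simply
 * derive it from the number of Z bits, i.e. one LSB of the Z buffer.
 */
#if 0
static void
xx_update_mrd(struct draw_context *draw, unsigned zbits)
{
   /* 1 / (2^zbits - 1): ~5.96e-8 for a 24-bit unorm depth buffer */
   double mrd = 1.0 / (double) ((1ull << zbits) - 1);
   draw_set_mrd(draw, mrd);
}
#endif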

static void update_clip_flags( struct draw_context *draw )
{
   draw->clip_xy = !draw->driver.bypass_clip_xy;
   draw->guard_band_xy = (!draw->driver.bypass_clip_xy &&
                          draw->driver.guard_band_xy);
   draw->clip_z = (!draw->driver.bypass_clip_z &&
                   draw->rasterizer && draw->rasterizer->depth_clip);
   draw->clip_user = draw->rasterizer &&
                     draw->rasterizer->clip_plane_enable != 0;
}

/**
 * Register new primitive rasterization/rendering state.
 * This causes the drawing pipeline to be rebuilt.
 */
void draw_set_rasterizer_state( struct draw_context *draw,
                                const struct pipe_rasterizer_state *raster,
                                void *rast_handle )
{
   if (!draw->suspend_flushing) {
      draw_do_flush( draw, DRAW_FLUSH_STATE_CHANGE );

      draw->rasterizer = raster;
      draw->rast_handle = rast_handle;
      update_clip_flags(draw);
   }
}

/* With a little more work, llvmpipe will be able to turn this off and
 * do its own x/y clipping.
 *
 * Some hardware can turn off clipping altogether - in particular any
 * hardware with a TNL unit can do its own clipping, even if it is
 * relying on the draw module for some other reason.
 */
void draw_set_driver_clipping( struct draw_context *draw,
                               boolean bypass_clip_xy,
                               boolean bypass_clip_z,
                               boolean guard_band_xy)
{
   draw_do_flush( draw, DRAW_FLUSH_STATE_CHANGE );

   draw->driver.bypass_clip_xy = bypass_clip_xy;
   draw->driver.bypass_clip_z = bypass_clip_z;
   draw->driver.guard_band_xy = guard_band_xy;
   update_clip_flags(draw);
}


/**
 * Plug in the primitive rendering/rasterization stage (which is the last
 * stage in the drawing pipeline).
 * This is provided by the device driver.
 */
void draw_set_rasterize_stage( struct draw_context *draw,
                               struct draw_stage *stage )
{
   draw_do_flush( draw, DRAW_FLUSH_STATE_CHANGE );

   draw->pipeline.rasterize = stage;
}


/**
 * Set the draw module's clipping state.
 */
void draw_set_clip_state( struct draw_context *draw,
                          const struct pipe_clip_state *clip )
{
   draw_do_flush(draw, DRAW_FLUSH_PARAMETER_CHANGE);

   memcpy(&draw->plane[6], clip->ucp, sizeof(clip->ucp));
}


/**
 * Set the draw module's viewport state.
 */
void draw_set_viewport_states( struct draw_context *draw,
                               unsigned start_slot,
                               unsigned num_viewports,
                               const struct pipe_viewport_state *vps )
{
   const struct pipe_viewport_state *viewport = vps;
   draw_do_flush(draw, DRAW_FLUSH_PARAMETER_CHANGE);

   debug_assert(start_slot < PIPE_MAX_VIEWPORTS);
   debug_assert((start_slot + num_viewports) <= PIPE_MAX_VIEWPORTS);

   memcpy(draw->viewports + start_slot, vps,
          sizeof(struct pipe_viewport_state) * num_viewports);

   draw->identity_viewport = (num_viewports == 1) &&
      (viewport->scale[0] == 1.0f &&
       viewport->scale[1] == 1.0f &&
       viewport->scale[2] == 1.0f &&
       viewport->scale[3] == 1.0f &&
       viewport->translate[0] == 0.0f &&
       viewport->translate[1] == 0.0f &&
       viewport->translate[2] == 0.0f &&
       viewport->translate[3] == 0.0f);
}



void
draw_set_vertex_buffers(struct draw_context *draw,
                        unsigned start_slot, unsigned count,
                        const struct pipe_vertex_buffer *buffers)
{
   assert(start_slot + count <= PIPE_MAX_ATTRIBS);

   util_set_vertex_buffers_count(draw->pt.vertex_buffer,
                                 &draw->pt.nr_vertex_buffers,
                                 buffers, start_slot, count);
}


void
draw_set_vertex_elements(struct draw_context *draw,
                         unsigned count,
                         const struct pipe_vertex_element *elements)
{
   assert(count <= PIPE_MAX_ATTRIBS);

   /* We could improve this by only flushing the frontend and the fetch part
    * of the middle. This would avoid recalculating the emit keys.
    */
   draw_do_flush( draw, DRAW_FLUSH_STATE_CHANGE );

   memcpy(draw->pt.vertex_element, elements, count * sizeof(elements[0]));
   draw->pt.nr_vertex_elements = count;
}


/**
 * Tell drawing context where to find mapped vertex buffers.
 */
void
draw_set_mapped_vertex_buffer(struct draw_context *draw,
                              unsigned attr, const void *buffer,
                              size_t size)
{
   draw->pt.user.vbuffer[attr].map  = buffer;
   draw->pt.user.vbuffer[attr].size = size;
}


void
draw_set_mapped_constant_buffer(struct draw_context *draw,
                                unsigned shader_type,
                                unsigned slot,
                                const void *buffer,
                                unsigned size )
{
   debug_assert(shader_type == PIPE_SHADER_VERTEX ||
                shader_type == PIPE_SHADER_GEOMETRY);
   debug_assert(slot < PIPE_MAX_CONSTANT_BUFFERS);

   draw_do_flush(draw, DRAW_FLUSH_PARAMETER_CHANGE);

   switch (shader_type) {
   case PIPE_SHADER_VERTEX:
      draw->pt.user.vs_constants[slot] = buffer;
      draw->pt.user.vs_constants_size[slot] = size;
      break;
   case PIPE_SHADER_GEOMETRY:
      draw->pt.user.gs_constants[slot] = buffer;
      draw->pt.user.gs_constants_size[slot] = size;
      break;
   default:
      assert(0 && "invalid shader type in draw_set_mapped_constant_buffer");
   }
}

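/*
 * Illustrative sketch (hypothetical, compiled out): a software driver maps
 * its vertex/constant buffer resources itself and hands the CPU pointers to
 * the draw module before drawing.  The "xx" fields are placeholders;
 * pipe_buffer_map() is the usual gallium helper from u_inlines.h.
 */
#if 0
static void
xx_map_draw_buffers(struct xx_context *xx)
{
   unsigned i;

   for (i = 0; i < xx->num_vertex_buffers; i++) {
      const void *map = pipe_buffer_map(&xx->pipe,
                                        xx->vertex_buffer[i].buffer,
                                        PIPE_TRANSFER_READ,
                                        &xx->vb_transfer[i]);
      draw_set_mapped_vertex_buffer(xx->draw, i, map,
                                    xx->vertex_buffer[i].buffer->width0);
   }

   draw_set_mapped_constant_buffer(xx->draw, PIPE_SHADER_VERTEX, 0,
                                   xx->mapped_vs_constants,
                                   xx->vs_constants_size);
}
#endif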

/**
 * Tells the draw module to draw points with triangles if their size
 * is greater than this threshold.
 */
void
draw_wide_point_threshold(struct draw_context *draw, float threshold)
{
   draw_do_flush( draw, DRAW_FLUSH_STATE_CHANGE );
   draw->pipeline.wide_point_threshold = threshold;
}


/**
 * Should the draw module handle point->quad conversion for drawing sprites?
 */
void
draw_wide_point_sprites(struct draw_context *draw, boolean draw_sprite)
{
   draw_do_flush( draw, DRAW_FLUSH_STATE_CHANGE );
   draw->pipeline.wide_point_sprites = draw_sprite;
}


/**
 * Tells the draw module to draw lines with triangles if their width
 * is greater than this threshold.
 */
void
draw_wide_line_threshold(struct draw_context *draw, float threshold)
{
   draw_do_flush( draw, DRAW_FLUSH_STATE_CHANGE );
   draw->pipeline.wide_line_threshold = roundf(threshold);
}


/**
 * Tells the draw module whether or not to implement line stipple.
 */
void
draw_enable_line_stipple(struct draw_context *draw, boolean enable)
{
   draw_do_flush( draw, DRAW_FLUSH_STATE_CHANGE );
   draw->pipeline.line_stipple = enable;
}


/**
 * Tells the draw module whether to convert points to quads for sprite mode.
 */
void
draw_enable_point_sprites(struct draw_context *draw, boolean enable)
{
   draw_do_flush( draw, DRAW_FLUSH_STATE_CHANGE );
   draw->pipeline.point_sprite = enable;
}

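/*
 * Illustrative sketch (hypothetical, compiled out): a rasterizer without
 * native wide-point/wide-line or stipple support would typically configure
 * the draw module fallbacks like this once at context creation.
 */
#if 0
static void
xx_configure_draw_fallbacks(struct draw_context *draw)
{
   draw_wide_point_threshold(draw, 1.0f);
   draw_wide_point_sprites(draw, TRUE);
   draw_wide_line_threshold(draw, 1.0f);
   draw_enable_line_stipple(draw, TRUE);
   draw_enable_point_sprites(draw, TRUE);
}
#endif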

void
draw_set_force_passthrough( struct draw_context *draw, boolean enable )
{
   draw_do_flush( draw, DRAW_FLUSH_STATE_CHANGE );
   draw->force_passthrough = enable;
}



/**
 * Allocate an extra vertex/geometry shader vertex attribute, if it doesn't
 * exist already.
 *
 * This is used by some of the optional draw module stages such
 * as wide_point which may need to allocate additional generic/texcoord
 * attributes.
 */
int
draw_alloc_extra_vertex_attrib(struct draw_context *draw,
                               uint semantic_name, uint semantic_index)
{
   int slot;
   uint num_outputs;
   uint n;

   slot = draw_find_shader_output(draw, semantic_name, semantic_index);
   if (slot >= 0) {
      return slot;
   }

   num_outputs = draw_current_shader_outputs(draw);
   n = draw->extra_shader_outputs.num;

   assert(n < Elements(draw->extra_shader_outputs.semantic_name));

   draw->extra_shader_outputs.semantic_name[n] = semantic_name;
   draw->extra_shader_outputs.semantic_index[n] = semantic_index;
   draw->extra_shader_outputs.slot[n] = num_outputs + n;
   draw->extra_shader_outputs.num++;

   return draw->extra_shader_outputs.slot[n];
}


/**
 * Remove all extra vertex attributes that were allocated with
 * draw_alloc_extra_vertex_attrib().
 */
void
draw_remove_extra_vertex_attribs(struct draw_context *draw)
{
   draw->extra_shader_outputs.num = 0;
}


/**
 * If a geometry shader is present, return its info, else the vertex shader's
 * info.
 */
struct tgsi_shader_info *
draw_get_shader_info(const struct draw_context *draw)
{
   if (draw->gs.geometry_shader) {
      return &draw->gs.geometry_shader->info;
   } else {
      return &draw->vs.vertex_shader->info;
   }
}


/**
 * Ask the draw module for the location/slot of the given vertex attribute in
 * a post-transformed vertex.
 *
 * With this function, drivers that use the draw module should have no reason
 * to track the current vertex/geometry shader.
 *
 * Note that the draw module may sometimes generate vertices with extra
 * attributes (such as texcoords for AA lines).  The driver can call this
 * function to find those attributes.
 *
 * -1 is returned if the attribute is not found since this is
 * an undefined situation. Note that zero is a valid slot and can
 * be used by any of the attributes, because position is not
 * required to be attribute 0 or even present at all.
 */
int
draw_find_shader_output(const struct draw_context *draw,
                        uint semantic_name, uint semantic_index)
{
   const struct tgsi_shader_info *info = draw_get_shader_info(draw);
   uint i;

   for (i = 0; i < info->num_outputs; i++) {
      if (info->output_semantic_name[i] == semantic_name &&
          info->output_semantic_index[i] == semantic_index)
         return i;
   }

   /* Search the extra vertex attributes */
   for (i = 0; i < draw->extra_shader_outputs.num; i++) {
      if (draw->extra_shader_outputs.semantic_name[i] == semantic_name &&
          draw->extra_shader_outputs.semantic_index[i] == semantic_index) {
         return draw->extra_shader_outputs.slot[i];
      }
   }

   return -1;
}

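/*
 * Illustrative sketch (hypothetical, compiled out): a driver emitting
 * hardware vertices can look up post-transform slots like this.  The
 * TGSI_SEMANTIC_* tokens come from the tgsi headers, which a real driver
 * would include.
 */
#if 0
static void
xx_lookup_vertex_slots(struct draw_context *draw)
{
   int pos_slot = draw_find_shader_output(draw, TGSI_SEMANTIC_POSITION, 0);
   int gen0_slot = draw_find_shader_output(draw, TGSI_SEMANTIC_GENERIC, 0);

   if (gen0_slot < 0) {
      /* not found: neither the current shader nor any draw stage
       * provides generic[0] */
   }
   (void) pos_slot;
}
#endif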

/**
 * Return the total number of shader outputs.  This function is similar to
 * draw_current_shader_outputs() but it also counts any extra
 * vertex/geometry output attributes that may be filled in by some draw
 * stages (such as AA point, AA line).
 *
 * If a geometry shader is present, its outputs are counted;
 * otherwise the vertex shader's outputs are counted.
 */
uint
draw_num_shader_outputs(const struct draw_context *draw)
{
   const struct tgsi_shader_info *info = draw_get_shader_info(draw);
   uint count;

   count = info->num_outputs;
   count += draw->extra_shader_outputs.num;

   return count;
}


/**
 * Provide TGSI sampler objects for vertex/geometry shaders that use
 * texture fetches.  This state only needs to be set once per context.
 * This might only be used by software drivers for the time being.
 */
void
draw_texture_sampler(struct draw_context *draw,
                     uint shader,
                     struct tgsi_sampler *sampler)
{
   if (shader == PIPE_SHADER_VERTEX) {
      draw->vs.tgsi.sampler = sampler;
   } else {
      debug_assert(shader == PIPE_SHADER_GEOMETRY);
      draw->gs.tgsi.sampler = sampler;
   }
}




void draw_set_render( struct draw_context *draw,
                      struct vbuf_render *render )
{
   draw->render = render;
}


/**
 * Tell the draw module where vertex indexes/elements are located, and
 * their size (in bytes).
 *
 * Note: the caller must apply the pipe_index_buffer::offset value to
 * the address.  The draw module doesn't do that.
 */
void
draw_set_indexes(struct draw_context *draw,
                 const void *elements, unsigned elem_size,
                 unsigned elem_buffer_space)
{
   assert(elem_size == 0 ||
          elem_size == 1 ||
          elem_size == 2 ||
          elem_size == 4);
   draw->pt.user.elts = elements;
   draw->pt.user.eltSizeIB = elem_size;
   if (elem_size)
      draw->pt.user.eltMax = elem_buffer_space / elem_size;
   else
      draw->pt.user.eltMax = 0;
}

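/*
 * Illustrative sketch (hypothetical, compiled out): note how the caller,
 * not the draw module, applies pipe_index_buffer::offset to the mapped
 * index buffer address before passing it on.
 */
#if 0
static void
xx_set_draw_indexes(struct draw_context *draw,
                    const struct pipe_index_buffer *ib,
                    const void *map, unsigned buffer_size)
{
   draw_set_indexes(draw,
                    (const char *) map + ib->offset,
                    ib->index_size,
                    buffer_size - ib->offset);
}
#endif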

/* Revamp me please:
 */
void draw_do_flush( struct draw_context *draw, unsigned flags )
{
   if (!draw->suspend_flushing)
   {
      assert(!draw->flushing); /* catch inadvertent recursion */

      draw->flushing = TRUE;

      draw_pipeline_flush( draw, flags );

      draw_pt_flush( draw, flags );

      draw->flushing = FALSE;
   }
}


/**
 * Return the number of output attributes produced by the geometry
 * shader, if present.  If no geometry shader, return the number of
 * outputs from the vertex shader.
 * \sa draw_num_shader_outputs
 */
uint
draw_current_shader_outputs(const struct draw_context *draw)
{
   if (draw->gs.geometry_shader)
      return draw->gs.num_gs_outputs;
   return draw->vs.num_vs_outputs;
}


/**
 * Return the index of the shader output which will contain the
 * vertex position.
 */
uint
draw_current_shader_position_output(const struct draw_context *draw)
{
   if (draw->gs.geometry_shader)
      return draw->gs.position_output;
   return draw->vs.position_output;
}


/**
 * Return the index of the shader output which will contain the
 * viewport index.
 */
uint
draw_current_shader_viewport_index_output(const struct draw_context *draw)
{
   if (draw->gs.geometry_shader)
      return draw->gs.geometry_shader->viewport_index_output;
   return 0;
}

/**
 * Returns true if there's a geometry shader bound and the geometry
 * shader writes out a viewport index.
 */
boolean
draw_current_shader_uses_viewport_index(const struct draw_context *draw)
{
   if (draw->gs.geometry_shader)
      return draw->gs.geometry_shader->info.writes_viewport_index;
   return FALSE;
}


/**
 * Return the index of the shader output which will contain the
 * clip vertex position.
 */
uint
draw_current_shader_clipvertex_output(const struct draw_context *draw)
{
   return draw->vs.clipvertex_output;
}

uint
draw_current_shader_clipdistance_output(const struct draw_context *draw, int index)
{
   debug_assert(index < PIPE_MAX_CLIP_OR_CULL_DISTANCE_ELEMENT_COUNT);
   if (draw->gs.geometry_shader)
      return draw->gs.geometry_shader->clipdistance_output[index];
   return draw->vs.clipdistance_output[index];
}


uint
draw_current_shader_num_written_clipdistances(const struct draw_context *draw)
{
   if (draw->gs.geometry_shader)
      return draw->gs.geometry_shader->info.num_written_clipdistance;
   return draw->vs.vertex_shader->info.num_written_clipdistance;
}


uint
draw_current_shader_culldistance_output(const struct draw_context *draw, int index)
{
   debug_assert(index < PIPE_MAX_CLIP_OR_CULL_DISTANCE_ELEMENT_COUNT);
   if (draw->gs.geometry_shader)
      return draw->gs.geometry_shader->culldistance_output[index];
   return draw->vs.vertex_shader->culldistance_output[index];
}

uint
draw_current_shader_num_written_culldistances(const struct draw_context *draw)
{
   if (draw->gs.geometry_shader)
      return draw->gs.geometry_shader->info.num_written_culldistance;
   return draw->vs.vertex_shader->info.num_written_culldistance;
}

/**
 * Return a pointer/handle for a driver/CSO rasterizer object which
 * disables culling, stippling, unfilled tris, etc.
 * This is used by some pipeline stages (such as wide_point, aa_line
 * and aa_point) which convert points/lines into triangles.  In those
 * cases we don't want to accidentally cull the triangles.
 *
 * \param scissor  should the rasterizer state enable scissoring?
 * \param flatshade  should the rasterizer state use flat shading?
 * \return  rasterizer CSO handle
 */
void *
draw_get_rasterizer_no_cull( struct draw_context *draw,
                             boolean scissor,
                             boolean flatshade )
{
   if (!draw->rasterizer_no_cull[scissor][flatshade]) {
      /* create now */
      struct pipe_context *pipe = draw->pipe;
      struct pipe_rasterizer_state rast;

      memset(&rast, 0, sizeof(rast));
      rast.scissor = scissor;
      rast.flatshade = flatshade;
      rast.front_ccw = 1;
      rast.half_pixel_center = draw->rasterizer->half_pixel_center;
      rast.bottom_edge_rule = draw->rasterizer->bottom_edge_rule;
      rast.clip_halfz = draw->rasterizer->clip_halfz;

      draw->rasterizer_no_cull[scissor][flatshade] =
         pipe->create_rasterizer_state(pipe, &rast);
   }
   return draw->rasterizer_no_cull[scissor][flatshade];
}

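/*
 * Illustrative sketch (hypothetical, compiled out): a pipeline stage that
 * expands points/lines into triangles binds this CSO around the triangles
 * it emits so they are never culled; the save/restore logic of a real
 * stage is omitted here.
 */
#if 0
static void
xx_bind_no_cull_rasterizer(struct draw_context *draw)
{
   const struct pipe_rasterizer_state *rast = draw->rasterizer;
   void *no_cull = draw_get_rasterizer_no_cull(draw,
                                               rast->scissor,
                                               rast->flatshade);

   draw->pipe->bind_rasterizer_state(draw->pipe, no_cull);
}
#endif
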
void
draw_set_mapped_so_targets(struct draw_context *draw,
                           int num_targets,
                           struct draw_so_target *targets[PIPE_MAX_SO_BUFFERS])
{
   int i;

   for (i = 0; i < num_targets; i++)
      draw->so.targets[i] = targets[i];
   for (i = num_targets; i < PIPE_MAX_SO_BUFFERS; i++)
      draw->so.targets[i] = NULL;

   draw->so.num_targets = num_targets;
}

void
draw_set_sampler_views(struct draw_context *draw,
                       unsigned shader_stage,
                       struct pipe_sampler_view **views,
                       unsigned num)
{
   unsigned i;

   debug_assert(shader_stage < PIPE_SHADER_TYPES);
   debug_assert(num <= PIPE_MAX_SHADER_SAMPLER_VIEWS);

   draw_do_flush( draw, DRAW_FLUSH_STATE_CHANGE );

   for (i = 0; i < num; ++i)
      draw->sampler_views[shader_stage][i] = views[i];
   for (i = num; i < PIPE_MAX_SHADER_SAMPLER_VIEWS; ++i)
      draw->sampler_views[shader_stage][i] = NULL;

   draw->num_sampler_views[shader_stage] = num;
}

void
draw_set_samplers(struct draw_context *draw,
                  unsigned shader_stage,
                  struct pipe_sampler_state **samplers,
                  unsigned num)
{
   unsigned i;

   debug_assert(shader_stage < PIPE_SHADER_TYPES);
   debug_assert(num <= PIPE_MAX_SAMPLERS);

   draw_do_flush( draw, DRAW_FLUSH_STATE_CHANGE );

   for (i = 0; i < num; ++i)
      draw->samplers[shader_stage][i] = samplers[i];
   for (i = num; i < PIPE_MAX_SAMPLERS; ++i)
      draw->samplers[shader_stage][i] = NULL;

   draw->num_samplers[shader_stage] = num;

#ifdef HAVE_LLVM
   if (draw->llvm)
      draw_llvm_set_sampler_state(draw, shader_stage);
#endif
}

void
draw_set_mapped_texture(struct draw_context *draw,
                        unsigned shader_stage,
                        unsigned sview_idx,
                        uint32_t width, uint32_t height, uint32_t depth,
                        uint32_t first_level, uint32_t last_level,
                        const void *base_ptr,
                        uint32_t row_stride[PIPE_MAX_TEXTURE_LEVELS],
                        uint32_t img_stride[PIPE_MAX_TEXTURE_LEVELS],
                        uint32_t mip_offsets[PIPE_MAX_TEXTURE_LEVELS])
{
#ifdef HAVE_LLVM
   if (draw->llvm)
      draw_llvm_set_mapped_texture(draw,
                                   shader_stage,
                                   sview_idx,
                                   width, height, depth, first_level,
                                   last_level, base_ptr,
                                   row_stride, img_stride, mip_offsets);
#endif
}

/**
 * XXX: Results for PIPE_SHADER_CAP_MAX_TEXTURE_SAMPLERS are unreliable
 * because there are two different ways of setting textures, and drivers
 * typically only support one.
 */
int
draw_get_shader_param_no_llvm(unsigned shader, enum pipe_shader_cap param)
{
   switch(shader) {
   case PIPE_SHADER_VERTEX:
   case PIPE_SHADER_GEOMETRY:
      return tgsi_exec_get_shader_param(param);
   default:
      return 0;
   }
}

/**
 * XXX: Results for PIPE_SHADER_CAP_MAX_TEXTURE_SAMPLERS are unreliable
 * because there are two different ways of setting textures, and drivers
 * typically only support one.
 */
int
draw_get_shader_param(unsigned shader, enum pipe_shader_cap param)
{
#ifdef HAVE_LLVM
   if (draw_get_option_use_llvm()) {
      switch(shader) {
      case PIPE_SHADER_VERTEX:
      case PIPE_SHADER_GEOMETRY:
         return gallivm_get_shader_param(param);
      default:
         return 0;
      }
   }
#endif

   return draw_get_shader_param_no_llvm(shader, param);
}

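/*
 * Illustrative sketch (hypothetical, compiled out): software drivers usually
 * forward their pipe_screen::get_shader_param for the VS/GS stages straight
 * to the draw module, so the limits they advertise match what draw can run.
 */
#if 0
static int
xx_get_shader_param(struct pipe_screen *screen, unsigned shader,
                    enum pipe_shader_cap param)
{
   switch (shader) {
   case PIPE_SHADER_VERTEX:
   case PIPE_SHADER_GEOMETRY:
      return draw_get_shader_param(shader, param);
   default:
      return 0;   /* fragment shader caps come from the rasterizer itself */
   }
}
#endif
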
/**
 * Enables or disables collection of statistics.
 *
 * The draw module is capable of generating statistics for the vertex
 * processing pipeline. Collection of that data isn't free, so it is
 * disabled by default. Users of the module can enable (or disable)
 * this functionality through this function.
 * The actual data will be emitted through the VBUF interface,
 * the 'pipeline_statistics' callback to be exact.
 */
void
draw_collect_pipeline_statistics(struct draw_context *draw,
                                 boolean enable)
{
   draw->collect_statistics = enable;
}

/**
 * Computes clipper invocation statistics.
 *
 * Figures out how many primitives would have been
 * sent to the clipper given the specified
 * prim info data.
 */
void
draw_stats_clipper_primitives(struct draw_context *draw,
                              const struct draw_prim_info *prim_info)
{
   if (draw->collect_statistics) {
      unsigned start, i;
      for (start = i = 0;
           i < prim_info->primitive_count;
           start += prim_info->primitive_lengths[i], i++)
      {
         draw->statistics.c_invocations +=
            u_decomposed_prims_for_vertices(prim_info->prim,
                                            prim_info->primitive_lengths[i]);
      }
   }
}