/*
 * Mesa 3-D graphics library
 *
 * Copyright (C) 2012-2013 LunarG, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Chia-I Wu <olv@lunarg.com>
 */

#include "util/u_prim.h"
#include "core/intel_winsys.h"

#include "ilo_render.h"
#include "ilo_blit.h"
#include "ilo_context.h"
#include "ilo_cp.h"
#include "ilo_query.h"
#include "ilo_shader.h"
#include "ilo_state.h"
#include "ilo_draw.h"

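/**
 * Make the draw module the owner of the render ring, so that its callbacks
 * are invoked to pause and resume queries across batch submissions.
 */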
static void
ilo_draw_set_owner(struct ilo_context *ilo)
{
   ilo_cp_set_owner(ilo->cp, INTEL_RING_RENDER, &ilo->draw.cp_owner);
}

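/**
 * Convert a raw timestamp to nanoseconds.  Only the lower 32 bits are used,
 * and each tick corresponds to 80ns, matching ilo_get_timestamp().
 */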
static uint64_t
query_timestamp_to_ns(const struct ilo_context *ilo, uint64_t timestamp)
{
   /* see ilo_get_timestamp() */
   return (timestamp & 0xffffffff) * 80;
}

/**
 * Process the bo and accumulate the result.  The bo is emptied.
 */
static void
query_process_bo(const struct ilo_context *ilo, struct ilo_query *q)
{
   const uint64_t *vals;
   uint64_t tmp;
   int i;

   if (!q->used)
      return;

   vals = intel_bo_map(q->bo, false);
   if (!vals) {
      q->used = 0;
      return;
   }

   switch (q->type) {
   case PIPE_QUERY_OCCLUSION_COUNTER:
   case PIPE_QUERY_TIME_ELAPSED:
   case PIPE_QUERY_PRIMITIVES_GENERATED:
   case PIPE_QUERY_PRIMITIVES_EMITTED:
      assert(q->stride == sizeof(*vals) * 2);

      tmp = 0;
      for (i = 0; i < q->used; i++)
         tmp += vals[2 * i + 1] - vals[2 * i];

      if (q->type == PIPE_QUERY_TIME_ELAPSED)
         tmp = query_timestamp_to_ns(ilo, tmp);

      q->result.u64 += tmp;
      break;
   case PIPE_QUERY_TIMESTAMP:
      assert(q->stride == sizeof(*vals));

      q->result.u64 = query_timestamp_to_ns(ilo, vals[q->used - 1]);
      break;
   case PIPE_QUERY_PIPELINE_STATISTICS:
      assert(q->stride == sizeof(*vals) * 22);

      for (i = 0; i < q->used; i++) {
         struct pipe_query_data_pipeline_statistics *stats =
            &q->result.pipeline_statistics;
         const uint64_t *begin = vals + 22 * i;
         const uint64_t *end = begin + 11;

         stats->ia_vertices    += end[0] - begin[0];
         stats->ia_primitives  += end[1] - begin[1];
         stats->vs_invocations += end[2] - begin[2];
         stats->gs_invocations += end[3] - begin[3];
         stats->gs_primitives  += end[4] - begin[4];
         stats->c_invocations  += end[5] - begin[5];
         stats->c_primitives   += end[6] - begin[6];
         stats->ps_invocations += end[7] - begin[7];
         stats->hs_invocations += end[8] - begin[8];
         stats->ds_invocations += end[9] - begin[9];
         stats->cs_invocations += end[10] - begin[10];
      }
      break;
   default:
      break;
   }

   intel_bo_unmap(q->bo);

   q->used = 0;
}

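/**
 * Emit the beginning value of a pair-wise query into the bo, recycling the
 * bo by processing the accumulated values first when it is full.
 */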
static void
query_begin_bo(struct ilo_context *ilo, struct ilo_query *q)
{
   /* bo is full */
   if (q->used >= q->count)
      query_process_bo(ilo, q);

   /* write the beginning value to the bo */
   if (q->in_pairs)
      ilo_render_emit_query(ilo->render, q, q->stride * q->used);
}

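/**
 * Emit the ending value of a query into the bo and advance q->used.  For
 * pair-wise queries, the value goes to the second half of the pair.
 */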
static void
query_end_bo(struct ilo_context *ilo, struct ilo_query *q)
{
   uint32_t offset;

   assert(q->used < q->count);

   offset = q->stride * q->used;
   if (q->in_pairs)
      offset += q->stride >> 1;

   q->used++;

   /* write the ending value to the bo */
   ilo_render_emit_query(ilo->render, q, offset);
}

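/**
 * Set up a draw-related query: pick the bo layout (stride and whether
 * values are written in begin/end pairs) and allocate the bo.  Return false
 * when the query type is not supported or the allocation fails.
 */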
bool
ilo_init_draw_query(struct ilo_context *ilo, struct ilo_query *q)
{
   unsigned bo_size;

   switch (q->type) {
   case PIPE_QUERY_OCCLUSION_COUNTER:
   case PIPE_QUERY_TIME_ELAPSED:
   case PIPE_QUERY_PRIMITIVES_GENERATED:
   case PIPE_QUERY_PRIMITIVES_EMITTED:
      q->stride = sizeof(uint64_t);
      q->in_pairs = true;
      break;
   case PIPE_QUERY_TIMESTAMP:
      q->stride = sizeof(uint64_t);
      q->in_pairs = false;
      break;
   case PIPE_QUERY_PIPELINE_STATISTICS:
      q->stride = sizeof(uint64_t) * 11;
      q->in_pairs = true;
      break;
   default:
      return false;
   }

   q->cmd_len = ilo_render_get_query_len(ilo->render, q->type);

   /* double cmd_len and stride if in pairs */
   q->cmd_len <<= q->in_pairs;
   q->stride <<= q->in_pairs;

   bo_size = (q->stride > 4096) ? q->stride : 4096;
   q->bo = intel_winsys_alloc_bo(ilo->winsys, "query", bo_size, false);
   if (!q->bo)
      return false;

   q->count = bo_size / q->stride;

   return true;
}

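/**
 * Begin a query.  Submit the batch first if there is not enough space or
 * aperture room, reserve space for ending or pausing the query, and emit
 * its beginning value.
 */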
void
ilo_begin_draw_query(struct ilo_context *ilo, struct ilo_query *q)
{
   ilo_draw_set_owner(ilo);

   /* need to submit first */
   if (!ilo_builder_validate(&ilo->cp->builder, 1, &q->bo) ||
         ilo_cp_space(ilo->cp) < q->cmd_len) {
      ilo_cp_submit(ilo->cp, "out of aperture or space");

      assert(ilo_builder_validate(&ilo->cp->builder, 1, &q->bo));
      assert(ilo_cp_space(ilo->cp) >= q->cmd_len);

      ilo_draw_set_owner(ilo);
   }

   /* reserve the space for ending/pausing the query */
   ilo->draw.cp_owner.reserve += q->cmd_len >> q->in_pairs;

   query_begin_bo(ilo, q);

   if (q->in_pairs)
      list_add(&q->list, &ilo->draw.queries);
}

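/**
 * End a query.  Reclaim the space reserved when the query began, emit its
 * ending value, and remove it from the list of active queries.
 */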
void
ilo_end_draw_query(struct ilo_context *ilo, struct ilo_query *q)
{
   ilo_draw_set_owner(ilo);

   /* reclaim the reserved space */
   ilo->draw.cp_owner.reserve -= q->cmd_len >> q->in_pairs;
   assert(ilo->draw.cp_owner.reserve >= 0);

   query_end_bo(ilo, q);

   list_delinit(&q->list);
}

/**
 * Process the raw query data.
 */
void
ilo_process_draw_query(struct ilo_context *ilo, struct ilo_query *q)
{
   query_process_bo(ilo, q);
}

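/**
 * Callback invoked when the draw module gains ownership of the command
 * parser: resume all active queries, retrying with an emptied batch when
 * validation fails.
 */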
static void
ilo_draw_own_cp(struct ilo_cp *cp, void *data)
{
   struct ilo_context *ilo = data;

   /* multiply by 2 for both resuming and pausing */
   if (ilo_cp_space(ilo->cp) < ilo->draw.cp_owner.reserve * 2) {
      ilo_cp_submit(ilo->cp, "out of space");
      assert(ilo_cp_space(ilo->cp) >= ilo->draw.cp_owner.reserve * 2);
   }

   while (true) {
      struct ilo_builder_snapshot snapshot;
      struct ilo_query *q;

      ilo_builder_batch_snapshot(&ilo->cp->builder, &snapshot);

      /* resume queries */
      LIST_FOR_EACH_ENTRY(q, &ilo->draw.queries, list)
         query_begin_bo(ilo, q);

      if (!ilo_builder_validate(&ilo->cp->builder, 0, NULL)) {
         ilo_builder_batch_restore(&ilo->cp->builder, &snapshot);

         if (ilo_builder_batch_used(&ilo->cp->builder)) {
            ilo_cp_submit(ilo->cp, "out of aperture");
            continue;
         }
      }

      break;
   }

   assert(ilo_cp_space(ilo->cp) >= ilo->draw.cp_owner.reserve);
}

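/**
 * Callback invoked when the draw module loses ownership of the command
 * parser: pause all active queries, using the space reserved for them.
 */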
static void
ilo_draw_release_cp(struct ilo_cp *cp, void *data)
{
   struct ilo_context *ilo = data;
   struct ilo_query *q;

   assert(ilo_cp_space(ilo->cp) >= ilo->draw.cp_owner.reserve);

   /* pause queries */
   LIST_FOR_EACH_ENTRY(q, &ilo->draw.queries, list)
      query_end_bo(ilo, q);
}

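/**
 * Emit the states and commands for a draw.  When validation fails, submit
 * the batch and retry once with an empty batch; fail only if even that is
 * not enough.
 */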
static bool
draw_vbo(struct ilo_context *ilo, const struct ilo_state_vector *vec)
{
   bool need_flush = false;
   bool success = true;
   int max_len, before_space;

   /* on Gen7 and Gen7.5, we need SOL_RESET to reset the SO write offsets */
   if (ilo_dev_gen(ilo->dev) >= ILO_GEN(7) &&
       ilo_dev_gen(ilo->dev) <= ILO_GEN(7.5) &&
       (vec->dirty & ILO_DIRTY_SO) && vec->so.enabled &&
       !vec->so.append_bitmask) {
      ilo_cp_submit(ilo->cp, "SOL_RESET");
      ilo_cp_set_one_off_flags(ilo->cp, INTEL_EXEC_GEN7_SOL_RESET);
   }

   if (ilo_builder_batch_used(&ilo->cp->builder)) {
      /*
       * Without a better tracking mechanism, when the framebuffer changes, we
       * have to assume that the old framebuffer may be sampled from.  If that
       * happens in the middle of a batch buffer, we need to insert manual
       * flushes.
       */
      need_flush = (vec->dirty & ILO_DIRTY_FB);

      /* the same applies to SO target changes */
      need_flush |= (vec->dirty & ILO_DIRTY_SO);
   }

   ilo_draw_set_owner(ilo);

   /* make sure there is enough room first */
   max_len = ilo_render_get_draw_len(ilo->render, vec);
   if (need_flush)
      max_len += ilo_render_get_flush_len(ilo->render);

   if (max_len > ilo_cp_space(ilo->cp)) {
      ilo_cp_submit(ilo->cp, "out of space");
      need_flush = false;
      assert(max_len <= ilo_cp_space(ilo->cp));
   }

   /* space available before emission */
   before_space = ilo_cp_space(ilo->cp);

   if (need_flush)
      ilo_render_emit_flush(ilo->render);

   while (true) {
      struct ilo_builder_snapshot snapshot;

      ilo_builder_batch_snapshot(&ilo->cp->builder, &snapshot);

      ilo_render_emit_draw(ilo->render, vec);

      if (!ilo_builder_validate(&ilo->cp->builder, 0, NULL)) {
         ilo_builder_batch_restore(&ilo->cp->builder, &snapshot);

         /* flush and try again */
         if (ilo_builder_batch_used(&ilo->cp->builder)) {
            ilo_cp_submit(ilo->cp, "out of aperture");
            continue;
         }

         success = false;
      }

      break;
   }

   /* sanity check size estimation */
   assert(before_space - ilo_cp_space(ilo->cp) <= max_len);

   return success;
}

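/**
 * Emit a RECTLIST primitive on behalf of the blitter, for operations such
 * as depth buffer clears, with the pipeline flushed both before and after
 * as the workarounds below require.
 */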
void
ilo_draw_rectlist(struct ilo_context *ilo)
{
   int max_len, before_space;
   bool need_flush;

   need_flush = ilo_builder_batch_used(&ilo->cp->builder);

   ilo_draw_set_owner(ilo);

   max_len = ilo_render_get_rectlist_len(ilo->render, ilo->blitter);
   max_len += ilo_render_get_flush_len(ilo->render) * 2;

   if (max_len > ilo_cp_space(ilo->cp)) {
      ilo_cp_submit(ilo->cp, "out of space");
      need_flush = false;
      assert(max_len <= ilo_cp_space(ilo->cp));
   }

   before_space = ilo_cp_space(ilo->cp);

   /*
    * From the Sandy Bridge PRM, volume 2 part 1, page 313:
    *
    *     "If other rendering operations have preceded this clear, a
    *      PIPE_CONTROL with write cache flush enabled and Z-inhibit
    *      disabled must be issued before the rectangle primitive used for
    *      the depth buffer clear operation."
    *
    * From the Sandy Bridge PRM, volume 2 part 1, page 314:
    *
    *     "Depth buffer clear pass must be followed by a PIPE_CONTROL
    *      command with DEPTH_STALL bit set and Then followed by Depth
    *      FLUSH"
    *
    * But the pipeline has to be flushed both before and after, and not only
    * because of these workarounds.  We need the flushes for reasons such as
    *
    *  - we may sample from a texture that was rendered to
    *  - we may sample from the fb shortly after
    *
    * Skip checking blitter->op and do the flushes.
    */
   if (need_flush)
      ilo_render_emit_flush(ilo->render);

   while (true) {
      struct ilo_builder_snapshot snapshot;

      ilo_builder_batch_snapshot(&ilo->cp->builder, &snapshot);

      ilo_render_emit_rectlist(ilo->render, ilo->blitter);

      if (!ilo_builder_validate(&ilo->cp->builder, 0, NULL)) {
         ilo_builder_batch_restore(&ilo->cp->builder, &snapshot);

         /* flush and try again */
         if (ilo_builder_batch_used(&ilo->cp->builder)) {
            ilo_cp_submit(ilo->cp, "out of aperture");
            continue;
         }
      }

      break;
   }

   ilo_render_invalidate_hw(ilo->render);

   ilo_render_emit_flush(ilo->render);

   /* sanity check size estimation */
   assert(before_space - ilo_cp_space(ilo->cp) <= max_len);
}

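/**
 * Emulate primitive restart in software: map the index buffer and split the
 * draw into sub-draws at each occurrence of the restart index.
 */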
static void
draw_vbo_with_sw_restart(struct ilo_context *ilo,
                         const struct pipe_draw_info *info)
{
   const struct ilo_ib_state *ib = &ilo->state_vector.ib;
   union {
      const void *ptr;
      const uint8_t *u8;
      const uint16_t *u16;
      const uint32_t *u32;
   } u;

   /* we will draw with the IB mapped */
   if (ib->buffer) {
      u.ptr = intel_bo_map(ilo_buffer(ib->buffer)->bo, false);
      if (u.ptr)
         u.u8 += ib->offset;
   } else {
      u.ptr = ib->user_buffer;
   }

   if (!u.ptr)
      return;

#define DRAW_VBO_WITH_SW_RESTART(pipe, info, ptr) do {   \
   const unsigned end = (info)->start + (info)->count;   \
   struct pipe_draw_info subinfo;                        \
   unsigned i;                                           \
                                                         \
   subinfo = *(info);                                    \
   subinfo.primitive_restart = false;                    \
   for (i = (info)->start; i < end; i++) {               \
      if ((ptr)[i] == (info)->restart_index) {           \
         subinfo.count = i - subinfo.start;              \
         if (subinfo.count)                              \
            (pipe)->draw_vbo(pipe, &subinfo);            \
         subinfo.start = i + 1;                          \
      }                                                  \
   }                                                     \
   subinfo.count = i - subinfo.start;                    \
   if (subinfo.count)                                    \
      (pipe)->draw_vbo(pipe, &subinfo);                  \
} while (0)

   switch (ib->index_size) {
   case 1:
      DRAW_VBO_WITH_SW_RESTART(&ilo->base, info, u.u8);
      break;
   case 2:
      DRAW_VBO_WITH_SW_RESTART(&ilo->base, info, u.u16);
      break;
   case 4:
      DRAW_VBO_WITH_SW_RESTART(&ilo->base, info, u.u32);
      break;
   default:
      assert(!"unsupported index size");
      break;
   }

#undef DRAW_VBO_WITH_SW_RESTART

   if (ib->buffer)
      intel_bo_unmap(ilo_buffer(ib->buffer)->bo);
}

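/**
 * Return true when a draw with primitive restart enabled requires the
 * software fallback, either because the fixed pre-Gen7.5 cut index does not
 * match the requested restart index, or because the primitive type lacks
 * hardware restart support on this generation.
 */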
static bool
draw_vbo_need_sw_restart(const struct ilo_context *ilo,
                         const struct pipe_draw_info *info)
{
   /* the restart index is fixed prior to Gen7.5 */
   if (ilo_dev_gen(ilo->dev) < ILO_GEN(7.5)) {
      const unsigned cut_index =
         (ilo->state_vector.ib.index_size == 1) ? 0xff :
         (ilo->state_vector.ib.index_size == 2) ? 0xffff :
         (ilo->state_vector.ib.index_size == 4) ? 0xffffffff : 0;

      if (info->restart_index < cut_index)
         return true;
   }

   switch (info->mode) {
   case PIPE_PRIM_POINTS:
   case PIPE_PRIM_LINES:
   case PIPE_PRIM_LINE_STRIP:
   case PIPE_PRIM_TRIANGLES:
   case PIPE_PRIM_TRIANGLE_STRIP:
      /* these never need software fallback */
      return false;
   case PIPE_PRIM_LINE_LOOP:
   case PIPE_PRIM_POLYGON:
   case PIPE_PRIM_QUAD_STRIP:
   case PIPE_PRIM_QUADS:
   case PIPE_PRIM_TRIANGLE_FAN:
      /* these need software fallback prior to Gen7.5 */
      return (ilo_dev_gen(ilo->dev) < ILO_GEN(7.5));
   default:
      /* the rest always needs software fallback */
      return true;
   }
}

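/**
 * The draw_vbo hook of pipe_context: print debug information, fall back to
 * software primitive restart when necessary, finalize the 3D states, and
 * emit the draw.
 */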
static void
ilo_draw_vbo(struct pipe_context *pipe, const struct pipe_draw_info *info)
{
   struct ilo_context *ilo = ilo_context(pipe);

   if (ilo_debug & ILO_DEBUG_DRAW) {
      if (info->indexed) {
         ilo_printf("indexed draw %s: "
               "index start %d, count %d, vertex range [%d, %d]\n",
               u_prim_name(info->mode), info->start, info->count,
               info->min_index, info->max_index);
      }
      else {
         ilo_printf("draw %s: vertex start %d, count %d\n",
               u_prim_name(info->mode), info->start, info->count);
      }

      ilo_state_vector_dump_dirty(&ilo->state_vector);
   }

   if (ilo_skip_rendering(ilo))
      return;

   if (info->primitive_restart && info->indexed &&
       draw_vbo_need_sw_restart(ilo, info)) {
      draw_vbo_with_sw_restart(ilo, info);
      return;
   }

   ilo_finalize_3d_states(ilo, info);

   ilo_shader_cache_upload(ilo->shader_cache, &ilo->cp->builder);

   ilo_blit_resolve_framebuffer(ilo);

   /* If draw_vbo ever fails, return immediately. */
   if (!draw_vbo(ilo, &ilo->state_vector))
      return;

   /* clear dirty status */
   ilo->state_vector.dirty = 0x0;

   /* avoid dangling pointer reference */
   ilo->state_vector.draw = NULL;

   if (ilo_debug & ILO_DEBUG_NOCACHE)
      ilo_render_emit_flush(ilo->render);
}

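/**
 * The texture_barrier hook of pipe_context: flush the render pipeline so
 * that previous rendering is visible to subsequent texture fetches.
 */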
static void
ilo_texture_barrier(struct pipe_context *pipe)
{
   struct ilo_context *ilo = ilo_context(pipe);

   if (ilo->cp->ring != INTEL_RING_RENDER)
      return;

   ilo_render_emit_flush(ilo->render);

   /* don't know why this submit is needed on Gen7+ */
   if (ilo_dev_gen(ilo->dev) >= ILO_GEN(7))
      ilo_cp_submit(ilo->cp, "texture barrier");
}

static void
ilo_get_sample_position(struct pipe_context *pipe,
                        unsigned sample_count,
                        unsigned sample_index,
                        float *out_value)
{
   struct ilo_context *ilo = ilo_context(pipe);

   ilo_render_get_sample_position(ilo->render,
         sample_count, sample_index,
         &out_value[0], &out_value[1]);
}

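/**
 * Initialize the draw-related fields of the context: the command parser
 * ownership callbacks and the list of active queries.
 */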
void
ilo_init_draw(struct ilo_context *ilo)
{
   ilo->draw.cp_owner.own = ilo_draw_own_cp;
   ilo->draw.cp_owner.release = ilo_draw_release_cp;
   ilo->draw.cp_owner.data = (void *) ilo;
   ilo->draw.cp_owner.reserve = 0;

   list_inithead(&ilo->draw.queries);
}

/**
 * Initialize 3D-related functions.
 */
void
ilo_init_draw_functions(struct ilo_context *ilo)
{
   ilo->base.draw_vbo = ilo_draw_vbo;
   ilo->base.texture_barrier = ilo_texture_barrier;
   ilo->base.get_sample_position = ilo_get_sample_position;
}