Subversion Repositories Kolibri OS

Rev

Blame | Last modification | View Log | RSS feed

  1. /*
  2.  * (C) Copyright IBM Corporation 2004, 2005
  3.  * All Rights Reserved.
  4.  *
  5.  * Permission is hereby granted, free of charge, to any person obtaining a
  6.  * copy of this software and associated documentation files (the "Software"),
  7.  * to deal in the Software without restriction, including without limitation
  8.  * the rights to use, copy, modify, merge, publish, distribute, sub license,
  9.  * and/or sell copies of the Software, and to permit persons to whom the
  10.  * Software is furnished to do so, subject to the following conditions:
  11.  *
  12.  * The above copyright notice and this permission notice (including the next
  13.  * paragraph) shall be included in all copies or substantial portions of the
  14.  * Software.
  15.  *
  16.  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  17.  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  18.  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.  IN NO EVENT SHALL
  19.  * IBM,
  20.  * AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
  21.  * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
  22.  * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  23.  * SOFTWARE.
  24.  */
  25.  
  26. #include <inttypes.h>
  27. #include <assert.h>
  28. #include <string.h>
  29.  
  30. #include "glxclient.h"
  31. #include "indirect.h"
  32. #include <GL/glxproto.h>
  33. #include "glxextensions.h"
  34. #include "indirect_vertex_array.h"
  35. #include "indirect_vertex_array_priv.h"
  36.  
  37. #define __GLX_PAD(n) (((n)+3) & ~3)
  38.  
  39. /**
  40.  * \file indirect_vertex_array.c
  41.  * Implement GLX protocol for vertex arrays and vertex buffer objects.
  42.  *
 * The most important function in this file is \c fill_array_info_cache.
  44.  * The \c array_state_vector contains a cache of the ARRAY_INFO data sent
  45.  * in the DrawArrays protocol.  Certain operations, such as enabling or
  46.  * disabling an array, can invalidate this cache.  \c fill_array_info_cache
  47.  * fills-in this data.  Additionally, it examines the enabled state and
 * other factors to determine what "version" of DrawArrays protocol can be
  49.  * used.
  50.  *
 * Currently, only two versions of DrawArrays protocol are implemented.  The
  52.  * first version is the "none" protocol.  This is the fallback when the
  53.  * server does not support GL 1.1 / EXT_vertex_arrays.  It is implemented
  54.  * by sending batches of immediate mode commands that are equivalent to the
  55.  * DrawArrays protocol.
  56.  *
  57.  * The other protocol that is currently implemented is the "old" protocol.
  58.  * This is the GL 1.1 DrawArrays protocol.  The only difference between GL
  59.  * 1.1 and EXT_vertex_arrays is the opcode used for the DrawArrays command.
  60.  * This protocol is called "old" because the ARB is in the process of
 * defining a new protocol, which will probably be called either "new" or
  62.  * "vbo", to support multiple texture coordinate arrays, generic attributes,
  63.  * and vertex buffer objects.
  64.  *
  65.  * \author Ian Romanick <ian.d.romanick@intel.com>
  66.  */
  67.  
  68. static void emit_DrawArrays_none(GLenum mode, GLint first, GLsizei count);
  69. static void emit_DrawArrays_old(GLenum mode, GLint first, GLsizei count);
  70.  
  71. static void emit_DrawElements_none(GLenum mode, GLsizei count, GLenum type,
  72.                                    const GLvoid * indices);
  73. static void emit_DrawElements_old(GLenum mode, GLsizei count, GLenum type,
  74.                                   const GLvoid * indices);
  75.  
  76.  
  77. static GLubyte *emit_element_none(GLubyte * dst,
  78.                                   const struct array_state_vector *arrays,
  79.                                   unsigned index);
  80. static GLubyte *emit_element_old(GLubyte * dst,
  81.                                  const struct array_state_vector *arrays,
  82.                                  unsigned index);
  83. static struct array_state *get_array_entry(const struct array_state_vector
  84.                                            *arrays, GLenum key,
  85.                                            unsigned index);
  86. static void fill_array_info_cache(struct array_state_vector *arrays);
  87. static GLboolean validate_mode(struct glx_context * gc, GLenum mode);
  88. static GLboolean validate_count(struct glx_context * gc, GLsizei count);
  89. static GLboolean validate_type(struct glx_context * gc, GLenum type);
  90.  
  91.  
/**
 * Table of sizes, in bytes, of the GL data types.  All of the type enums
 * are in the range 0x1400 - 0x140F.  That includes types added by
 * extensions (i.e., \c GL_HALF_FLOAT_NV).  The elements of this table
 * correspond to the type enums masked with 0x0f; entries for enums with
 * no fixed size are 0.
 *
 * \notes
 * \c GL_HALF_FLOAT_NV is not included (its entry is 0).  Neither are
 * \c GL_2_BYTES, \c GL_3_BYTES, or \c GL_4_BYTES.
 */
const GLuint __glXTypeSize_table[16] = {
   1, 1, 2, 2, 4, 4, 4, 0, 0, 0, 8, 0, 0, 0, 0, 0
};
  105.  
  106.  
  107. /**
  108.  * Free the per-context array state that was allocated with
  109.  * __glXInitVertexArrayState().
  110.  */
  111. void
  112. __glXFreeVertexArrayState(struct glx_context * gc)
  113. {
  114.    __GLXattribute *state = (__GLXattribute *) (gc->client_state_private);
  115.    struct array_state_vector *arrays = state->array_state;
  116.  
  117.    if (arrays) {
  118.       free(arrays->stack);
  119.       arrays->stack = NULL;
  120.       free(arrays->arrays);
  121.       arrays->arrays = NULL;
  122.       free(arrays);
  123.       state->array_state = NULL;
  124.    }
  125. }
  126.  
  127.  
/**
 * Initialize vertex array state of a GLX context.
 *
 * \param gc  GLX context whose vertex array state is to be initialized.
 *
 * \warning
 * This function may only be called after struct glx_context::gl_extension_bits,
 * struct glx_context::server_minor, and __GLXcontext::server_major have been
 * initialized.  These values are used to determine what vertex arrays are
 * supported.
 *
 * On any allocation failure, GL_OUT_OF_MEMORY is set on the context, all
 * partial allocations are released, and state->array_state is left NULL.
 */
void
__glXInitVertexArrayState(struct glx_context * gc)
{
   __GLXattribute *state = (__GLXattribute *) (gc->client_state_private);
   struct array_state_vector *arrays;

   unsigned array_count;
   int texture_units = 1, vertex_program_attribs = 0;
   unsigned i, j;

   GLboolean got_fog = GL_FALSE;
   GLboolean got_secondary_color = GL_FALSE;


   arrays = calloc(1, sizeof(struct array_state_vector));

   if (arrays == NULL) {
      __glXSetError(gc, GL_OUT_OF_MEMORY);
      return;
   }

   /* The "old" (GL 1.1 / EXT_vertex_arrays) protocol can be disabled
    * per-context; the "new" (VBO-era) protocol is not implemented.  The
    * DrawArrays dispatch pointer is filled in later by
    * fill_array_info_cache.
    */
   arrays->old_DrawArrays_possible = !state->NoDrawArraysProtocol;
   arrays->new_DrawArrays_possible = GL_FALSE;
   arrays->DrawArrays = NULL;

   arrays->active_texture_unit = 0;


   /* Determine how many arrays are actually needed.  Only arrays that
    * are supported by the server are created.  For example, if the server
    * supports only 2 texture units, then only 2 texture coordinate arrays
    * are created.
    *
    * At the very least, GL_VERTEX_ARRAY, GL_NORMAL_ARRAY,
    * GL_COLOR_ARRAY, GL_INDEX_ARRAY, GL_TEXTURE_COORD_ARRAY, and
    * GL_EDGE_FLAG_ARRAY are supported.
    */

   /* Base count: normal, color, index, edge-flag, plus vertex (added as
    * the final array below).  Texture-coordinate arrays are counted via
    * texture_units.
    */
   array_count = 5;

   /* Fog-coordinate arrays require EXT_fog_coord or server GL >= 1.4. */
   if (__glExtensionBitIsEnabled(gc, GL_EXT_fog_coord_bit)
       || (gc->server_major > 1) || (gc->server_minor >= 4)) {
      got_fog = GL_TRUE;
      array_count++;
   }

   /* Secondary-color arrays require EXT_secondary_color or GL >= 1.4. */
   if (__glExtensionBitIsEnabled(gc, GL_EXT_secondary_color_bit)
       || (gc->server_major > 1) || (gc->server_minor >= 4)) {
      got_secondary_color = GL_TRUE;
      array_count++;
   }

   /* Multiple texture units require ARB_multitexture or GL >= 1.3;
    * otherwise the default of 1 unit stands.
    */
   if (__glExtensionBitIsEnabled(gc, GL_ARB_multitexture_bit)
       || (gc->server_major > 1) || (gc->server_minor >= 3)) {
      __indirect_glGetIntegerv(GL_MAX_TEXTURE_UNITS, &texture_units);
   }

   if (__glExtensionBitIsEnabled(gc, GL_ARB_vertex_program_bit)) {
      __indirect_glGetProgramivARB(GL_VERTEX_PROGRAM_ARB,
                                   GL_MAX_PROGRAM_ATTRIBS_ARB,
                                   &vertex_program_attribs);
   }

   arrays->num_texture_units = texture_units;
   arrays->num_vertex_program_attribs = vertex_program_attribs;
   array_count += texture_units + vertex_program_attribs;
   arrays->num_arrays = array_count;
   arrays->arrays = calloc(array_count, sizeof(struct array_state));

   if (arrays->arrays == NULL) {
      free(arrays);
      __glXSetError(gc, GL_OUT_OF_MEMORY);
      return;
   }

   /* Fixed slots 0-3: normal, color, index, edge flag.  The data_type /
    * count defaults here mirror the GL defaults for each array.
    */
   arrays->arrays[0].data_type = GL_FLOAT;
   arrays->arrays[0].count = 3;
   arrays->arrays[0].key = GL_NORMAL_ARRAY;
   arrays->arrays[0].normalized = GL_TRUE;
   arrays->arrays[0].old_DrawArrays_possible = GL_TRUE;

   arrays->arrays[1].data_type = GL_FLOAT;
   arrays->arrays[1].count = 4;
   arrays->arrays[1].key = GL_COLOR_ARRAY;
   arrays->arrays[1].normalized = GL_TRUE;
   arrays->arrays[1].old_DrawArrays_possible = GL_TRUE;

   arrays->arrays[2].data_type = GL_FLOAT;
   arrays->arrays[2].count = 1;
   arrays->arrays[2].key = GL_INDEX_ARRAY;
   arrays->arrays[2].old_DrawArrays_possible = GL_TRUE;

   arrays->arrays[3].data_type = GL_UNSIGNED_BYTE;
   arrays->arrays[3].count = 1;
   arrays->arrays[3].key = GL_EDGE_FLAG_ARRAY;
   arrays->arrays[3].old_DrawArrays_possible = GL_TRUE;

   /* One texture-coordinate array per supported unit.  Only unit 0 can be
    * expressed by the "old" DrawArrays protocol; header[1] records the
    * unit enum used by the MultiTexCoord immediate-mode command.
    */
   for (i = 0; i < texture_units; i++) {
      arrays->arrays[4 + i].data_type = GL_FLOAT;
      arrays->arrays[4 + i].count = 4;
      arrays->arrays[4 + i].key = GL_TEXTURE_COORD_ARRAY;

      arrays->arrays[4 + i].old_DrawArrays_possible = (i == 0);
      arrays->arrays[4 + i].index = i;

      arrays->arrays[4 + i].header[1] = i + GL_TEXTURE0;
   }

   i = 4 + texture_units;

   if (got_fog) {
      arrays->arrays[i].data_type = GL_FLOAT;
      arrays->arrays[i].count = 1;
      arrays->arrays[i].key = GL_FOG_COORDINATE_ARRAY;
      arrays->arrays[i].old_DrawArrays_possible = GL_TRUE;
      i++;
   }

   if (got_secondary_color) {
      arrays->arrays[i].data_type = GL_FLOAT;
      arrays->arrays[i].count = 3;
      arrays->arrays[i].key = GL_SECONDARY_COLOR_ARRAY;
      arrays->arrays[i].old_DrawArrays_possible = GL_TRUE;
      arrays->arrays[i].normalized = GL_TRUE;
      i++;
   }


   /* Generic vertex-program attribute arrays are stored in reverse index
    * order (idx counts down as j counts up).  NOTE(review): presumably so
    * that attribute 0 is emitted last, near the vertex array -- confirm
    * against emit_element_* ordering requirements.
    */
   for (j = 0; j < vertex_program_attribs; j++) {
      const unsigned idx = (vertex_program_attribs - (j + 1));


      arrays->arrays[idx + i].data_type = GL_FLOAT;
      arrays->arrays[idx + i].count = 4;
      arrays->arrays[idx + i].key = GL_VERTEX_ATTRIB_ARRAY_POINTER;

      arrays->arrays[idx + i].old_DrawArrays_possible = 0;
      arrays->arrays[idx + i].index = idx;

      arrays->arrays[idx + i].header[1] = idx;
   }

   i += vertex_program_attribs;


   /* Vertex array *must* be last because of the way that
    * emit_DrawArrays_none works.
    */

   arrays->arrays[i].data_type = GL_FLOAT;
   arrays->arrays[i].count = 4;
   arrays->arrays[i].key = GL_VERTEX_ARRAY;
   arrays->arrays[i].old_DrawArrays_possible = GL_TRUE;

   assert((i + 1) == arrays->num_arrays);

   /* Client attribute stack: one saved copy of every array's state per
    * push level.
    */
   arrays->stack_index = 0;
   arrays->stack = malloc(sizeof(struct array_stack_state)
                          * arrays->num_arrays
                          * __GL_CLIENT_ATTRIB_STACK_DEPTH);

   if (arrays->stack == NULL) {
      free(arrays->arrays);
      free(arrays);
      __glXSetError(gc, GL_OUT_OF_MEMORY);
      return;
   }

   /* Everything went ok so we put vertex array state in place
    * in context.
    */
   state->array_state = arrays;
}
  312.  
  313.  
  314. /**
  315.  * Calculate the size of a single vertex for the "none" protocol.  This is
  316.  * essentially the size of all the immediate-mode commands required to
  317.  * implement the enabled vertex arrays.
  318.  */
  319. static size_t
  320. calculate_single_vertex_size_none(const struct array_state_vector *arrays)
  321. {
  322.    size_t single_vertex_size = 0;
  323.    unsigned i;
  324.  
  325.  
  326.    for (i = 0; i < arrays->num_arrays; i++) {
  327.       if (arrays->arrays[i].enabled) {
  328.          single_vertex_size += ((uint16_t *) arrays->arrays[i].header)[0];
  329.       }
  330.    }
  331.  
  332.    return single_vertex_size;
  333. }
  334.  
  335.  
  336. /**
  337.  * Emit a single element using non-DrawArrays protocol.
  338.  */
  339. GLubyte *
  340. emit_element_none(GLubyte * dst,
  341.                   const struct array_state_vector * arrays, unsigned index)
  342. {
  343.    unsigned i;
  344.  
  345.  
  346.    for (i = 0; i < arrays->num_arrays; i++) {
  347.       if (arrays->arrays[i].enabled) {
  348.          const size_t offset = index * arrays->arrays[i].true_stride;
  349.  
  350.          /* The generic attributes can have more data than is in the
  351.           * elements.  This is because a vertex array can be a 2 element,
  352.           * normalized, unsigned short, but the "closest" immediate mode
  353.           * protocol is for a 4Nus.  Since the sizes are small, the
  354.           * performance impact on modern processors should be negligible.
  355.           */
  356.          (void) memset(dst, 0, ((uint16_t *) arrays->arrays[i].header)[0]);
  357.  
  358.          (void) memcpy(dst, arrays->arrays[i].header,
  359.                        arrays->arrays[i].header_size);
  360.  
  361.          dst += arrays->arrays[i].header_size;
  362.  
  363.          (void) memcpy(dst, ((GLubyte *) arrays->arrays[i].data) + offset,
  364.                        arrays->arrays[i].element_size);
  365.  
  366.          dst += __GLX_PAD(arrays->arrays[i].element_size);
  367.       }
  368.    }
  369.  
  370.    return dst;
  371. }
  372.  
  373.  
  374. /**
  375.  * Emit a single element using "old" DrawArrays protocol from
  376.  * EXT_vertex_arrays / OpenGL 1.1.
  377.  */
  378. GLubyte *
  379. emit_element_old(GLubyte * dst,
  380.                  const struct array_state_vector * arrays, unsigned index)
  381. {
  382.    unsigned i;
  383.  
  384.  
  385.    for (i = 0; i < arrays->num_arrays; i++) {
  386.       if (arrays->arrays[i].enabled) {
  387.          const size_t offset = index * arrays->arrays[i].true_stride;
  388.  
  389.          (void) memcpy(dst, ((GLubyte *) arrays->arrays[i].data) + offset,
  390.                        arrays->arrays[i].element_size);
  391.  
  392.          dst += __GLX_PAD(arrays->arrays[i].element_size);
  393.       }
  394.    }
  395.  
  396.    return dst;
  397. }
  398.  
  399.  
  400. struct array_state *
  401. get_array_entry(const struct array_state_vector *arrays,
  402.                 GLenum key, unsigned index)
  403. {
  404.    unsigned i;
  405.  
  406.    for (i = 0; i < arrays->num_arrays; i++) {
  407.       if ((arrays->arrays[i].key == key)
  408.           && (arrays->arrays[i].index == index)) {
  409.          return &arrays->arrays[i];
  410.       }
  411.    }
  412.  
  413.    return NULL;
  414. }
  415.  
  416.  
  417. static GLboolean
  418. allocate_array_info_cache(struct array_state_vector *arrays,
  419.                           size_t required_size)
  420. {
  421. #define MAX_HEADER_SIZE 20
  422.    if (arrays->array_info_cache_buffer_size < required_size) {
  423.       GLubyte *temp = realloc(arrays->array_info_cache_base,
  424.                               required_size + MAX_HEADER_SIZE);
  425.  
  426.       if (temp == NULL) {
  427.          return GL_FALSE;
  428.       }
  429.  
  430.       arrays->array_info_cache_base = temp;
  431.       arrays->array_info_cache = temp + MAX_HEADER_SIZE;
  432.       arrays->array_info_cache_buffer_size = required_size;
  433.    }
  434.  
  435.    arrays->array_info_cache_size = required_size;
  436.    return GL_TRUE;
  437. }
  438.  
  439.  
/**
 * Rebuild the cached ARRAY_INFO data and select the DrawArrays /
 * DrawElements implementation matching the currently enabled arrays.
 *
 * Counts the enabled client arrays, determines whether the "old"
 * (GL 1.1 / EXT_vertex_arrays) protocol can represent the current enable
 * state, and fills the ARRAY_INFO cache used by that protocol.  If the
 * "old" protocol cannot be used, the immediate-mode ("none") emitters are
 * selected instead.  Marks the cache valid on completion.
 */
void
fill_array_info_cache(struct array_state_vector *arrays)
{
   GLboolean old_DrawArrays_possible;
   unsigned i;


   /* Determine how many arrays are enabled.
    */

   arrays->enabled_client_array_count = 0;
   old_DrawArrays_possible = arrays->old_DrawArrays_possible;
   for (i = 0; i < arrays->num_arrays; i++) {
      if (arrays->arrays[i].enabled) {
         arrays->enabled_client_array_count++;
         /* Every enabled array must support the old protocol for it to
          * be usable at all.
          */
         old_DrawArrays_possible &= arrays->arrays[i].old_DrawArrays_possible;
      }
   }

   /* The "new" protocol is never enabled (see __glXInitVertexArrayState);
    * this branch exists only to assert that invariant.
    */
   if (arrays->new_DrawArrays_possible) {
      assert(!arrays->new_DrawArrays_possible);
   }
   else if (old_DrawArrays_possible) {
      /* Each enabled array contributes one 12-byte ARRAY_INFO record:
       * data type, element count, and array kind (3 x 4 bytes each).
       */
      const size_t required_size = arrays->enabled_client_array_count * 12;
      uint32_t *info;


      /* NOTE(review): on allocation failure the function returns without
       * setting array_info_cache_valid, leaving the previous emitters and
       * cache state in place.
       */
      if (!allocate_array_info_cache(arrays, required_size)) {
         return;
      }


      info = (uint32_t *) arrays->array_info_cache;
      for (i = 0; i < arrays->num_arrays; i++) {
         if (arrays->arrays[i].enabled) {
            *(info++) = arrays->arrays[i].data_type;
            *(info++) = arrays->arrays[i].count;
            *(info++) = arrays->arrays[i].key;
         }
      }

      arrays->DrawArrays = emit_DrawArrays_old;
      arrays->DrawElements = emit_DrawElements_old;
   }
   else {
      arrays->DrawArrays = emit_DrawArrays_none;
      arrays->DrawElements = emit_DrawElements_none;
   }

   arrays->array_info_cache_valid = GL_TRUE;
}
  493.  
  494.  
/**
 * Emit a \c glDrawArrays command using the "none" protocol.  That is,
 * emit immediate-mode commands that are equivalent to the requested
 * \c glDrawArrays command.  This is used with servers that don't support
 * the OpenGL 1.1 / EXT_vertex_arrays DrawArrays protocol or in cases where
 * vertex state is enabled that is not compatible with that protocol.
 */
void
emit_DrawArrays_none(GLenum mode, GLint first, GLsizei count)
{
   struct glx_context *gc = __glXGetCurrentContext();
   const __GLXattribute *state =
      (const __GLXattribute *) (gc->client_state_private);
   struct array_state_vector *arrays = state->array_state;

   size_t single_vertex_size;
   GLubyte *pc;
   unsigned i;
   /* Pre-encoded GLX render command headers: { command size, opcode }. */
   static const uint16_t begin_cmd[2] = { 8, X_GLrop_Begin };
   static const uint16_t end_cmd[2] = { 4, X_GLrop_End };


   single_vertex_size = calculate_single_vertex_size_none(arrays);

   pc = gc->pc;

   /* Emit Begin(mode): 4-byte header followed by the 4-byte mode value.
    * NOTE(review): unlike emit_DrawElements_none, no buffer-space check
    * precedes these 8 bytes -- presumably gc->pc always has headroom up
    * to gc->limit; confirm against the render-buffer invariants.
    */
   (void) memcpy(pc, begin_cmd, 4);
   *(int *) (pc + 4) = mode;

   pc += 8;

   for (i = 0; i < count; i++) {
      /* Flush if the next vertex's immediate-mode commands might not fit. */
      if ((pc + single_vertex_size) >= gc->bufEnd) {
         pc = __glXFlushRenderBuffer(gc, pc);
      }

      pc = emit_element_none(pc, arrays, first + i);
   }

   /* Emit End, flushing first if even these 4 bytes might not fit. */
   if ((pc + 4) >= gc->bufEnd) {
      pc = __glXFlushRenderBuffer(gc, pc);
   }

   (void) memcpy(pc, end_cmd, 4);
   pc += 4;

   gc->pc = pc;
   if (gc->pc > gc->limit) {
      (void) __glXFlushRenderBuffer(gc, gc->pc);
   }
}
  546.  
  547.  
/**
 * Emit the header data for the GL 1.1 / EXT_vertex_arrays DrawArrays
 * protocol.
 *
 * \param gc                    GLX context.
 * \param arrays                Array state.
 * \param elements_per_request  Location to store the number of elements that
 *                              can fit in a single Render / RenderLarge
 *                              command.
 * \param total_requests        Total number of requests for a RenderLarge
 *                              command.  If a Render command is used, this
 *                              will be zero.
 * \param mode                  Drawing mode.
 * \param count                 Number of vertices.
 *
 * \returns
 * A pointer to the buffer for array data.
 */
static GLubyte *
emit_DrawArrays_header_old(struct glx_context * gc,
                           struct array_state_vector *arrays,
                           size_t * elements_per_request,
                           unsigned int *total_requests,
                           GLenum mode, GLsizei count)
{
   size_t command_size;
   size_t single_vertex_size;
   const unsigned header_size = 16;
   unsigned i;
   GLubyte *pc;


   /* Determine the size of the whole command.  This includes the header,
    * the ARRAY_INFO data and the array data.  Once this size is calculated,
    * it will be known whether a Render or RenderLarge command is needed.
    */

   single_vertex_size = 0;
   for (i = 0; i < arrays->num_arrays; i++) {
      if (arrays->arrays[i].enabled) {
         single_vertex_size += __GLX_PAD(arrays->arrays[i].element_size);
      }
   }

   command_size = arrays->array_info_cache_size + header_size
      + (single_vertex_size * count);


   /* Write the header for either a Render command or a RenderLarge
    * command.  After the header is written, write the ARRAY_INFO data.
    */

   if (command_size > gc->maxSmallRenderCommandSize) {
      /* maxSize is the maximum amount of data can be stuffed into a single
       * packet.  sz_xGLXRenderReq is added because bufSize is the maximum
       * packet size minus sz_xGLXRenderReq.
       */
      const size_t maxSize = (gc->bufSize + sz_xGLXRenderReq)
         - sz_xGLXRenderLargeReq;
      unsigned vertex_requests;


      /* Calculate the number of data packets that will be required to send
       * the whole command.  To do this, the number of vertices that
       * will fit in a single buffer must be calculated.
       *
       * The important value here is elements_per_request.  This is the
       * number of complete array elements that will fit in a single
       * buffer.  There may be some wasted space at the end of the buffer,
       * but splitting elements across buffer boundaries would be painful.
       *
       * NOTE(review): assumes single_vertex_size > 0 (at least one array
       * enabled); otherwise the division below is undefined.  Confirm
       * callers guarantee this.
       */

      elements_per_request[0] = maxSize / single_vertex_size;

      vertex_requests = (count + elements_per_request[0] - 1)
         / elements_per_request[0];

      /* Request 1 carries the header plus ARRAY_INFO; the vertex data
       * follows in vertex_requests further chunks.
       */
      *total_requests = vertex_requests + 1;


      __glXFlushRenderBuffer(gc, gc->pc);

      /* RenderLarge uses a 4-byte command-size field instead of 2. */
      command_size += 4;

      /* Build the header in the scratch space reserved in front of the
       * ARRAY_INFO cache (see allocate_array_info_cache) so header and
       * cache are sent as one contiguous chunk.
       */
      pc = ((GLubyte *) arrays->array_info_cache) - (header_size + 4);
      *(uint32_t *) (pc + 0) = command_size;
      *(uint32_t *) (pc + 4) = X_GLrop_DrawArrays;
      *(uint32_t *) (pc + 8) = count;
      *(uint32_t *) (pc + 12) = arrays->enabled_client_array_count;
      *(uint32_t *) (pc + 16) = mode;

      __glXSendLargeChunk(gc, 1, *total_requests, pc,
                          header_size + 4 + arrays->array_info_cache_size);

      pc = gc->pc;
   }
   else {
      /* Small command: write the 16-byte header and the ARRAY_INFO data
       * directly into the render buffer, flushing first if needed.
       */
      if ((gc->pc + command_size) >= gc->bufEnd) {
         (void) __glXFlushRenderBuffer(gc, gc->pc);
      }

      pc = gc->pc;
      *(uint16_t *) (pc + 0) = command_size;
      *(uint16_t *) (pc + 2) = X_GLrop_DrawArrays;
      *(uint32_t *) (pc + 4) = count;
      *(uint32_t *) (pc + 8) = arrays->enabled_client_array_count;
      *(uint32_t *) (pc + 12) = mode;

      pc += header_size;

      (void) memcpy(pc, arrays->array_info_cache,
                    arrays->array_info_cache_size);
      pc += arrays->array_info_cache_size;

      *elements_per_request = count;
      *total_requests = 0;
   }


   return pc;
}
  669.  
  670.  
  671. /**
  672.  */
  673. void
  674. emit_DrawArrays_old(GLenum mode, GLint first, GLsizei count)
  675. {
  676.    struct glx_context *gc = __glXGetCurrentContext();
  677.    const __GLXattribute *state =
  678.       (const __GLXattribute *) (gc->client_state_private);
  679.    struct array_state_vector *arrays = state->array_state;
  680.  
  681.    GLubyte *pc;
  682.    size_t elements_per_request;
  683.    unsigned total_requests = 0;
  684.    unsigned i;
  685.    size_t total_sent = 0;
  686.  
  687.  
  688.    pc = emit_DrawArrays_header_old(gc, arrays, &elements_per_request,
  689.                                    &total_requests, mode, count);
  690.  
  691.  
  692.    /* Write the arrays.
  693.     */
  694.  
  695.    if (total_requests == 0) {
  696.       assert(elements_per_request >= count);
  697.  
  698.       for (i = 0; i < count; i++) {
  699.          pc = emit_element_old(pc, arrays, i + first);
  700.       }
  701.  
  702.       assert(pc <= gc->bufEnd);
  703.  
  704.       gc->pc = pc;
  705.       if (gc->pc > gc->limit) {
  706.          (void) __glXFlushRenderBuffer(gc, gc->pc);
  707.       }
  708.    }
  709.    else {
  710.       unsigned req;
  711.  
  712.  
  713.       for (req = 2; req <= total_requests; req++) {
  714.          if (count < elements_per_request) {
  715.             elements_per_request = count;
  716.          }
  717.  
  718.          pc = gc->pc;
  719.          for (i = 0; i < elements_per_request; i++) {
  720.             pc = emit_element_old(pc, arrays, i + first);
  721.          }
  722.  
  723.          first += elements_per_request;
  724.  
  725.          total_sent += (size_t) (pc - gc->pc);
  726.          __glXSendLargeChunk(gc, req, total_requests, gc->pc, pc - gc->pc);
  727.  
  728.          count -= elements_per_request;
  729.       }
  730.    }
  731. }
  732.  
  733.  
/**
 * Emit a \c glDrawElements command using the "none" protocol: the indexed
 * vertices are sent as a Begin/End pair bracketing one block of
 * immediate-mode commands per element.
 *
 * \param mode     Drawing mode.
 * \param count    Number of indices in \c indices.
 * \param type     Index type: GL_UNSIGNED_BYTE, GL_UNSIGNED_SHORT, or
 *                 GL_UNSIGNED_INT.
 * \param indices  Client-memory index array.
 */
void
emit_DrawElements_none(GLenum mode, GLsizei count, GLenum type,
                       const GLvoid * indices)
{
   struct glx_context *gc = __glXGetCurrentContext();
   const __GLXattribute *state =
      (const __GLXattribute *) (gc->client_state_private);
   struct array_state_vector *arrays = state->array_state;
   /* Pre-encoded GLX render command headers: { command size, opcode }. */
   static const uint16_t begin_cmd[2] = { 8, X_GLrop_Begin };
   static const uint16_t end_cmd[2] = { 4, X_GLrop_End };

   GLubyte *pc;
   size_t single_vertex_size;
   unsigned i;


   single_vertex_size = calculate_single_vertex_size_none(arrays);


   /* Make room for the Begin command before writing it. */
   if ((gc->pc + single_vertex_size) >= gc->bufEnd) {
      gc->pc = __glXFlushRenderBuffer(gc, gc->pc);
   }

   pc = gc->pc;

   (void) memcpy(pc, begin_cmd, 4);
   *(int *) (pc + 4) = mode;

   pc += 8;

   for (i = 0; i < count; i++) {
      unsigned index = 0;

      /* Flush if the next vertex's immediate-mode commands might not fit. */
      if ((pc + single_vertex_size) >= gc->bufEnd) {
         pc = __glXFlushRenderBuffer(gc, pc);
      }

      /* Widen the i-th index to unsigned.  An unrecognized type leaves
       * index == 0; presumably \c type was already checked by
       * validate_type -- TODO confirm callers do so.
       */
      switch (type) {
      case GL_UNSIGNED_INT:
         index = (unsigned) (((GLuint *) indices)[i]);
         break;
      case GL_UNSIGNED_SHORT:
         index = (unsigned) (((GLushort *) indices)[i]);
         break;
      case GL_UNSIGNED_BYTE:
         index = (unsigned) (((GLubyte *) indices)[i]);
         break;
      }
      pc = emit_element_none(pc, arrays, index);
   }

   /* Emit End, flushing first if even these 4 bytes might not fit. */
   if ((pc + 4) >= gc->bufEnd) {
      pc = __glXFlushRenderBuffer(gc, pc);
   }

   (void) memcpy(pc, end_cmd, 4);
   pc += 4;

   gc->pc = pc;
   if (gc->pc > gc->limit) {
      (void) __glXFlushRenderBuffer(gc, gc->pc);
   }
}
  797.  
  798.  
/**
 * Emit a \c glDrawElements command using the "old" (GL 1.1 /
 * EXT_vertex_arrays) protocol.  The header and ARRAY_INFO data are
 * written by emit_DrawArrays_header_old; the indexed vertex data is then
 * written directly (Render command) or as RenderLarge chunks starting at
 * request 2.
 *
 * \param mode     Drawing mode.
 * \param count    Number of indices in \c indices.
 * \param type     Index type: GL_UNSIGNED_BYTE, GL_UNSIGNED_SHORT, or
 *                 GL_UNSIGNED_INT.
 * \param indices  Client-memory index array.
 */
void
emit_DrawElements_old(GLenum mode, GLsizei count, GLenum type,
                      const GLvoid * indices)
{
   struct glx_context *gc = __glXGetCurrentContext();
   const __GLXattribute *state =
      (const __GLXattribute *) (gc->client_state_private);
   struct array_state_vector *arrays = state->array_state;

   GLubyte *pc;
   size_t elements_per_request;
   unsigned total_requests = 0;
   unsigned i;
   unsigned req;
   unsigned req_element = 0;


   pc = emit_DrawArrays_header_old(gc, arrays, &elements_per_request,
                                   &total_requests, mode, count);


   /* Write the arrays.
    */

   req = 2;
   while (count > 0) {
      /* The final chunk may carry fewer elements than a full one. */
      if (count < elements_per_request) {
         elements_per_request = count;
      }

      /* Emit elements_per_request vertices, reading the indices starting
       * at offset req_element in the index array.  An unrecognized type
       * emits nothing; presumably \c type was already checked by
       * validate_type -- TODO confirm callers do so.
       */
      switch (type) {
      case GL_UNSIGNED_INT:{
            const GLuint *ui_ptr = (const GLuint *) indices + req_element;

            for (i = 0; i < elements_per_request; i++) {
               const GLint index = (GLint) * (ui_ptr++);
               pc = emit_element_old(pc, arrays, index);
            }
            break;
         }
      case GL_UNSIGNED_SHORT:{
            const GLushort *us_ptr = (const GLushort *) indices + req_element;

            for (i = 0; i < elements_per_request; i++) {
               const GLint index = (GLint) * (us_ptr++);
               pc = emit_element_old(pc, arrays, index);
            }
            break;
         }
      case GL_UNSIGNED_BYTE:{
            const GLubyte *ub_ptr = (const GLubyte *) indices + req_element;

            for (i = 0; i < elements_per_request; i++) {
               const GLint index = (GLint) * (ub_ptr++);
               pc = emit_element_old(pc, arrays, index);
            }
            break;
         }
      }

      /* For RenderLarge, ship this chunk and reset to the buffer start. */
      if (total_requests != 0) {
         __glXSendLargeChunk(gc, req, total_requests, gc->pc, pc - gc->pc);
         pc = gc->pc;
         req++;
      }

      count -= elements_per_request;
      req_element += elements_per_request;
   }


   assert((total_requests == 0) || ((req - 1) == total_requests));

   if (total_requests == 0) {
      /* Small command: commit the buffer pointer and flush if past the
       * soft limit.
       */
      assert(pc <= gc->bufEnd);

      gc->pc = pc;
      if (gc->pc > gc->limit) {
         (void) __glXFlushRenderBuffer(gc, gc->pc);
      }
   }
}
  883.  
  884.  
  885. /**
  886.  * Validate that the \c mode parameter to \c glDrawArrays, et. al. is valid.
  887.  * If it is not valid, then an error code is set in the GLX context.
  888.  *
  889.  * \returns
  890.  * \c GL_TRUE if the argument is valid, \c GL_FALSE if is not.
  891.  */
  892. static GLboolean
  893. validate_mode(struct glx_context * gc, GLenum mode)
  894. {
  895.    switch (mode) {
  896.    case GL_POINTS:
  897.    case GL_LINE_STRIP:
  898.    case GL_LINE_LOOP:
  899.    case GL_LINES:
  900.    case GL_TRIANGLE_STRIP:
  901.    case GL_TRIANGLE_FAN:
  902.    case GL_TRIANGLES:
  903.    case GL_QUAD_STRIP:
  904.    case GL_QUADS:
  905.    case GL_POLYGON:
  906.       break;
  907.    default:
  908.       __glXSetError(gc, GL_INVALID_ENUM);
  909.       return GL_FALSE;
  910.    }
  911.  
  912.    return GL_TRUE;
  913. }
  914.  
  915.  
  916. /**
  917.  * Validate that the \c count parameter to \c glDrawArrays, et. al. is valid.
  918.  * A value less than zero is invalid and will result in \c GL_INVALID_VALUE
  919.  * being set.  A value of zero will not result in an error being set, but
  920.  * will result in \c GL_FALSE being returned.
  921.  *
  922.  * \returns
  923.  * \c GL_TRUE if the argument is valid, \c GL_FALSE if it is not.
  924.  */
  925. static GLboolean
  926. validate_count(struct glx_context * gc, GLsizei count)
  927. {
  928.    if (count < 0) {
  929.       __glXSetError(gc, GL_INVALID_VALUE);
  930.    }
  931.  
  932.    return (count > 0);
  933. }
  934.  
  935.  
  936. /**
  937.  * Validate that the \c type parameter to \c glDrawElements, et. al. is
  938.  * valid.  Only \c GL_UNSIGNED_BYTE, \c GL_UNSIGNED_SHORT, and
  939.  * \c GL_UNSIGNED_INT are valid.
  940.  *
  941.  * \returns
  942.  * \c GL_TRUE if the argument is valid, \c GL_FALSE if it is not.
  943.  */
  944. static GLboolean
  945. validate_type(struct glx_context * gc, GLenum type)
  946. {
  947.    switch (type) {
  948.    case GL_UNSIGNED_INT:
  949.    case GL_UNSIGNED_SHORT:
  950.    case GL_UNSIGNED_BYTE:
  951.       return GL_TRUE;
  952.    default:
  953.       __glXSetError(gc, GL_INVALID_ENUM);
  954.       return GL_FALSE;
  955.    }
  956. }
  957.  
  958.  
  959. void
  960. __indirect_glDrawArrays(GLenum mode, GLint first, GLsizei count)
  961. {
  962.    struct glx_context *gc = __glXGetCurrentContext();
  963.    const __GLXattribute *state =
  964.       (const __GLXattribute *) (gc->client_state_private);
  965.    struct array_state_vector *arrays = state->array_state;
  966.  
  967.  
  968.    if (validate_mode(gc, mode) && validate_count(gc, count)) {
  969.       if (!arrays->array_info_cache_valid) {
  970.          fill_array_info_cache(arrays);
  971.       }
  972.  
  973.       arrays->DrawArrays(mode, first, count);
  974.    }
  975. }
  976.  
  977.  
  978. void
  979. __indirect_glArrayElement(GLint index)
  980. {
  981.    struct glx_context *gc = __glXGetCurrentContext();
  982.    const __GLXattribute *state =
  983.       (const __GLXattribute *) (gc->client_state_private);
  984.    struct array_state_vector *arrays = state->array_state;
  985.  
  986.    size_t single_vertex_size;
  987.  
  988.  
  989.    single_vertex_size = calculate_single_vertex_size_none(arrays);
  990.  
  991.    if ((gc->pc + single_vertex_size) >= gc->bufEnd) {
  992.       gc->pc = __glXFlushRenderBuffer(gc, gc->pc);
  993.    }
  994.  
  995.    gc->pc = emit_element_none(gc->pc, arrays, index);
  996.  
  997.    if (gc->pc > gc->limit) {
  998.       (void) __glXFlushRenderBuffer(gc, gc->pc);
  999.    }
  1000. }
  1001.  
  1002.  
  1003. void
  1004. __indirect_glDrawElements(GLenum mode, GLsizei count, GLenum type,
  1005.                           const GLvoid * indices)
  1006. {
  1007.    struct glx_context *gc = __glXGetCurrentContext();
  1008.    const __GLXattribute *state =
  1009.       (const __GLXattribute *) (gc->client_state_private);
  1010.    struct array_state_vector *arrays = state->array_state;
  1011.  
  1012.  
  1013.    if (validate_mode(gc, mode) && validate_count(gc, count)
  1014.        && validate_type(gc, type)) {
  1015.       if (!arrays->array_info_cache_valid) {
  1016.          fill_array_info_cache(arrays);
  1017.       }
  1018.  
  1019.       arrays->DrawElements(mode, count, type, indices);
  1020.    }
  1021. }
  1022.  
  1023.  
  1024. void
  1025. __indirect_glDrawRangeElements(GLenum mode, GLuint start, GLuint end,
  1026.                                GLsizei count, GLenum type,
  1027.                                const GLvoid * indices)
  1028. {
  1029.    struct glx_context *gc = __glXGetCurrentContext();
  1030.    const __GLXattribute *state =
  1031.       (const __GLXattribute *) (gc->client_state_private);
  1032.    struct array_state_vector *arrays = state->array_state;
  1033.  
  1034.  
  1035.    if (validate_mode(gc, mode) && validate_count(gc, count)
  1036.        && validate_type(gc, type)) {
  1037.       if (end < start) {
  1038.          __glXSetError(gc, GL_INVALID_VALUE);
  1039.          return;
  1040.       }
  1041.  
  1042.       if (!arrays->array_info_cache_valid) {
  1043.          fill_array_info_cache(arrays);
  1044.       }
  1045.  
  1046.       arrays->DrawElements(mode, count, type, indices);
  1047.    }
  1048. }
  1049.  
  1050.  
  1051. void
  1052. __indirect_glMultiDrawArrays(GLenum mode, const GLint *first,
  1053.                                 const GLsizei *count, GLsizei primcount)
  1054. {
  1055.    struct glx_context *gc = __glXGetCurrentContext();
  1056.    const __GLXattribute *state =
  1057.       (const __GLXattribute *) (gc->client_state_private);
  1058.    struct array_state_vector *arrays = state->array_state;
  1059.    GLsizei i;
  1060.  
  1061.  
  1062.    if (validate_mode(gc, mode)) {
  1063.       if (!arrays->array_info_cache_valid) {
  1064.          fill_array_info_cache(arrays);
  1065.       }
  1066.  
  1067.       for (i = 0; i < primcount; i++) {
  1068.          if (validate_count(gc, count[i])) {
  1069.             arrays->DrawArrays(mode, first[i], count[i]);
  1070.          }
  1071.       }
  1072.    }
  1073. }
  1074.  
  1075.  
  1076. void
  1077. __indirect_glMultiDrawElementsEXT(GLenum mode, const GLsizei * count,
  1078.                                   GLenum type, const GLvoid * const * indices,
  1079.                                   GLsizei primcount)
  1080. {
  1081.    struct glx_context *gc = __glXGetCurrentContext();
  1082.    const __GLXattribute *state =
  1083.       (const __GLXattribute *) (gc->client_state_private);
  1084.    struct array_state_vector *arrays = state->array_state;
  1085.    GLsizei i;
  1086.  
  1087.  
  1088.    if (validate_mode(gc, mode) && validate_type(gc, type)) {
  1089.       if (!arrays->array_info_cache_valid) {
  1090.          fill_array_info_cache(arrays);
  1091.       }
  1092.  
  1093.       for (i = 0; i < primcount; i++) {
  1094.          if (validate_count(gc, count[i])) {
  1095.             arrays->DrawElements(mode, count[i], type, indices[i]);
  1096.          }
  1097.       }
  1098.    }
  1099. }
  1100.  
  1101.  
/**
 * Initialize the common fields of an \c array_state and prime its cached
 * GLX render-command header.
 *
 * \param a          Pointer to the \c array_state to fill in.
 * \param PTR        Application-supplied data pointer.
 * \param TYPE       GL type of each component (e.g., \c GL_FLOAT).
 * \param STRIDE     User-specified stride; 0 means tightly packed, so the
 *                   effective stride becomes the element size.
 * \param COUNT      Number of components per element.
 * \param NORMALIZED \c GL_TRUE if fixed-point data is normalized.
 * \param HDR_SIZE   Byte size of the render-command header (4 for plain
 *                   commands, 8 when an extra word such as a texture unit
 *                   is present).
 * \param OPCODE     GLX render opcode emitted for each element.
 *
 * The first 16-bit word of \c (a)->header is the total command size,
 * padded to a multiple of 4 as required by the GLX protocol; the second
 * word is the opcode.
 */
#define COMMON_ARRAY_DATA_INIT(a, PTR, TYPE, STRIDE, COUNT, NORMALIZED, HDR_SIZE, OPCODE) \
  do {                                                                  \
    (a)->data = PTR;                                                    \
    (a)->data_type = TYPE;                                              \
    (a)->user_stride = STRIDE;                                          \
    (a)->count = COUNT;                                                 \
    (a)->normalized = NORMALIZED;                                       \
                                                                        \
    (a)->element_size = __glXTypeSize( TYPE ) * COUNT;                  \
    (a)->true_stride = (STRIDE == 0)                                    \
      ? (a)->element_size : STRIDE;                                     \
                                                                        \
    (a)->header_size = HDR_SIZE;                                        \
    ((uint16_t *) (a)->header)[0] = __GLX_PAD((a)->header_size + (a)->element_size); \
    ((uint16_t *) (a)->header)[1] = OPCODE;                             \
  } while(0)
  1118.  
  1119.  
  1120. void
  1121. __indirect_glVertexPointer(GLint size, GLenum type, GLsizei stride,
  1122.                            const GLvoid * pointer)
  1123. {
  1124.    static const uint16_t short_ops[5] = {
  1125.       0, 0, X_GLrop_Vertex2sv, X_GLrop_Vertex3sv, X_GLrop_Vertex4sv
  1126.    };
  1127.    static const uint16_t int_ops[5] = {
  1128.       0, 0, X_GLrop_Vertex2iv, X_GLrop_Vertex3iv, X_GLrop_Vertex4iv
  1129.    };
  1130.    static const uint16_t float_ops[5] = {
  1131.       0, 0, X_GLrop_Vertex2fv, X_GLrop_Vertex3fv, X_GLrop_Vertex4fv
  1132.    };
  1133.    static const uint16_t double_ops[5] = {
  1134.       0, 0, X_GLrop_Vertex2dv, X_GLrop_Vertex3dv, X_GLrop_Vertex4dv
  1135.    };
  1136.    uint16_t opcode;
  1137.    struct glx_context *gc = __glXGetCurrentContext();
  1138.    __GLXattribute *state = (__GLXattribute *) (gc->client_state_private);
  1139.    struct array_state_vector *arrays = state->array_state;
  1140.    struct array_state *a;
  1141.  
  1142.  
  1143.    if (size < 2 || size > 4 || stride < 0) {
  1144.       __glXSetError(gc, GL_INVALID_VALUE);
  1145.       return;
  1146.    }
  1147.  
  1148.    switch (type) {
  1149.    case GL_SHORT:
  1150.       opcode = short_ops[size];
  1151.       break;
  1152.    case GL_INT:
  1153.       opcode = int_ops[size];
  1154.       break;
  1155.    case GL_FLOAT:
  1156.       opcode = float_ops[size];
  1157.       break;
  1158.    case GL_DOUBLE:
  1159.       opcode = double_ops[size];
  1160.       break;
  1161.    default:
  1162.       __glXSetError(gc, GL_INVALID_ENUM);
  1163.       return;
  1164.    }
  1165.  
  1166.    a = get_array_entry(arrays, GL_VERTEX_ARRAY, 0);
  1167.    assert(a != NULL);
  1168.    COMMON_ARRAY_DATA_INIT(a, pointer, type, stride, size, GL_FALSE, 4,
  1169.                           opcode);
  1170.  
  1171.    if (a->enabled) {
  1172.       arrays->array_info_cache_valid = GL_FALSE;
  1173.    }
  1174. }
  1175.  
  1176.  
  1177. void
  1178. __indirect_glNormalPointer(GLenum type, GLsizei stride,
  1179.                            const GLvoid * pointer)
  1180. {
  1181.    uint16_t opcode;
  1182.    struct glx_context *gc = __glXGetCurrentContext();
  1183.    __GLXattribute *state = (__GLXattribute *) (gc->client_state_private);
  1184.    struct array_state_vector *arrays = state->array_state;
  1185.    struct array_state *a;
  1186.  
  1187.  
  1188.    if (stride < 0) {
  1189.       __glXSetError(gc, GL_INVALID_VALUE);
  1190.       return;
  1191.    }
  1192.  
  1193.    switch (type) {
  1194.    case GL_BYTE:
  1195.       opcode = X_GLrop_Normal3bv;
  1196.       break;
  1197.    case GL_SHORT:
  1198.       opcode = X_GLrop_Normal3sv;
  1199.       break;
  1200.    case GL_INT:
  1201.       opcode = X_GLrop_Normal3iv;
  1202.       break;
  1203.    case GL_FLOAT:
  1204.       opcode = X_GLrop_Normal3fv;
  1205.       break;
  1206.    case GL_DOUBLE:
  1207.       opcode = X_GLrop_Normal3dv;
  1208.       break;
  1209.    default:
  1210.       __glXSetError(gc, GL_INVALID_ENUM);
  1211.       return;
  1212.    }
  1213.  
  1214.    a = get_array_entry(arrays, GL_NORMAL_ARRAY, 0);
  1215.    assert(a != NULL);
  1216.    COMMON_ARRAY_DATA_INIT(a, pointer, type, stride, 3, GL_TRUE, 4, opcode);
  1217.  
  1218.    if (a->enabled) {
  1219.       arrays->array_info_cache_valid = GL_FALSE;
  1220.    }
  1221. }
  1222.  
  1223.  
  1224. void
  1225. __indirect_glColorPointer(GLint size, GLenum type, GLsizei stride,
  1226.                           const GLvoid * pointer)
  1227. {
  1228.    static const uint16_t byte_ops[5] = {
  1229.       0, 0, 0, X_GLrop_Color3bv, X_GLrop_Color4bv
  1230.    };
  1231.    static const uint16_t ubyte_ops[5] = {
  1232.       0, 0, 0, X_GLrop_Color3ubv, X_GLrop_Color4ubv
  1233.    };
  1234.    static const uint16_t short_ops[5] = {
  1235.       0, 0, 0, X_GLrop_Color3sv, X_GLrop_Color4sv
  1236.    };
  1237.    static const uint16_t ushort_ops[5] = {
  1238.       0, 0, 0, X_GLrop_Color3usv, X_GLrop_Color4usv
  1239.    };
  1240.    static const uint16_t int_ops[5] = {
  1241.       0, 0, 0, X_GLrop_Color3iv, X_GLrop_Color4iv
  1242.    };
  1243.    static const uint16_t uint_ops[5] = {
  1244.       0, 0, 0, X_GLrop_Color3uiv, X_GLrop_Color4uiv
  1245.    };
  1246.    static const uint16_t float_ops[5] = {
  1247.       0, 0, 0, X_GLrop_Color3fv, X_GLrop_Color4fv
  1248.    };
  1249.    static const uint16_t double_ops[5] = {
  1250.       0, 0, 0, X_GLrop_Color3dv, X_GLrop_Color4dv
  1251.    };
  1252.    uint16_t opcode;
  1253.    struct glx_context *gc = __glXGetCurrentContext();
  1254.    __GLXattribute *state = (__GLXattribute *) (gc->client_state_private);
  1255.    struct array_state_vector *arrays = state->array_state;
  1256.    struct array_state *a;
  1257.  
  1258.  
  1259.    if (size < 3 || size > 4 || stride < 0) {
  1260.       __glXSetError(gc, GL_INVALID_VALUE);
  1261.       return;
  1262.    }
  1263.  
  1264.    switch (type) {
  1265.    case GL_BYTE:
  1266.       opcode = byte_ops[size];
  1267.       break;
  1268.    case GL_UNSIGNED_BYTE:
  1269.       opcode = ubyte_ops[size];
  1270.       break;
  1271.    case GL_SHORT:
  1272.       opcode = short_ops[size];
  1273.       break;
  1274.    case GL_UNSIGNED_SHORT:
  1275.       opcode = ushort_ops[size];
  1276.       break;
  1277.    case GL_INT:
  1278.       opcode = int_ops[size];
  1279.       break;
  1280.    case GL_UNSIGNED_INT:
  1281.       opcode = uint_ops[size];
  1282.       break;
  1283.    case GL_FLOAT:
  1284.       opcode = float_ops[size];
  1285.       break;
  1286.    case GL_DOUBLE:
  1287.       opcode = double_ops[size];
  1288.       break;
  1289.    default:
  1290.       __glXSetError(gc, GL_INVALID_ENUM);
  1291.       return;
  1292.    }
  1293.  
  1294.    a = get_array_entry(arrays, GL_COLOR_ARRAY, 0);
  1295.    assert(a != NULL);
  1296.    COMMON_ARRAY_DATA_INIT(a, pointer, type, stride, size, GL_TRUE, 4, opcode);
  1297.  
  1298.    if (a->enabled) {
  1299.       arrays->array_info_cache_valid = GL_FALSE;
  1300.    }
  1301. }
  1302.  
  1303.  
  1304. void
  1305. __indirect_glIndexPointer(GLenum type, GLsizei stride, const GLvoid * pointer)
  1306. {
  1307.    uint16_t opcode;
  1308.    struct glx_context *gc = __glXGetCurrentContext();
  1309.    __GLXattribute *state = (__GLXattribute *) (gc->client_state_private);
  1310.    struct array_state_vector *arrays = state->array_state;
  1311.    struct array_state *a;
  1312.  
  1313.  
  1314.    if (stride < 0) {
  1315.       __glXSetError(gc, GL_INVALID_VALUE);
  1316.       return;
  1317.    }
  1318.  
  1319.    switch (type) {
  1320.    case GL_UNSIGNED_BYTE:
  1321.       opcode = X_GLrop_Indexubv;
  1322.       break;
  1323.    case GL_SHORT:
  1324.       opcode = X_GLrop_Indexsv;
  1325.       break;
  1326.    case GL_INT:
  1327.       opcode = X_GLrop_Indexiv;
  1328.       break;
  1329.    case GL_FLOAT:
  1330.       opcode = X_GLrop_Indexfv;
  1331.       break;
  1332.    case GL_DOUBLE:
  1333.       opcode = X_GLrop_Indexdv;
  1334.       break;
  1335.    default:
  1336.       __glXSetError(gc, GL_INVALID_ENUM);
  1337.       return;
  1338.    }
  1339.  
  1340.    a = get_array_entry(arrays, GL_INDEX_ARRAY, 0);
  1341.    assert(a != NULL);
  1342.    COMMON_ARRAY_DATA_INIT(a, pointer, type, stride, 1, GL_FALSE, 4, opcode);
  1343.  
  1344.    if (a->enabled) {
  1345.       arrays->array_info_cache_valid = GL_FALSE;
  1346.    }
  1347. }
  1348.  
  1349.  
  1350. void
  1351. __indirect_glEdgeFlagPointer(GLsizei stride, const GLvoid * pointer)
  1352. {
  1353.    struct glx_context *gc = __glXGetCurrentContext();
  1354.    __GLXattribute *state = (__GLXattribute *) (gc->client_state_private);
  1355.    struct array_state_vector *arrays = state->array_state;
  1356.    struct array_state *a;
  1357.  
  1358.  
  1359.    if (stride < 0) {
  1360.       __glXSetError(gc, GL_INVALID_VALUE);
  1361.       return;
  1362.    }
  1363.  
  1364.  
  1365.    a = get_array_entry(arrays, GL_EDGE_FLAG_ARRAY, 0);
  1366.    assert(a != NULL);
  1367.    COMMON_ARRAY_DATA_INIT(a, pointer, GL_UNSIGNED_BYTE, stride, 1, GL_FALSE,
  1368.                           4, X_GLrop_EdgeFlagv);
  1369.  
  1370.    if (a->enabled) {
  1371.       arrays->array_info_cache_valid = GL_FALSE;
  1372.    }
  1373. }
  1374.  
  1375.  
  1376. void
  1377. __indirect_glTexCoordPointer(GLint size, GLenum type, GLsizei stride,
  1378.                              const GLvoid * pointer)
  1379. {
  1380.    static const uint16_t short_ops[5] = {
  1381.       0, X_GLrop_TexCoord1sv, X_GLrop_TexCoord2sv, X_GLrop_TexCoord3sv,
  1382.       X_GLrop_TexCoord4sv
  1383.    };
  1384.    static const uint16_t int_ops[5] = {
  1385.       0, X_GLrop_TexCoord1iv, X_GLrop_TexCoord2iv, X_GLrop_TexCoord3iv,
  1386.       X_GLrop_TexCoord4iv
  1387.    };
  1388.    static const uint16_t float_ops[5] = {
  1389.       0, X_GLrop_TexCoord1dv, X_GLrop_TexCoord2fv, X_GLrop_TexCoord3fv,
  1390.       X_GLrop_TexCoord4fv
  1391.    };
  1392.    static const uint16_t double_ops[5] = {
  1393.       0, X_GLrop_TexCoord1dv, X_GLrop_TexCoord2dv, X_GLrop_TexCoord3dv,
  1394.       X_GLrop_TexCoord4dv
  1395.    };
  1396.  
  1397.    static const uint16_t mshort_ops[5] = {
  1398.       0, X_GLrop_MultiTexCoord1svARB, X_GLrop_MultiTexCoord2svARB,
  1399.       X_GLrop_MultiTexCoord3svARB, X_GLrop_MultiTexCoord4svARB
  1400.    };
  1401.    static const uint16_t mint_ops[5] = {
  1402.       0, X_GLrop_MultiTexCoord1ivARB, X_GLrop_MultiTexCoord2ivARB,
  1403.       X_GLrop_MultiTexCoord3ivARB, X_GLrop_MultiTexCoord4ivARB
  1404.    };
  1405.    static const uint16_t mfloat_ops[5] = {
  1406.       0, X_GLrop_MultiTexCoord1dvARB, X_GLrop_MultiTexCoord2fvARB,
  1407.       X_GLrop_MultiTexCoord3fvARB, X_GLrop_MultiTexCoord4fvARB
  1408.    };
  1409.    static const uint16_t mdouble_ops[5] = {
  1410.       0, X_GLrop_MultiTexCoord1dvARB, X_GLrop_MultiTexCoord2dvARB,
  1411.       X_GLrop_MultiTexCoord3dvARB, X_GLrop_MultiTexCoord4dvARB
  1412.    };
  1413.  
  1414.    uint16_t opcode;
  1415.    struct glx_context *gc = __glXGetCurrentContext();
  1416.    __GLXattribute *state = (__GLXattribute *) (gc->client_state_private);
  1417.    struct array_state_vector *arrays = state->array_state;
  1418.    struct array_state *a;
  1419.    unsigned header_size;
  1420.    unsigned index;
  1421.  
  1422.  
  1423.    if (size < 1 || size > 4 || stride < 0) {
  1424.       __glXSetError(gc, GL_INVALID_VALUE);
  1425.       return;
  1426.    }
  1427.  
  1428.    index = arrays->active_texture_unit;
  1429.    if (index == 0) {
  1430.       switch (type) {
  1431.       case GL_SHORT:
  1432.          opcode = short_ops[size];
  1433.          break;
  1434.       case GL_INT:
  1435.          opcode = int_ops[size];
  1436.          break;
  1437.       case GL_FLOAT:
  1438.          opcode = float_ops[size];
  1439.          break;
  1440.       case GL_DOUBLE:
  1441.          opcode = double_ops[size];
  1442.          break;
  1443.       default:
  1444.          __glXSetError(gc, GL_INVALID_ENUM);
  1445.          return;
  1446.       }
  1447.  
  1448.       header_size = 4;
  1449.    }
  1450.    else {
  1451.       switch (type) {
  1452.       case GL_SHORT:
  1453.          opcode = mshort_ops[size];
  1454.          break;
  1455.       case GL_INT:
  1456.          opcode = mint_ops[size];
  1457.          break;
  1458.       case GL_FLOAT:
  1459.          opcode = mfloat_ops[size];
  1460.          break;
  1461.       case GL_DOUBLE:
  1462.          opcode = mdouble_ops[size];
  1463.          break;
  1464.       default:
  1465.          __glXSetError(gc, GL_INVALID_ENUM);
  1466.          return;
  1467.       }
  1468.  
  1469.       header_size = 8;
  1470.    }
  1471.  
  1472.    a = get_array_entry(arrays, GL_TEXTURE_COORD_ARRAY, index);
  1473.    assert(a != NULL);
  1474.    COMMON_ARRAY_DATA_INIT(a, pointer, type, stride, size, GL_FALSE,
  1475.                           header_size, opcode);
  1476.  
  1477.    if (a->enabled) {
  1478.       arrays->array_info_cache_valid = GL_FALSE;
  1479.    }
  1480. }
  1481.  
  1482.  
  1483. void
  1484. __indirect_glSecondaryColorPointer(GLint size, GLenum type, GLsizei stride,
  1485.                                       const GLvoid * pointer)
  1486. {
  1487.    uint16_t opcode;
  1488.    struct glx_context *gc = __glXGetCurrentContext();
  1489.    __GLXattribute *state = (__GLXattribute *) (gc->client_state_private);
  1490.    struct array_state_vector *arrays = state->array_state;
  1491.    struct array_state *a;
  1492.  
  1493.  
  1494.    if (size != 3 || stride < 0) {
  1495.       __glXSetError(gc, GL_INVALID_VALUE);
  1496.       return;
  1497.    }
  1498.  
  1499.    switch (type) {
  1500.    case GL_BYTE:
  1501.       opcode = 4126;
  1502.       break;
  1503.    case GL_UNSIGNED_BYTE:
  1504.       opcode = 4131;
  1505.       break;
  1506.    case GL_SHORT:
  1507.       opcode = 4127;
  1508.       break;
  1509.    case GL_UNSIGNED_SHORT:
  1510.       opcode = 4132;
  1511.       break;
  1512.    case GL_INT:
  1513.       opcode = 4128;
  1514.       break;
  1515.    case GL_UNSIGNED_INT:
  1516.       opcode = 4133;
  1517.       break;
  1518.    case GL_FLOAT:
  1519.       opcode = 4129;
  1520.       break;
  1521.    case GL_DOUBLE:
  1522.       opcode = 4130;
  1523.       break;
  1524.    default:
  1525.       __glXSetError(gc, GL_INVALID_ENUM);
  1526.       return;
  1527.    }
  1528.  
  1529.    a = get_array_entry(arrays, GL_SECONDARY_COLOR_ARRAY, 0);
  1530.    if (a == NULL) {
  1531.       __glXSetError(gc, GL_INVALID_OPERATION);
  1532.       return;
  1533.    }
  1534.  
  1535.    COMMON_ARRAY_DATA_INIT(a, pointer, type, stride, size, GL_TRUE, 4, opcode);
  1536.  
  1537.    if (a->enabled) {
  1538.       arrays->array_info_cache_valid = GL_FALSE;
  1539.    }
  1540. }
  1541.  
  1542.  
  1543. void
  1544. __indirect_glFogCoordPointer(GLenum type, GLsizei stride,
  1545.                                 const GLvoid * pointer)
  1546. {
  1547.    uint16_t opcode;
  1548.    struct glx_context *gc = __glXGetCurrentContext();
  1549.    __GLXattribute *state = (__GLXattribute *) (gc->client_state_private);
  1550.    struct array_state_vector *arrays = state->array_state;
  1551.    struct array_state *a;
  1552.  
  1553.  
  1554.    if (stride < 0) {
  1555.       __glXSetError(gc, GL_INVALID_VALUE);
  1556.       return;
  1557.    }
  1558.  
  1559.    switch (type) {
  1560.    case GL_FLOAT:
  1561.       opcode = 4124;
  1562.       break;
  1563.    case GL_DOUBLE:
  1564.       opcode = 4125;
  1565.       break;
  1566.    default:
  1567.       __glXSetError(gc, GL_INVALID_ENUM);
  1568.       return;
  1569.    }
  1570.  
  1571.    a = get_array_entry(arrays, GL_FOG_COORD_ARRAY, 0);
  1572.    if (a == NULL) {
  1573.       __glXSetError(gc, GL_INVALID_OPERATION);
  1574.       return;
  1575.    }
  1576.  
  1577.    COMMON_ARRAY_DATA_INIT(a, pointer, type, stride, 1, GL_FALSE, 4, opcode);
  1578.  
  1579.    if (a->enabled) {
  1580.       arrays->array_info_cache_valid = GL_FALSE;
  1581.    }
  1582. }
  1583.  
  1584.  
  1585. void
  1586. __indirect_glVertexAttribPointer(GLuint index, GLint size,
  1587.                                     GLenum type, GLboolean normalized,
  1588.                                     GLsizei stride, const GLvoid * pointer)
  1589. {
  1590.    static const uint16_t short_ops[5] = { 0, 4189, 4190, 4191, 4192 };
  1591.    static const uint16_t float_ops[5] = { 0, 4193, 4194, 4195, 4196 };
  1592.    static const uint16_t double_ops[5] = { 0, 4197, 4198, 4199, 4200 };
  1593.  
  1594.    uint16_t opcode;
  1595.    struct glx_context *gc = __glXGetCurrentContext();
  1596.    __GLXattribute *state = (__GLXattribute *) (gc->client_state_private);
  1597.    struct array_state_vector *arrays = state->array_state;
  1598.    struct array_state *a;
  1599.    unsigned true_immediate_count;
  1600.    unsigned true_immediate_size;
  1601.  
  1602.  
  1603.    if ((size < 1) || (size > 4) || (stride < 0)
  1604.        || (index > arrays->num_vertex_program_attribs)) {
  1605.       __glXSetError(gc, GL_INVALID_VALUE);
  1606.       return;
  1607.    }
  1608.  
  1609.    if (normalized && (type != GL_FLOAT) && (type != GL_DOUBLE)) {
  1610.       switch (type) {
  1611.       case GL_BYTE:
  1612.          opcode = X_GLrop_VertexAttrib4NbvARB;
  1613.          break;
  1614.       case GL_UNSIGNED_BYTE:
  1615.          opcode = X_GLrop_VertexAttrib4NubvARB;
  1616.          break;
  1617.       case GL_SHORT:
  1618.          opcode = X_GLrop_VertexAttrib4NsvARB;
  1619.          break;
  1620.       case GL_UNSIGNED_SHORT:
  1621.          opcode = X_GLrop_VertexAttrib4NusvARB;
  1622.          break;
  1623.       case GL_INT:
  1624.          opcode = X_GLrop_VertexAttrib4NivARB;
  1625.          break;
  1626.       case GL_UNSIGNED_INT:
  1627.          opcode = X_GLrop_VertexAttrib4NuivARB;
  1628.          break;
  1629.       default:
  1630.          __glXSetError(gc, GL_INVALID_ENUM);
  1631.          return;
  1632.       }
  1633.  
  1634.       true_immediate_count = 4;
  1635.    }
  1636.    else {
  1637.       true_immediate_count = size;
  1638.  
  1639.       switch (type) {
  1640.       case GL_BYTE:
  1641.          opcode = X_GLrop_VertexAttrib4bvARB;
  1642.          true_immediate_count = 4;
  1643.          break;
  1644.       case GL_UNSIGNED_BYTE:
  1645.          opcode = X_GLrop_VertexAttrib4ubvARB;
  1646.          true_immediate_count = 4;
  1647.          break;
  1648.       case GL_SHORT:
  1649.          opcode = short_ops[size];
  1650.          break;
  1651.       case GL_UNSIGNED_SHORT:
  1652.          opcode = X_GLrop_VertexAttrib4usvARB;
  1653.          true_immediate_count = 4;
  1654.          break;
  1655.       case GL_INT:
  1656.          opcode = X_GLrop_VertexAttrib4ivARB;
  1657.          true_immediate_count = 4;
  1658.          break;
  1659.       case GL_UNSIGNED_INT:
  1660.          opcode = X_GLrop_VertexAttrib4uivARB;
  1661.          true_immediate_count = 4;
  1662.          break;
  1663.       case GL_FLOAT:
  1664.          opcode = float_ops[size];
  1665.          break;
  1666.       case GL_DOUBLE:
  1667.          opcode = double_ops[size];
  1668.          break;
  1669.       default:
  1670.          __glXSetError(gc, GL_INVALID_ENUM);
  1671.          return;
  1672.       }
  1673.    }
  1674.  
  1675.    a = get_array_entry(arrays, GL_VERTEX_ATTRIB_ARRAY_POINTER, index);
  1676.    if (a == NULL) {
  1677.       __glXSetError(gc, GL_INVALID_OPERATION);
  1678.       return;
  1679.    }
  1680.  
  1681.    COMMON_ARRAY_DATA_INIT(a, pointer, type, stride, size, normalized, 8,
  1682.                           opcode);
  1683.  
  1684.    true_immediate_size = __glXTypeSize(type) * true_immediate_count;
  1685.    ((uint16_t *) (a)->header)[0] = __GLX_PAD(a->header_size
  1686.                                              + true_immediate_size);
  1687.  
  1688.    if (a->enabled) {
  1689.       arrays->array_info_cache_valid = GL_FALSE;
  1690.    }
  1691. }
  1692.  
  1693.  
  1694. /**
  1695.  * I don't have 100% confidence that this is correct.  The different rules
  1696.  * about whether or not generic vertex attributes alias "classic" vertex
  1697.  * attributes (i.e., attrib1 ?= primary color) between ARB_vertex_program,
  1698.  * ARB_vertex_shader, and NV_vertex_program are a bit confusing.  My
  1699.  * feeling is that the client-side doesn't have to worry about it.  The
  1700.  * client just sends all the data to the server and lets the server deal
  1701.  * with it.
  1702.  */
void
__indirect_glVertexAttribPointerNV(GLuint index, GLint size,
                                   GLenum type, GLsizei stride,
                                   const GLvoid * pointer)
{
   struct glx_context *gc = __glXGetCurrentContext();
   GLboolean normalized = GL_FALSE;


   switch (type) {
   case GL_UNSIGNED_BYTE:
      /* NV semantics: unsigned byte attributes must have exactly 4
       * components and are always normalized.
       */
      if (size != 4) {
         __glXSetError(gc, GL_INVALID_VALUE);
         return;
      }
      normalized = GL_TRUE;
      /* FALLTHROUGH */

   case GL_SHORT:
   case GL_FLOAT:
   case GL_DOUBLE:
      /* Delegate to the ARB entry point; see the comment above about
       * generic/classic attribute aliasing being resolved server-side.
       */
      __indirect_glVertexAttribPointer(index, size, type,
                                          normalized, stride, pointer);
      return;
   default:
      __glXSetError(gc, GL_INVALID_ENUM);
      return;
   }
}
  1731.  
  1732.  
  1733. void
  1734. __indirect_glClientActiveTexture(GLenum texture)
  1735. {
  1736.    struct glx_context *const gc = __glXGetCurrentContext();
  1737.    __GLXattribute *const state =
  1738.       (__GLXattribute *) (gc->client_state_private);
  1739.    struct array_state_vector *const arrays = state->array_state;
  1740.    const GLint unit = (GLint) texture - GL_TEXTURE0;
  1741.  
  1742.  
  1743.    if ((unit < 0) || (unit >= arrays->num_texture_units)) {
  1744.       __glXSetError(gc, GL_INVALID_ENUM);
  1745.       return;
  1746.    }
  1747.  
  1748.    arrays->active_texture_unit = unit;
  1749. }
  1750.  
  1751.  
  1752. /**
  1753.  * Modify the enable state for the selected array
  1754.  */
  1755. GLboolean
  1756. __glXSetArrayEnable(__GLXattribute * state, GLenum key, unsigned index,
  1757.                     GLboolean enable)
  1758. {
  1759.    struct array_state_vector *arrays = state->array_state;
  1760.    struct array_state *a;
  1761.  
  1762.  
  1763.    /* Texture coordinate arrays have an implict index set when the
  1764.     * application calls glClientActiveTexture.
  1765.     */
  1766.    if (key == GL_TEXTURE_COORD_ARRAY) {
  1767.       index = arrays->active_texture_unit;
  1768.    }
  1769.  
  1770.    a = get_array_entry(arrays, key, index);
  1771.  
  1772.    if ((a != NULL) && (a->enabled != enable)) {
  1773.       a->enabled = enable;
  1774.       arrays->array_info_cache_valid = GL_FALSE;
  1775.    }
  1776.  
  1777.    return (a != NULL);
  1778. }
  1779.  
  1780.  
  1781. void
  1782. __glXArrayDisableAll(__GLXattribute * state)
  1783. {
  1784.    struct array_state_vector *arrays = state->array_state;
  1785.    unsigned i;
  1786.  
  1787.  
  1788.    for (i = 0; i < arrays->num_arrays; i++) {
  1789.       arrays->arrays[i].enabled = GL_FALSE;
  1790.    }
  1791.  
  1792.    arrays->array_info_cache_valid = GL_FALSE;
  1793. }
  1794.  
  1795.  
  1796. /**
  1797.  */
  1798. GLboolean
  1799. __glXGetArrayEnable(const __GLXattribute * const state,
  1800.                     GLenum key, unsigned index, GLintptr * dest)
  1801. {
  1802.    const struct array_state_vector *arrays = state->array_state;
  1803.    const struct array_state *a =
  1804.       get_array_entry((struct array_state_vector *) arrays,
  1805.                       key, index);
  1806.  
  1807.    if (a != NULL) {
  1808.       *dest = (GLintptr) a->enabled;
  1809.    }
  1810.  
  1811.    return (a != NULL);
  1812. }
  1813.  
  1814.  
  1815. /**
  1816.  */
  1817. GLboolean
  1818. __glXGetArrayType(const __GLXattribute * const state,
  1819.                   GLenum key, unsigned index, GLintptr * dest)
  1820. {
  1821.    const struct array_state_vector *arrays = state->array_state;
  1822.    const struct array_state *a =
  1823.       get_array_entry((struct array_state_vector *) arrays,
  1824.                       key, index);
  1825.  
  1826.    if (a != NULL) {
  1827.       *dest = (GLintptr) a->data_type;
  1828.    }
  1829.  
  1830.    return (a != NULL);
  1831. }
  1832.  
  1833.  
  1834. /**
  1835.  */
  1836. GLboolean
  1837. __glXGetArraySize(const __GLXattribute * const state,
  1838.                   GLenum key, unsigned index, GLintptr * dest)
  1839. {
  1840.    const struct array_state_vector *arrays = state->array_state;
  1841.    const struct array_state *a =
  1842.       get_array_entry((struct array_state_vector *) arrays,
  1843.                       key, index);
  1844.  
  1845.    if (a != NULL) {
  1846.       *dest = (GLintptr) a->count;
  1847.    }
  1848.  
  1849.    return (a != NULL);
  1850. }
  1851.  
  1852.  
  1853. /**
  1854.  */
  1855. GLboolean
  1856. __glXGetArrayStride(const __GLXattribute * const state,
  1857.                     GLenum key, unsigned index, GLintptr * dest)
  1858. {
  1859.    const struct array_state_vector *arrays = state->array_state;
  1860.    const struct array_state *a =
  1861.       get_array_entry((struct array_state_vector *) arrays,
  1862.                       key, index);
  1863.  
  1864.    if (a != NULL) {
  1865.       *dest = (GLintptr) a->user_stride;
  1866.    }
  1867.  
  1868.    return (a != NULL);
  1869. }
  1870.  
  1871.  
  1872. /**
  1873.  */
  1874. GLboolean
  1875. __glXGetArrayPointer(const __GLXattribute * const state,
  1876.                      GLenum key, unsigned index, void **dest)
  1877. {
  1878.    const struct array_state_vector *arrays = state->array_state;
  1879.    const struct array_state *a =
  1880.       get_array_entry((struct array_state_vector *) arrays,
  1881.                       key, index);
  1882.  
  1883.  
  1884.    if (a != NULL) {
  1885.       *dest = (void *) (a->data);
  1886.    }
  1887.  
  1888.    return (a != NULL);
  1889. }
  1890.  
  1891.  
  1892. /**
  1893.  */
  1894. GLboolean
  1895. __glXGetArrayNormalized(const __GLXattribute * const state,
  1896.                         GLenum key, unsigned index, GLintptr * dest)
  1897. {
  1898.    const struct array_state_vector *arrays = state->array_state;
  1899.    const struct array_state *a =
  1900.       get_array_entry((struct array_state_vector *) arrays,
  1901.                       key, index);
  1902.  
  1903.  
  1904.    if (a != NULL) {
  1905.       *dest = (GLintptr) a->normalized;
  1906.    }
  1907.  
  1908.    return (a != NULL);
  1909. }
  1910.  
  1911.  
  1912. /**
  1913.  */
  1914. GLuint
  1915. __glXGetActiveTextureUnit(const __GLXattribute * const state)
  1916. {
  1917.    return state->array_state->active_texture_unit;
  1918. }
  1919.  
  1920.  
  1921. void
  1922. __glXPushArrayState(__GLXattribute * state)
  1923. {
  1924.    struct array_state_vector *arrays = state->array_state;
  1925.    struct array_stack_state *stack =
  1926.       &arrays->stack[(arrays->stack_index * arrays->num_arrays)];
  1927.    unsigned i;
  1928.  
  1929.    /* XXX are we pushing _all_ the necessary fields? */
  1930.    for (i = 0; i < arrays->num_arrays; i++) {
  1931.       stack[i].data = arrays->arrays[i].data;
  1932.       stack[i].data_type = arrays->arrays[i].data_type;
  1933.       stack[i].user_stride = arrays->arrays[i].user_stride;
  1934.       stack[i].count = arrays->arrays[i].count;
  1935.       stack[i].key = arrays->arrays[i].key;
  1936.       stack[i].index = arrays->arrays[i].index;
  1937.       stack[i].enabled = arrays->arrays[i].enabled;
  1938.    }
  1939.  
  1940.    arrays->active_texture_unit_stack[arrays->stack_index] =
  1941.       arrays->active_texture_unit;
  1942.  
  1943.    arrays->stack_index++;
  1944. }
  1945.  
  1946.  
/**
 * Restore the client vertex-array state saved by the matching
 * \c __glXPushArrayState.  Each saved array's pointer/format is
 * re-sent through the corresponding __indirect_gl*Pointer entry point
 * (so the GLX protocol state tracks the restore), then its enable
 * flag is replayed via __glXSetArrayEnable.
 */
void
__glXPopArrayState(__GLXattribute * state)
{
   struct array_state_vector *arrays = state->array_state;
   struct array_stack_state *stack;
   unsigned i;


   /* Pop: decrement first, then read the slot that the matching push
    * wrote at the old index.
    */
   arrays->stack_index--;
   stack = &arrays->stack[(arrays->stack_index * arrays->num_arrays)];

   for (i = 0; i < arrays->num_arrays; i++) {
      switch (stack[i].key) {
      case GL_NORMAL_ARRAY:
         __indirect_glNormalPointer(stack[i].data_type,
                                    stack[i].user_stride, stack[i].data);
         break;
      case GL_COLOR_ARRAY:
         __indirect_glColorPointer(stack[i].count,
                                   stack[i].data_type,
                                   stack[i].user_stride, stack[i].data);
         break;
      case GL_INDEX_ARRAY:
         __indirect_glIndexPointer(stack[i].data_type,
                                   stack[i].user_stride, stack[i].data);
         break;
      case GL_EDGE_FLAG_ARRAY:
         __indirect_glEdgeFlagPointer(stack[i].user_stride, stack[i].data);
         break;
      case GL_TEXTURE_COORD_ARRAY:
         /* glTexCoordPointer targets the active texture unit, so select
          * the saved unit before re-sending the pointer.
          */
         arrays->active_texture_unit = stack[i].index;
         __indirect_glTexCoordPointer(stack[i].count,
                                      stack[i].data_type,
                                      stack[i].user_stride, stack[i].data);
         break;
      case GL_SECONDARY_COLOR_ARRAY:
         __indirect_glSecondaryColorPointer(stack[i].count,
                                               stack[i].data_type,
                                               stack[i].user_stride,
                                               stack[i].data);
         break;
      case GL_FOG_COORDINATE_ARRAY:
         __indirect_glFogCoordPointer(stack[i].data_type,
                                         stack[i].user_stride, stack[i].data);
         break;

      }

      /* Replay the enable state last; this also re-invalidates the
       * ARRAY_INFO cache when the flag actually changes.
       */
      __glXSetArrayEnable(state, stack[i].key, stack[i].index,
                          stack[i].enabled);
   }

   /* Finally restore the active texture unit saved by the push (this
    * overrides the temporary assignment made while restoring texture
    * coordinate arrays above).
    */
   arrays->active_texture_unit =
      arrays->active_texture_unit_stack[arrays->stack_index];
}
  2002.