  1. /*
  2.  * (C) Copyright IBM Corporation 2004, 2005
  3.  * All Rights Reserved.
  4.  *
  5.  * Permission is hereby granted, free of charge, to any person obtaining a
  6.  * copy of this software and associated documentation files (the "Software"),
  7.  * to deal in the Software without restriction, including without limitation
  8.  * the rights to use, copy, modify, merge, publish, distribute, sub license,
  9.  * and/or sell copies of the Software, and to permit persons to whom the
  10.  * Software is furnished to do so, subject to the following conditions:
  11.  *
  12.  * The above copyright notice and this permission notice (including the next
  13.  * paragraph) shall be included in all copies or substantial portions of the
  14.  * Software.
  15.  *
  16.  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  17.  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  18.  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.  IN NO EVENT SHALL
  19.  * IBM,
  20.  * AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
  21.  * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
  22.  * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  23.  * SOFTWARE.
  24.  */
  25.  
  26. #include <inttypes.h>
  27. #include <assert.h>
  28. #include <string.h>
  29.  
  30. #include "glxclient.h"
  31. #include "indirect.h"
  32. #include <GL/glxproto.h>
  33. #include "glxextensions.h"
  34. #include "indirect_vertex_array.h"
  35. #include "indirect_vertex_array_priv.h"
  36.  
  37. #define __GLX_PAD(n) (((n)+3) & ~3)
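/*
 * Example: __GLX_PAD rounds a byte count up to the next multiple of four,
 * as required by GLX protocol padding rules; __GLX_PAD(13) == 16 and
 * __GLX_PAD(16) == 16.
 */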
  38.  
  39. /**
  40.  * \file indirect_vertex_array.c
  41.  * Implement GLX protocol for vertex arrays and vertex buffer objects.
  42.  *
  43.  * The most important function in this file is \c fill_array_info_cache.
  44.  * The \c array_state_vector contains a cache of the ARRAY_INFO data sent
  45.  * in the DrawArrays protocol.  Certain operations, such as enabling or
  46.  * disabling an array, can invalidate this cache.  \c fill_array_info_cache
  47.  * fills in this data.  Additionally, it examines the enabled state and
  48.  * other factors to determine what "version" of DrawArrays protocol can be
  49.  * used.
  50.  *
  51.  * Currently, only two versions of DrawArrays protocol are implemented.  The
  52.  * first version is the "none" protocol.  This is the fallback when the
  53.  * server does not support GL 1.1 / EXT_vertex_arrays.  It is implemented
  54.  * by sending batches of immediate mode commands that are equivalent to the
  55.  * DrawArrays protocol.
  56.  *
  57.  * The other protocol that is currently implemented is the "old" protocol.
  58.  * This is the GL 1.1 DrawArrays protocol.  The only difference between GL
  59.  * 1.1 and EXT_vertex_arrays is the opcode used for the DrawArrays command.
  60.  * This protocol is called "old" because the ARB is in the process of
  61.  * defining a new protocol, which will probably be called either "new" or
  62.  * "vbo", to support multiple texture coordinate arrays, generic attributes,
  63.  * and vertex buffer objects.
  64.  *
  65.  * \author Ian Romanick <ian.d.romanick@intel.com>
  66.  */
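/*
 * Rough sketch of the dispatch described above: fill_array_info_cache()
 * installs the selected protocol through function pointers on the array
 * state vector, approximately
 *
 *    if (old_DrawArrays_possible) {
 *       arrays->DrawArrays   = emit_DrawArrays_old;    // GL 1.1 protocol
 *       arrays->DrawElements = emit_DrawElements_old;
 *    } else {
 *       arrays->DrawArrays   = emit_DrawArrays_none;   // immediate-mode fallback
 *       arrays->DrawElements = emit_DrawElements_none;
 *    }
 *
 * and __indirect_glDrawArrays() et al. simply call
 * arrays->DrawArrays(mode, first, count) after validating their arguments.
 */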
  67.  
  68. static void emit_DrawArrays_none(GLenum mode, GLint first, GLsizei count);
  69. static void emit_DrawArrays_old(GLenum mode, GLint first, GLsizei count);
  70.  
  71. static void emit_DrawElements_none(GLenum mode, GLsizei count, GLenum type,
  72.                                    const GLvoid * indices);
  73. static void emit_DrawElements_old(GLenum mode, GLsizei count, GLenum type,
  74.                                   const GLvoid * indices);
  75.  
  76.  
  77. static GLubyte *emit_element_none(GLubyte * dst,
  78.                                   const struct array_state_vector *arrays,
  79.                                   unsigned index);
  80. static GLubyte *emit_element_old(GLubyte * dst,
  81.                                  const struct array_state_vector *arrays,
  82.                                  unsigned index);
  83. static struct array_state *get_array_entry(const struct array_state_vector
  84.                                            *arrays, GLenum key,
  85.                                            unsigned index);
  86. static void fill_array_info_cache(struct array_state_vector *arrays);
  87. static GLboolean validate_mode(struct glx_context * gc, GLenum mode);
  88. static GLboolean validate_count(struct glx_context * gc, GLsizei count);
  89. static GLboolean validate_type(struct glx_context * gc, GLenum type);
  90.  
  91.  
  92. /**
  93.  * Table of sizes, in bytes, of the GL types.  All of the type enums are in
  94.  * the range 0x1400 - 0x140F.  That includes types added by extensions (i.e.,
  95.  * \c GL_HALF_FLOAT_NV).  The elements of this table correspond to the
  96.  * type enums masked with 0x0f.
  97.  *
  98.  * \notes
  99.  * \c GL_HALF_FLOAT_NV is not included.  Neither are \c GL_2_BYTES,
  100.  * \c GL_3_BYTES, or \c GL_4_BYTES.
  101.  */
  102. const GLuint __glXTypeSize_table[16] = {
  103.    1, 1, 2, 2, 4, 4, 4, 0, 0, 0, 8, 0, 0, 0, 0, 0
  104. };
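/*
 * Example: a type enum indexes this table after being masked with 0x0f.
 * GL_FLOAT is 0x1406, so 0x1406 & 0x0f == 6 and the lookup yields 4 bytes.
 * The __glXTypeSize() macro used later in this file is presumably defined
 * in a header along the lines of
 *
 *    #define __glXTypeSize(e)  __glXTypeSize_table[(e) & 0x0f]
 */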
  105.  
  106.  
  107. /**
  108.  * Free the per-context array state that was allocated with
  109.  * __glXInitVertexArrayState().
  110.  */
  111. void
  112. __glXFreeVertexArrayState(struct glx_context * gc)
  113. {
  114.    __GLXattribute *state = (__GLXattribute *) (gc->client_state_private);
  115.    struct array_state_vector *arrays = state->array_state;
  116.  
  117.    if (arrays) {
  118.       free(arrays->stack);
  119.       arrays->stack = NULL;
  120.       free(arrays->arrays);
  121.       arrays->arrays = NULL;
  122.       free(arrays);
  123.       state->array_state = NULL;
  124.    }
  125. }
  126.  
  127.  
  128. /**
  129.  * Initialize vertex array state of a GLX context.
  130.  *
  131.  * \param gc  GLX context whose vertex array state is to be initialized.
  132.  *
  133.  * \warning
  134.  * This function may only be called after struct glx_context::gl_extension_bits,
  135.  * struct glx_context::server_minor, and struct glx_context::server_major have been
  136.  * initialized.  These values are used to determine what vertex arrays are
  137.  * supported.
  138.  *
  139.  * \bug
  140.  * Return values from malloc are not properly tested.
  141.  */
  142. void
  143. __glXInitVertexArrayState(struct glx_context * gc)
  144. {
  145.    __GLXattribute *state = (__GLXattribute *) (gc->client_state_private);
  146.    struct array_state_vector *arrays;
  147.  
  148.    unsigned array_count;
  149.    int texture_units = 1, vertex_program_attribs = 0;
  150.    unsigned i, j;
  151.  
  152.    GLboolean got_fog = GL_FALSE;
  153.    GLboolean got_secondary_color = GL_FALSE;
  154.  
  155.  
  156.    arrays = calloc(1, sizeof(struct array_state_vector));
  157.    state->array_state = arrays;
  158.  
  159.    arrays->old_DrawArrays_possible = !state->NoDrawArraysProtocol;
  160.    arrays->new_DrawArrays_possible = GL_FALSE;
  161.    arrays->DrawArrays = NULL;
  162.  
  163.    arrays->active_texture_unit = 0;
  164.  
  165.  
  166.    /* Determine how many arrays are actually needed.  Only arrays that
  167.     * are supported by the server are created.  For example, if the server
  168.     * supports only 2 texture units, then only 2 texture coordinate arrays
  169.     * are created.
  170.     *
  171.     * At the very least, GL_VERTEX_ARRAY, GL_NORMAL_ARRAY,
  172.     * GL_COLOR_ARRAY, GL_INDEX_ARRAY, GL_TEXTURE_COORD_ARRAY, and
  173.     * GL_EDGE_FLAG_ARRAY are supported.
  174.     */
  175.  
  176.    array_count = 5;
  177.  
  178.    if (__glExtensionBitIsEnabled(gc, GL_EXT_fog_coord_bit)
  179.        || (gc->server_major > 1) || (gc->server_minor >= 4)) {
  180.       got_fog = GL_TRUE;
  181.       array_count++;
  182.    }
  183.  
  184.    if (__glExtensionBitIsEnabled(gc, GL_EXT_secondary_color_bit)
  185.        || (gc->server_major > 1) || (gc->server_minor >= 4)) {
  186.       got_secondary_color = GL_TRUE;
  187.       array_count++;
  188.    }
  189.  
  190.    if (__glExtensionBitIsEnabled(gc, GL_ARB_multitexture_bit)
  191.        || (gc->server_major > 1) || (gc->server_minor >= 3)) {
  192.       __indirect_glGetIntegerv(GL_MAX_TEXTURE_UNITS, &texture_units);
  193.    }
  194.  
  195.    if (__glExtensionBitIsEnabled(gc, GL_ARB_vertex_program_bit)) {
  196.       __indirect_glGetProgramivARB(GL_VERTEX_PROGRAM_ARB,
  197.                                    GL_MAX_PROGRAM_ATTRIBS_ARB,
  198.                                    &vertex_program_attribs);
  199.    }
  200.  
  201.    arrays->num_texture_units = texture_units;
  202.    arrays->num_vertex_program_attribs = vertex_program_attribs;
  203.    array_count += texture_units + vertex_program_attribs;
  204.    arrays->num_arrays = array_count;
  205.    arrays->arrays = calloc(array_count, sizeof(struct array_state));
  206.  
  207.    arrays->arrays[0].data_type = GL_FLOAT;
  208.    arrays->arrays[0].count = 3;
  209.    arrays->arrays[0].key = GL_NORMAL_ARRAY;
  210.    arrays->arrays[0].normalized = GL_TRUE;
  211.    arrays->arrays[0].old_DrawArrays_possible = GL_TRUE;
  212.  
  213.    arrays->arrays[1].data_type = GL_FLOAT;
  214.    arrays->arrays[1].count = 4;
  215.    arrays->arrays[1].key = GL_COLOR_ARRAY;
  216.    arrays->arrays[1].normalized = GL_TRUE;
  217.    arrays->arrays[1].old_DrawArrays_possible = GL_TRUE;
  218.  
  219.    arrays->arrays[2].data_type = GL_FLOAT;
  220.    arrays->arrays[2].count = 1;
  221.    arrays->arrays[2].key = GL_INDEX_ARRAY;
  222.    arrays->arrays[2].old_DrawArrays_possible = GL_TRUE;
  223.  
  224.    arrays->arrays[3].data_type = GL_UNSIGNED_BYTE;
  225.    arrays->arrays[3].count = 1;
  226.    arrays->arrays[3].key = GL_EDGE_FLAG_ARRAY;
  227.    arrays->arrays[3].old_DrawArrays_possible = GL_TRUE;
  228.  
  229.    for (i = 0; i < texture_units; i++) {
  230.       arrays->arrays[4 + i].data_type = GL_FLOAT;
  231.       arrays->arrays[4 + i].count = 4;
  232.       arrays->arrays[4 + i].key = GL_TEXTURE_COORD_ARRAY;
  233.  
  234.       arrays->arrays[4 + i].old_DrawArrays_possible = (i == 0);
  235.       arrays->arrays[4 + i].index = i;
  236.  
  237.       arrays->arrays[4 + i].header[1] = i + GL_TEXTURE0;
  238.    }
  239.  
  240.    i = 4 + texture_units;
  241.  
  242.    if (got_fog) {
  243.       arrays->arrays[i].data_type = GL_FLOAT;
  244.       arrays->arrays[i].count = 1;
  245.       arrays->arrays[i].key = GL_FOG_COORDINATE_ARRAY;
  246.       arrays->arrays[i].old_DrawArrays_possible = GL_TRUE;
  247.       i++;
  248.    }
  249.  
  250.    if (got_secondary_color) {
  251.       arrays->arrays[i].data_type = GL_FLOAT;
  252.       arrays->arrays[i].count = 3;
  253.       arrays->arrays[i].key = GL_SECONDARY_COLOR_ARRAY;
  254.       arrays->arrays[i].old_DrawArrays_possible = GL_TRUE;
  255.       arrays->arrays[i].normalized = GL_TRUE;
  256.       i++;
  257.    }
  258.  
  259.  
  260.    for (j = 0; j < vertex_program_attribs; j++) {
  261.       const unsigned idx = (vertex_program_attribs - (j + 1));
  262.  
  263.  
  264.       arrays->arrays[idx + i].data_type = GL_FLOAT;
  265.       arrays->arrays[idx + i].count = 4;
  266.       arrays->arrays[idx + i].key = GL_VERTEX_ATTRIB_ARRAY_POINTER;
  267.  
  268.       arrays->arrays[idx + i].old_DrawArrays_possible = 0;
  269.       arrays->arrays[idx + i].index = idx;
  270.  
  271.       arrays->arrays[idx + i].header[1] = idx;
  272.    }
  273.  
  274.    i += vertex_program_attribs;
  275.  
  276.  
  277.    /* Vertex array *must* be last because of the way that
  278.     * emit_DrawArrays_none works.
  279.     */
  280.  
  281.    arrays->arrays[i].data_type = GL_FLOAT;
  282.    arrays->arrays[i].count = 4;
  283.    arrays->arrays[i].key = GL_VERTEX_ARRAY;
  284.    arrays->arrays[i].old_DrawArrays_possible = GL_TRUE;
  285.  
  286.    assert((i + 1) == arrays->num_arrays);
  287.  
  288.    arrays->stack_index = 0;
  289.    arrays->stack = malloc(sizeof(struct array_stack_state)
  290.                           * arrays->num_arrays
  291.                           * __GL_CLIENT_ATTRIB_STACK_DEPTH);
  292. }
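/*
 * Layout sketch: for a hypothetical server with two texture units, no
 * vertex program attributes, and GL 1.4 support, the vector built above
 * would be ordered
 *
 *    [0] GL_NORMAL_ARRAY
 *    [1] GL_COLOR_ARRAY
 *    [2] GL_INDEX_ARRAY
 *    [3] GL_EDGE_FLAG_ARRAY
 *    [4] GL_TEXTURE_COORD_ARRAY (unit 0)
 *    [5] GL_TEXTURE_COORD_ARRAY (unit 1)
 *    [6] GL_FOG_COORDINATE_ARRAY
 *    [7] GL_SECONDARY_COLOR_ARRAY
 *    [8] GL_VERTEX_ARRAY           (always last; see the comment above)
 */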
  293.  
  294.  
  295. /**
  296.  * Calculate the size of a single vertex for the "none" protocol.  This is
  297.  * essentially the size of all the immediate-mode commands required to
  298.  * implement the enabled vertex arrays.
  299.  */
  300. static size_t
  301. calculate_single_vertex_size_none(const struct array_state_vector *arrays)
  302. {
  303.    size_t single_vertex_size = 0;
  304.    unsigned i;
  305.  
  306.  
  307.    for (i = 0; i < arrays->num_arrays; i++) {
  308.       if (arrays->arrays[i].enabled) {
  309.          single_vertex_size += ((uint16_t *) arrays->arrays[i].header)[0];
  310.       }
  311.    }
  312.  
  313.    return single_vertex_size;
  314. }
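/*
 * Worked example: each enabled array contributes the 16-bit command length
 * stored in header[0], i.e. the immediate-mode header plus the padded
 * element data.  A GL_FLOAT normal array contributes
 * __GLX_PAD(4 + 3 * sizeof(GLfloat)) == 16 bytes, so with only normals and
 * three-component float vertices enabled a single vertex costs
 * 16 + 16 == 32 bytes of render buffer.
 */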
  315.  
  316.  
  317. /**
  318.  * Emit a single element using non-DrawArrays protocol.
  319.  */
  320. GLubyte *
  321. emit_element_none(GLubyte * dst,
  322.                   const struct array_state_vector * arrays, unsigned index)
  323. {
  324.    unsigned i;
  325.  
  326.  
  327.    for (i = 0; i < arrays->num_arrays; i++) {
  328.       if (arrays->arrays[i].enabled) {
  329.          const size_t offset = index * arrays->arrays[i].true_stride;
  330.  
  331.          /* The generic attributes can have more data than is in the
  332.           * elements.  This is because a vertex array can be a 2 element,
  333.           * normalized, unsigned short, but the "closest" immediate mode
  334.           * protocol is for a 4Nus.  Since the sizes are small, the
  335.           * performance impact on modern processors should be negligible.
  336.           */
  337.          (void) memset(dst, 0, ((uint16_t *) arrays->arrays[i].header)[0]);
  338.  
  339.          (void) memcpy(dst, arrays->arrays[i].header,
  340.                        arrays->arrays[i].header_size);
  341.  
  342.          dst += arrays->arrays[i].header_size;
  343.  
  344.          (void) memcpy(dst, ((GLubyte *) arrays->arrays[i].data) + offset,
  345.                        arrays->arrays[i].element_size);
  346.  
  347.          dst += __GLX_PAD(arrays->arrays[i].element_size);
  348.       }
  349.    }
  350.  
  351.    return dst;
  352. }
  353.  
  354.  
  355. /**
  356.  * Emit a single element using "old" DrawArrays protocol from
  357.  * EXT_vertex_arrays / OpenGL 1.1.
  358.  */
  359. GLubyte *
  360. emit_element_old(GLubyte * dst,
  361.                  const struct array_state_vector * arrays, unsigned index)
  362. {
  363.    unsigned i;
  364.  
  365.  
  366.    for (i = 0; i < arrays->num_arrays; i++) {
  367.       if (arrays->arrays[i].enabled) {
  368.          const size_t offset = index * arrays->arrays[i].true_stride;
  369.  
  370.          (void) memcpy(dst, ((GLubyte *) arrays->arrays[i].data) + offset,
  371.                        arrays->arrays[i].element_size);
  372.  
  373.          dst += __GLX_PAD(arrays->arrays[i].element_size);
  374.       }
  375.    }
  376.  
  377.    return dst;
  378. }
  379.  
  380.  
  381. struct array_state *
  382. get_array_entry(const struct array_state_vector *arrays,
  383.                 GLenum key, unsigned index)
  384. {
  385.    unsigned i;
  386.  
  387.    for (i = 0; i < arrays->num_arrays; i++) {
  388.       if ((arrays->arrays[i].key == key)
  389.           && (arrays->arrays[i].index == index)) {
  390.          return &arrays->arrays[i];
  391.       }
  392.    }
  393.  
  394.    return NULL;
  395. }
  396.  
  397.  
  398. static GLboolean
  399. allocate_array_info_cache(struct array_state_vector *arrays,
  400.                           size_t required_size)
  401. {
  402. #define MAX_HEADER_SIZE 20
  403.    if (arrays->array_info_cache_buffer_size < required_size) {
  404.       GLubyte *temp = realloc(arrays->array_info_cache_base,
  405.                               required_size + MAX_HEADER_SIZE);
  406.  
  407.       if (temp == NULL) {
  408.          return GL_FALSE;
  409.       }
  410.  
  411.       arrays->array_info_cache_base = temp;
  412.       arrays->array_info_cache = temp + MAX_HEADER_SIZE;
  413.       arrays->array_info_cache_buffer_size = required_size;
  414.    }
  415.  
  416.    arrays->array_info_cache_size = required_size;
  417.    return GL_TRUE;
  418. }
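/*
 * The MAX_HEADER_SIZE bytes reserved in front of the cached ARRAY_INFO data
 * are presumably there so that emit_DrawArrays_header_old() can write the
 * 20-byte RenderLarge DrawArrays header immediately before the cache (see
 * the "pc = ... - (header_size + 4)" arithmetic below) and send the header
 * and the ARRAY_INFO data as one contiguous chunk.
 */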
  419.  
  420.  
  421. /** Build the ARRAY_INFO cache for the currently enabled arrays and select
  422.  *  which DrawArrays / DrawElements protocol implementation to use. */
  423. void
  424. fill_array_info_cache(struct array_state_vector *arrays)
  425. {
  426.    GLboolean old_DrawArrays_possible;
  427.    unsigned i;
  428.  
  429.  
  430.    /* Determine how many arrays are enabled.
  431.     */
  432.  
  433.    arrays->enabled_client_array_count = 0;
  434.    old_DrawArrays_possible = arrays->old_DrawArrays_possible;
  435.    for (i = 0; i < arrays->num_arrays; i++) {
  436.       if (arrays->arrays[i].enabled) {
  437.          arrays->enabled_client_array_count++;
  438.          old_DrawArrays_possible &= arrays->arrays[i].old_DrawArrays_possible;
  439.       }
  440.    }
  441.  
  442.    if (arrays->new_DrawArrays_possible) {
  443.       assert(!arrays->new_DrawArrays_possible);
  444.    }
  445.    else if (old_DrawArrays_possible) {
  446.       const size_t required_size = arrays->enabled_client_array_count * 12;
  447.       uint32_t *info;
  448.  
  449.  
  450.       if (!allocate_array_info_cache(arrays, required_size)) {
  451.          return;
  452.       }
  453.  
  454.  
  455.       info = (uint32_t *) arrays->array_info_cache;
  456.       for (i = 0; i < arrays->num_arrays; i++) {
  457.          if (arrays->arrays[i].enabled) {
  458.             *(info++) = arrays->arrays[i].data_type;
  459.             *(info++) = arrays->arrays[i].count;
  460.             *(info++) = arrays->arrays[i].key;
  461.          }
  462.       }
  463.  
  464.       arrays->DrawArrays = emit_DrawArrays_old;
  465.       arrays->DrawElements = emit_DrawElements_old;
  466.    }
  467.    else {
  468.       arrays->DrawArrays = emit_DrawArrays_none;
  469.       arrays->DrawElements = emit_DrawElements_none;
  470.    }
  471.  
  472.    arrays->array_info_cache_valid = GL_TRUE;
  473. }
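/*
 * ARRAY_INFO format, as written by the loop above: each enabled array
 * contributes one 12-byte record of three 32-bit values -- data type,
 * component count, and array key.
 */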
  474.  
  475.  
  476. /**
  477.  * Emit a \c glDrawArrays command using the "none" protocol.  That is,
  478.  * emit immediate-mode commands that are equivalent to the requested
  479.  * \c glDrawArrays command.  This is used with servers that don't support
  480.  * the OpenGL 1.1 / EXT_vertex_arrays DrawArrays protocol or in cases where
  481.  * vertex state is enabled that is not compatible with that protocol.
  482.  */
  483. void
  484. emit_DrawArrays_none(GLenum mode, GLint first, GLsizei count)
  485. {
  486.    struct glx_context *gc = __glXGetCurrentContext();
  487.    const __GLXattribute *state =
  488.       (const __GLXattribute *) (gc->client_state_private);
  489.    struct array_state_vector *arrays = state->array_state;
  490.  
  491.    size_t single_vertex_size;
  492.    GLubyte *pc;
  493.    unsigned i;
  494.    static const uint16_t begin_cmd[2] = { 8, X_GLrop_Begin };
  495.    static const uint16_t end_cmd[2] = { 4, X_GLrop_End };
  496.  
  497.  
  498.    single_vertex_size = calculate_single_vertex_size_none(arrays);
  499.  
  500.    pc = gc->pc;
  501.  
  502.    (void) memcpy(pc, begin_cmd, 4);
  503.    *(int *) (pc + 4) = mode;
  504.  
  505.    pc += 8;
  506.  
  507.    for (i = 0; i < count; i++) {
  508.       if ((pc + single_vertex_size) >= gc->bufEnd) {
  509.          pc = __glXFlushRenderBuffer(gc, pc);
  510.       }
  511.  
  512.       pc = emit_element_none(pc, arrays, first + i);
  513.    }
  514.  
  515.    if ((pc + 4) >= gc->bufEnd) {
  516.       pc = __glXFlushRenderBuffer(gc, pc);
  517.    }
  518.  
  519.    (void) memcpy(pc, end_cmd, 4);
  520.    pc += 4;
  521.  
  522.    gc->pc = pc;
  523.    if (gc->pc > gc->limit) {
  524.       (void) __glXFlushRenderBuffer(gc, gc->pc);
  525.    }
  526. }
  527.  
  528.  
  529. /**
  530.  * Emit the header data for the GL 1.1 / EXT_vertex_arrays DrawArrays
  531.  * protocol.
  532.  *
  533.  * \param gc                    GLX context.
  534.  * \param arrays                Array state.
  535.  * \param elements_per_request  Location to store the number of elements that
  536.  *                              can fit in a single Render / RenderLarge
  537.  *                              command.
  538.  * \param total_request         Total number of requests for a RenderLarge
  539.  *                              command.  If a Render command is used, this
  540.  *                              will be zero.
  541.  * \param mode                  Drawing mode.
  542.  * \param count                 Number of vertices.
  543.  *
  544.  * \returns
  545.  * A pointer to the buffer for array data.
  546.  */
  547. static GLubyte *
  548. emit_DrawArrays_header_old(struct glx_context * gc,
  549.                            struct array_state_vector *arrays,
  550.                            size_t * elements_per_request,
  551.                            unsigned int *total_requests,
  552.                            GLenum mode, GLsizei count)
  553. {
  554.    size_t command_size;
  555.    size_t single_vertex_size;
  556.    const unsigned header_size = 16;
  557.    unsigned i;
  558.    GLubyte *pc;
  559.  
  560.  
  561.    /* Determine the size of the whole command.  This includes the header,
  562.     * the ARRAY_INFO data and the array data.  Once this size is calculated,
  563.     * it will be known whether a Render or RenderLarge command is needed.
  564.     */
  565.  
  566.    single_vertex_size = 0;
  567.    for (i = 0; i < arrays->num_arrays; i++) {
  568.       if (arrays->arrays[i].enabled) {
  569.          single_vertex_size += __GLX_PAD(arrays->arrays[i].element_size);
  570.       }
  571.    }
  572.  
  573.    command_size = arrays->array_info_cache_size + header_size
  574.       + (single_vertex_size * count);
  575.  
  576.  
  577.    /* Write the header for either a Render command or a RenderLarge
  578.     * command.  After the header is written, write the ARRAY_INFO data.
  579.     */
  580.  
  581.    if (command_size > gc->maxSmallRenderCommandSize) {
  582.       /* maxSize is the maximum amount of data that can be stuffed into a single
  583.        * packet.  sz_xGLXRenderReq is added because bufSize is the maximum
  584.        * packet size minus sz_xGLXRenderReq.
  585.        */
  586.       const size_t maxSize = (gc->bufSize + sz_xGLXRenderReq)
  587.          - sz_xGLXRenderLargeReq;
  588.       unsigned vertex_requests;
  589.  
  590.  
  591.       /* Calculate the number of data packets that will be required to send
  592.        * the whole command.  To do this, the number of vertices that
  593.        * will fit in a single buffer must be calculated.
  594.        *
  595.        * The important value here is elements_per_request.  This is the
  596.        * number of complete array elements that will fit in a single
  597.        * buffer.  There may be some wasted space at the end of the buffer,
  598.        * but splitting elements across buffer boundaries would be painful.
  599.        */
  600.  
  601.       elements_per_request[0] = maxSize / single_vertex_size;
  602.  
  603.       vertex_requests = (count + elements_per_request[0] - 1)
  604.          / elements_per_request[0];
  605.  
  606.       *total_requests = vertex_requests + 1;
  607.  
  608.  
  609.       __glXFlushRenderBuffer(gc, gc->pc);
  610.  
  611.       command_size += 4;
  612.  
  613.       pc = ((GLubyte *) arrays->array_info_cache) - (header_size + 4);
  614.       *(uint32_t *) (pc + 0) = command_size;
  615.       *(uint32_t *) (pc + 4) = X_GLrop_DrawArrays;
  616.       *(uint32_t *) (pc + 8) = count;
  617.       *(uint32_t *) (pc + 12) = arrays->enabled_client_array_count;
  618.       *(uint32_t *) (pc + 16) = mode;
  619.  
  620.       __glXSendLargeChunk(gc, 1, *total_requests, pc,
  621.                           header_size + 4 + arrays->array_info_cache_size);
  622.  
  623.       pc = gc->pc;
  624.    }
  625.    else {
  626.       if ((gc->pc + command_size) >= gc->bufEnd) {
  627.          (void) __glXFlushRenderBuffer(gc, gc->pc);
  628.       }
  629.  
  630.       pc = gc->pc;
  631.       *(uint16_t *) (pc + 0) = command_size;
  632.       *(uint16_t *) (pc + 2) = X_GLrop_DrawArrays;
  633.       *(uint32_t *) (pc + 4) = count;
  634.       *(uint32_t *) (pc + 8) = arrays->enabled_client_array_count;
  635.       *(uint32_t *) (pc + 12) = mode;
  636.  
  637.       pc += header_size;
  638.  
  639.       (void) memcpy(pc, arrays->array_info_cache,
  640.                     arrays->array_info_cache_size);
  641.       pc += arrays->array_info_cache_size;
  642.  
  643.       *elements_per_request = count;
  644.       *total_requests = 0;
  645.    }
  646.  
  647.  
  648.    return pc;
  649. }
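/*
 * Wire-format sketch of the two header shapes written above, assuming the
 * layouts in the code are correct:
 *
 *    Render (small) command:          RenderLarge command:
 *       uint16 command_size              uint32 command_size
 *       uint16 X_GLrop_DrawArrays        uint32 X_GLrop_DrawArrays
 *       uint32 count                     uint32 count
 *       uint32 enabled array count       uint32 enabled array count
 *       uint32 mode                      uint32 mode
 *       ARRAY_INFO records               ARRAY_INFO records
 *       vertex data                      vertex data (sent in later chunks)
 *
 * The RenderLarge path flushes the render buffer first and then streams the
 * vertex data in the remaining *total_requests - 1 chunks.
 */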
  650.  
  651.  
  652. /** Emit a \c glDrawArrays command using the "old" (GL 1.1 /
  653.  *  EXT_vertex_arrays) DrawArrays protocol. */
  654. void
  655. emit_DrawArrays_old(GLenum mode, GLint first, GLsizei count)
  656. {
  657.    struct glx_context *gc = __glXGetCurrentContext();
  658.    const __GLXattribute *state =
  659.       (const __GLXattribute *) (gc->client_state_private);
  660.    struct array_state_vector *arrays = state->array_state;
  661.  
  662.    GLubyte *pc;
  663.    size_t elements_per_request;
  664.    unsigned total_requests = 0;
  665.    unsigned i;
  666.    size_t total_sent = 0;
  667.  
  668.  
  669.    pc = emit_DrawArrays_header_old(gc, arrays, &elements_per_request,
  670.                                    &total_requests, mode, count);
  671.  
  672.  
  673.    /* Write the arrays.
  674.     */
  675.  
  676.    if (total_requests == 0) {
  677.       assert(elements_per_request >= count);
  678.  
  679.       for (i = 0; i < count; i++) {
  680.          pc = emit_element_old(pc, arrays, i + first);
  681.       }
  682.  
  683.       assert(pc <= gc->bufEnd);
  684.  
  685.       gc->pc = pc;
  686.       if (gc->pc > gc->limit) {
  687.          (void) __glXFlushRenderBuffer(gc, gc->pc);
  688.       }
  689.    }
  690.    else {
  691.       unsigned req;
  692.  
  693.  
  694.       for (req = 2; req <= total_requests; req++) {
  695.          if (count < elements_per_request) {
  696.             elements_per_request = count;
  697.          }
  698.  
  699.          pc = gc->pc;
  700.          for (i = 0; i < elements_per_request; i++) {
  701.             pc = emit_element_old(pc, arrays, i + first);
  702.          }
  703.  
  704.          first += elements_per_request;
  705.  
  706.          total_sent += (size_t) (pc - gc->pc);
  707.          __glXSendLargeChunk(gc, req, total_requests, gc->pc, pc - gc->pc);
  708.  
  709.          count -= elements_per_request;
  710.       }
  711.    }
  712. }
  713.  
  714.  
  715. void
  716. emit_DrawElements_none(GLenum mode, GLsizei count, GLenum type,
  717.                        const GLvoid * indices)
  718. {
  719.    struct glx_context *gc = __glXGetCurrentContext();
  720.    const __GLXattribute *state =
  721.       (const __GLXattribute *) (gc->client_state_private);
  722.    struct array_state_vector *arrays = state->array_state;
  723.    static const uint16_t begin_cmd[2] = { 8, X_GLrop_Begin };
  724.    static const uint16_t end_cmd[2] = { 4, X_GLrop_End };
  725.  
  726.    GLubyte *pc;
  727.    size_t single_vertex_size;
  728.    unsigned i;
  729.  
  730.  
  731.    single_vertex_size = calculate_single_vertex_size_none(arrays);
  732.  
  733.  
  734.    if ((gc->pc + single_vertex_size) >= gc->bufEnd) {
  735.       gc->pc = __glXFlushRenderBuffer(gc, gc->pc);
  736.    }
  737.  
  738.    pc = gc->pc;
  739.  
  740.    (void) memcpy(pc, begin_cmd, 4);
  741.    *(int *) (pc + 4) = mode;
  742.  
  743.    pc += 8;
  744.  
  745.    for (i = 0; i < count; i++) {
  746.       unsigned index = 0;
  747.  
  748.       if ((pc + single_vertex_size) >= gc->bufEnd) {
  749.          pc = __glXFlushRenderBuffer(gc, pc);
  750.       }
  751.  
  752.       switch (type) {
  753.       case GL_UNSIGNED_INT:
  754.          index = (unsigned) (((GLuint *) indices)[i]);
  755.          break;
  756.       case GL_UNSIGNED_SHORT:
  757.          index = (unsigned) (((GLushort *) indices)[i]);
  758.          break;
  759.       case GL_UNSIGNED_BYTE:
  760.          index = (unsigned) (((GLubyte *) indices)[i]);
  761.          break;
  762.       }
  763.       pc = emit_element_none(pc, arrays, index);
  764.    }
  765.  
  766.    if ((pc + 4) >= gc->bufEnd) {
  767.       pc = __glXFlushRenderBuffer(gc, pc);
  768.    }
  769.  
  770.    (void) memcpy(pc, end_cmd, 4);
  771.    pc += 4;
  772.  
  773.    gc->pc = pc;
  774.    if (gc->pc > gc->limit) {
  775.       (void) __glXFlushRenderBuffer(gc, gc->pc);
  776.    }
  777. }
  778.  
  779.  
  780. /** Emit a \c glDrawElements command using the "old" (GL 1.1 /
  781.  *  EXT_vertex_arrays) DrawArrays protocol. */
  782. void
  783. emit_DrawElements_old(GLenum mode, GLsizei count, GLenum type,
  784.                       const GLvoid * indices)
  785. {
  786.    struct glx_context *gc = __glXGetCurrentContext();
  787.    const __GLXattribute *state =
  788.       (const __GLXattribute *) (gc->client_state_private);
  789.    struct array_state_vector *arrays = state->array_state;
  790.  
  791.    GLubyte *pc;
  792.    size_t elements_per_request;
  793.    unsigned total_requests = 0;
  794.    unsigned i;
  795.    unsigned req;
  796.    unsigned req_element = 0;
  797.  
  798.  
  799.    pc = emit_DrawArrays_header_old(gc, arrays, &elements_per_request,
  800.                                    &total_requests, mode, count);
  801.  
  802.  
  803.    /* Write the arrays.
  804.     */
  805.  
  806.    req = 2;
  807.    while (count > 0) {
  808.       if (count < elements_per_request) {
  809.          elements_per_request = count;
  810.       }
  811.  
  812.       switch (type) {
  813.       case GL_UNSIGNED_INT:{
  814.             const GLuint *ui_ptr = (const GLuint *) indices + req_element;
  815.  
  816.             for (i = 0; i < elements_per_request; i++) {
  817.                const GLint index = (GLint) * (ui_ptr++);
  818.                pc = emit_element_old(pc, arrays, index);
  819.             }
  820.             break;
  821.          }
  822.       case GL_UNSIGNED_SHORT:{
  823.             const GLushort *us_ptr = (const GLushort *) indices + req_element;
  824.  
  825.             for (i = 0; i < elements_per_request; i++) {
  826.                const GLint index = (GLint) * (us_ptr++);
  827.                pc = emit_element_old(pc, arrays, index);
  828.             }
  829.             break;
  830.          }
  831.       case GL_UNSIGNED_BYTE:{
  832.             const GLubyte *ub_ptr = (const GLubyte *) indices + req_element;
  833.  
  834.             for (i = 0; i < elements_per_request; i++) {
  835.                const GLint index = (GLint) * (ub_ptr++);
  836.                pc = emit_element_old(pc, arrays, index);
  837.             }
  838.             break;
  839.          }
  840.       }
  841.  
  842.       if (total_requests != 0) {
  843.          __glXSendLargeChunk(gc, req, total_requests, gc->pc, pc - gc->pc);
  844.          pc = gc->pc;
  845.          req++;
  846.       }
  847.  
  848.       count -= elements_per_request;
  849.       req_element += elements_per_request;
  850.    }
  851.  
  852.  
  853.    assert((total_requests == 0) || ((req - 1) == total_requests));
  854.  
  855.    if (total_requests == 0) {
  856.       assert(pc <= gc->bufEnd);
  857.  
  858.       gc->pc = pc;
  859.       if (gc->pc > gc->limit) {
  860.          (void) __glXFlushRenderBuffer(gc, gc->pc);
  861.       }
  862.    }
  863. }
  864.  
  865.  
  866. /**
  867.  * Validate that the \c mode parameter to \c glDrawArrays, et al. is valid.
  868.  * If it is not valid, then an error code is set in the GLX context.
  869.  *
  870.  * \returns
  871.  * \c GL_TRUE if the argument is valid, \c GL_FALSE if it is not.
  872.  */
  873. static GLboolean
  874. validate_mode(struct glx_context * gc, GLenum mode)
  875. {
  876.    switch (mode) {
  877.    case GL_POINTS:
  878.    case GL_LINE_STRIP:
  879.    case GL_LINE_LOOP:
  880.    case GL_LINES:
  881.    case GL_TRIANGLE_STRIP:
  882.    case GL_TRIANGLE_FAN:
  883.    case GL_TRIANGLES:
  884.    case GL_QUAD_STRIP:
  885.    case GL_QUADS:
  886.    case GL_POLYGON:
  887.       break;
  888.    default:
  889.       __glXSetError(gc, GL_INVALID_ENUM);
  890.       return GL_FALSE;
  891.    }
  892.  
  893.    return GL_TRUE;
  894. }
  895.  
  896.  
  897. /**
  898.  * Validate that the \c count parameter to \c glDrawArrays, et al. is valid.
  899.  * A value less than zero is invalid and will result in \c GL_INVALID_VALUE
  900.  * being set.  A value of zero will not result in an error being set, but
  901.  * will result in \c GL_FALSE being returned.
  902.  *
  903.  * \returns
  904.  * \c GL_TRUE if the argument is valid, \c GL_FALSE if it is not.
  905.  */
  906. static GLboolean
  907. validate_count(struct glx_context * gc, GLsizei count)
  908. {
  909.    if (count < 0) {
  910.       __glXSetError(gc, GL_INVALID_VALUE);
  911.    }
  912.  
  913.    return (count > 0);
  914. }
  915.  
  916.  
  917. /**
  918.  * Validate that the \c type parameter to \c glDrawElements, et al. is
  919.  * valid.  Only \c GL_UNSIGNED_BYTE, \c GL_UNSIGNED_SHORT, and
  920.  * \c GL_UNSIGNED_INT are valid.
  921.  *
  922.  * \returns
  923.  * \c GL_TRUE if the argument is valid, \c GL_FALSE if it is not.
  924.  */
  925. static GLboolean
  926. validate_type(struct glx_context * gc, GLenum type)
  927. {
  928.    switch (type) {
  929.    case GL_UNSIGNED_INT:
  930.    case GL_UNSIGNED_SHORT:
  931.    case GL_UNSIGNED_BYTE:
  932.       return GL_TRUE;
  933.    default:
  934.       __glXSetError(gc, GL_INVALID_ENUM);
  935.       return GL_FALSE;
  936.    }
  937. }
  938.  
  939.  
  940. void
  941. __indirect_glDrawArrays(GLenum mode, GLint first, GLsizei count)
  942. {
  943.    struct glx_context *gc = __glXGetCurrentContext();
  944.    const __GLXattribute *state =
  945.       (const __GLXattribute *) (gc->client_state_private);
  946.    struct array_state_vector *arrays = state->array_state;
  947.  
  948.  
  949.    if (validate_mode(gc, mode) && validate_count(gc, count)) {
  950.       if (!arrays->array_info_cache_valid) {
  951.          fill_array_info_cache(arrays);
  952.       }
  953.  
  954.       arrays->DrawArrays(mode, first, count);
  955.    }
  956. }
  957.  
  958.  
  959. void
  960. __indirect_glArrayElement(GLint index)
  961. {
  962.    struct glx_context *gc = __glXGetCurrentContext();
  963.    const __GLXattribute *state =
  964.       (const __GLXattribute *) (gc->client_state_private);
  965.    struct array_state_vector *arrays = state->array_state;
  966.  
  967.    size_t single_vertex_size;
  968.  
  969.  
  970.    single_vertex_size = calculate_single_vertex_size_none(arrays);
  971.  
  972.    if ((gc->pc + single_vertex_size) >= gc->bufEnd) {
  973.       gc->pc = __glXFlushRenderBuffer(gc, gc->pc);
  974.    }
  975.  
  976.    gc->pc = emit_element_none(gc->pc, arrays, index);
  977.  
  978.    if (gc->pc > gc->limit) {
  979.       (void) __glXFlushRenderBuffer(gc, gc->pc);
  980.    }
  981. }
  982.  
  983.  
  984. void
  985. __indirect_glDrawElements(GLenum mode, GLsizei count, GLenum type,
  986.                           const GLvoid * indices)
  987. {
  988.    struct glx_context *gc = __glXGetCurrentContext();
  989.    const __GLXattribute *state =
  990.       (const __GLXattribute *) (gc->client_state_private);
  991.    struct array_state_vector *arrays = state->array_state;
  992.  
  993.  
  994.    if (validate_mode(gc, mode) && validate_count(gc, count)
  995.        && validate_type(gc, type)) {
  996.       if (!arrays->array_info_cache_valid) {
  997.          fill_array_info_cache(arrays);
  998.       }
  999.  
  1000.       arrays->DrawElements(mode, count, type, indices);
  1001.    }
  1002. }
  1003.  
  1004.  
  1005. void
  1006. __indirect_glDrawRangeElements(GLenum mode, GLuint start, GLuint end,
  1007.                                GLsizei count, GLenum type,
  1008.                                const GLvoid * indices)
  1009. {
  1010.    struct glx_context *gc = __glXGetCurrentContext();
  1011.    const __GLXattribute *state =
  1012.       (const __GLXattribute *) (gc->client_state_private);
  1013.    struct array_state_vector *arrays = state->array_state;
  1014.  
  1015.  
  1016.    if (validate_mode(gc, mode) && validate_count(gc, count)
  1017.        && validate_type(gc, type)) {
  1018.       if (end < start) {
  1019.          __glXSetError(gc, GL_INVALID_VALUE);
  1020.          return;
  1021.       }
  1022.  
  1023.       if (!arrays->array_info_cache_valid) {
  1024.          fill_array_info_cache(arrays);
  1025.       }
  1026.  
  1027.       arrays->DrawElements(mode, count, type, indices);
  1028.    }
  1029. }
  1030.  
  1031.  
  1032. void
  1033. __indirect_glMultiDrawArrays(GLenum mode, const GLint *first,
  1034.                                 const GLsizei *count, GLsizei primcount)
  1035. {
  1036.    struct glx_context *gc = __glXGetCurrentContext();
  1037.    const __GLXattribute *state =
  1038.       (const __GLXattribute *) (gc->client_state_private);
  1039.    struct array_state_vector *arrays = state->array_state;
  1040.    GLsizei i;
  1041.  
  1042.  
  1043.    if (validate_mode(gc, mode)) {
  1044.       if (!arrays->array_info_cache_valid) {
  1045.          fill_array_info_cache(arrays);
  1046.       }
  1047.  
  1048.       for (i = 0; i < primcount; i++) {
  1049.          if (validate_count(gc, count[i])) {
  1050.             arrays->DrawArrays(mode, first[i], count[i]);
  1051.          }
  1052.       }
  1053.    }
  1054. }
  1055.  
  1056.  
  1057. void
  1058. __indirect_glMultiDrawElementsEXT(GLenum mode, const GLsizei * count,
  1059.                                   GLenum type, const GLvoid * const * indices,
  1060.                                   GLsizei primcount)
  1061. {
  1062.    struct glx_context *gc = __glXGetCurrentContext();
  1063.    const __GLXattribute *state =
  1064.       (const __GLXattribute *) (gc->client_state_private);
  1065.    struct array_state_vector *arrays = state->array_state;
  1066.    GLsizei i;
  1067.  
  1068.  
  1069.    if (validate_mode(gc, mode) && validate_type(gc, type)) {
  1070.       if (!arrays->array_info_cache_valid) {
  1071.          fill_array_info_cache(arrays);
  1072.       }
  1073.  
  1074.       for (i = 0; i < primcount; i++) {
  1075.          if (validate_count(gc, count[i])) {
  1076.             arrays->DrawElements(mode, count[i], type, indices[i]);
  1077.          }
  1078.       }
  1079.    }
  1080. }
  1081.  
  1082.  
  1083. #define COMMON_ARRAY_DATA_INIT(a, PTR, TYPE, STRIDE, COUNT, NORMALIZED, HDR_SIZE, OPCODE) \
  1084.   do {                                                                  \
  1085.     (a)->data = PTR;                                                    \
  1086.     (a)->data_type = TYPE;                                              \
  1087.     (a)->user_stride = STRIDE;                                          \
  1088.     (a)->count = COUNT;                                                 \
  1089.     (a)->normalized = NORMALIZED;                                       \
  1090.                                                                         \
  1091.     (a)->element_size = __glXTypeSize( TYPE ) * COUNT;                  \
  1092.     (a)->true_stride = (STRIDE == 0)                                    \
  1093.       ? (a)->element_size : STRIDE;                                     \
  1094.                                                                         \
  1095.     (a)->header_size = HDR_SIZE;                                        \
  1096.     ((uint16_t *) (a)->header)[0] = __GLX_PAD((a)->header_size + (a)->element_size); \
  1097.     ((uint16_t *) (a)->header)[1] = OPCODE;                             \
  1098.   } while(0)
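/*
 * Expansion example: for a call such as
 * __indirect_glNormalPointer(GL_FLOAT, 0, ptr), the macro fills the
 * GL_NORMAL_ARRAY entry with
 *
 *    element_size = __glXTypeSize(GL_FLOAT) * 3 = 12
 *    true_stride  = 12                    (stride 0 means tightly packed)
 *    header[0]    = __GLX_PAD(4 + 12) = 16
 *    header[1]    = X_GLrop_Normal3fv
 *
 * which is exactly the immediate-mode Normal3fv command emitted per vertex
 * by the "none" protocol.
 */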
  1099.  
  1100.  
  1101. void
  1102. __indirect_glVertexPointer(GLint size, GLenum type, GLsizei stride,
  1103.                            const GLvoid * pointer)
  1104. {
  1105.    static const uint16_t short_ops[5] = {
  1106.       0, 0, X_GLrop_Vertex2sv, X_GLrop_Vertex3sv, X_GLrop_Vertex4sv
  1107.    };
  1108.    static const uint16_t int_ops[5] = {
  1109.       0, 0, X_GLrop_Vertex2iv, X_GLrop_Vertex3iv, X_GLrop_Vertex4iv
  1110.    };
  1111.    static const uint16_t float_ops[5] = {
  1112.       0, 0, X_GLrop_Vertex2fv, X_GLrop_Vertex3fv, X_GLrop_Vertex4fv
  1113.    };
  1114.    static const uint16_t double_ops[5] = {
  1115.       0, 0, X_GLrop_Vertex2dv, X_GLrop_Vertex3dv, X_GLrop_Vertex4dv
  1116.    };
  1117.    uint16_t opcode;
  1118.    struct glx_context *gc = __glXGetCurrentContext();
  1119.    __GLXattribute *state = (__GLXattribute *) (gc->client_state_private);
  1120.    struct array_state_vector *arrays = state->array_state;
  1121.    struct array_state *a;
  1122.  
  1123.  
  1124.    if (size < 2 || size > 4 || stride < 0) {
  1125.       __glXSetError(gc, GL_INVALID_VALUE);
  1126.       return;
  1127.    }
  1128.  
  1129.    switch (type) {
  1130.    case GL_SHORT:
  1131.       opcode = short_ops[size];
  1132.       break;
  1133.    case GL_INT:
  1134.       opcode = int_ops[size];
  1135.       break;
  1136.    case GL_FLOAT:
  1137.       opcode = float_ops[size];
  1138.       break;
  1139.    case GL_DOUBLE:
  1140.       opcode = double_ops[size];
  1141.       break;
  1142.    default:
  1143.       __glXSetError(gc, GL_INVALID_ENUM);
  1144.       return;
  1145.    }
  1146.  
  1147.    a = get_array_entry(arrays, GL_VERTEX_ARRAY, 0);
  1148.    assert(a != NULL);
  1149.    COMMON_ARRAY_DATA_INIT(a, pointer, type, stride, size, GL_FALSE, 4,
  1150.                           opcode);
  1151.  
  1152.    if (a->enabled) {
  1153.       arrays->array_info_cache_valid = GL_FALSE;
  1154.    }
  1155. }
  1156.  
  1157.  
  1158. void
  1159. __indirect_glNormalPointer(GLenum type, GLsizei stride,
  1160.                            const GLvoid * pointer)
  1161. {
  1162.    uint16_t opcode;
  1163.    struct glx_context *gc = __glXGetCurrentContext();
  1164.    __GLXattribute *state = (__GLXattribute *) (gc->client_state_private);
  1165.    struct array_state_vector *arrays = state->array_state;
  1166.    struct array_state *a;
  1167.  
  1168.  
  1169.    if (stride < 0) {
  1170.       __glXSetError(gc, GL_INVALID_VALUE);
  1171.       return;
  1172.    }
  1173.  
  1174.    switch (type) {
  1175.    case GL_BYTE:
  1176.       opcode = X_GLrop_Normal3bv;
  1177.       break;
  1178.    case GL_SHORT:
  1179.       opcode = X_GLrop_Normal3sv;
  1180.       break;
  1181.    case GL_INT:
  1182.       opcode = X_GLrop_Normal3iv;
  1183.       break;
  1184.    case GL_FLOAT:
  1185.       opcode = X_GLrop_Normal3fv;
  1186.       break;
  1187.    case GL_DOUBLE:
  1188.       opcode = X_GLrop_Normal3dv;
  1189.       break;
  1190.    default:
  1191.       __glXSetError(gc, GL_INVALID_ENUM);
  1192.       return;
  1193.    }
  1194.  
  1195.    a = get_array_entry(arrays, GL_NORMAL_ARRAY, 0);
  1196.    assert(a != NULL);
  1197.    COMMON_ARRAY_DATA_INIT(a, pointer, type, stride, 3, GL_TRUE, 4, opcode);
  1198.  
  1199.    if (a->enabled) {
  1200.       arrays->array_info_cache_valid = GL_FALSE;
  1201.    }
  1202. }
  1203.  
  1204.  
  1205. void
  1206. __indirect_glColorPointer(GLint size, GLenum type, GLsizei stride,
  1207.                           const GLvoid * pointer)
  1208. {
  1209.    static const uint16_t byte_ops[5] = {
  1210.       0, 0, 0, X_GLrop_Color3bv, X_GLrop_Color4bv
  1211.    };
  1212.    static const uint16_t ubyte_ops[5] = {
  1213.       0, 0, 0, X_GLrop_Color3ubv, X_GLrop_Color4ubv
  1214.    };
  1215.    static const uint16_t short_ops[5] = {
  1216.       0, 0, 0, X_GLrop_Color3sv, X_GLrop_Color4sv
  1217.    };
  1218.    static const uint16_t ushort_ops[5] = {
  1219.       0, 0, 0, X_GLrop_Color3usv, X_GLrop_Color4usv
  1220.    };
  1221.    static const uint16_t int_ops[5] = {
  1222.       0, 0, 0, X_GLrop_Color3iv, X_GLrop_Color4iv
  1223.    };
  1224.    static const uint16_t uint_ops[5] = {
  1225.       0, 0, 0, X_GLrop_Color3uiv, X_GLrop_Color4uiv
  1226.    };
  1227.    static const uint16_t float_ops[5] = {
  1228.       0, 0, 0, X_GLrop_Color3fv, X_GLrop_Color4fv
  1229.    };
  1230.    static const uint16_t double_ops[5] = {
  1231.       0, 0, 0, X_GLrop_Color3dv, X_GLrop_Color4dv
  1232.    };
  1233.    uint16_t opcode;
  1234.    struct glx_context *gc = __glXGetCurrentContext();
  1235.    __GLXattribute *state = (__GLXattribute *) (gc->client_state_private);
  1236.    struct array_state_vector *arrays = state->array_state;
  1237.    struct array_state *a;
  1238.  
  1239.  
  1240.    if (size < 3 || size > 4 || stride < 0) {
  1241.       __glXSetError(gc, GL_INVALID_VALUE);
  1242.       return;
  1243.    }
  1244.  
  1245.    switch (type) {
  1246.    case GL_BYTE:
  1247.       opcode = byte_ops[size];
  1248.       break;
  1249.    case GL_UNSIGNED_BYTE:
  1250.       opcode = ubyte_ops[size];
  1251.       break;
  1252.    case GL_SHORT:
  1253.       opcode = short_ops[size];
  1254.       break;
  1255.    case GL_UNSIGNED_SHORT:
  1256.       opcode = ushort_ops[size];
  1257.       break;
  1258.    case GL_INT:
  1259.       opcode = int_ops[size];
  1260.       break;
  1261.    case GL_UNSIGNED_INT:
  1262.       opcode = uint_ops[size];
  1263.       break;
  1264.    case GL_FLOAT:
  1265.       opcode = float_ops[size];
  1266.       break;
  1267.    case GL_DOUBLE:
  1268.       opcode = double_ops[size];
  1269.       break;
  1270.    default:
  1271.       __glXSetError(gc, GL_INVALID_ENUM);
  1272.       return;
  1273.    }
  1274.  
  1275.    a = get_array_entry(arrays, GL_COLOR_ARRAY, 0);
  1276.    assert(a != NULL);
  1277.    COMMON_ARRAY_DATA_INIT(a, pointer, type, stride, size, GL_TRUE, 4, opcode);
  1278.  
  1279.    if (a->enabled) {
  1280.       arrays->array_info_cache_valid = GL_FALSE;
  1281.    }
  1282. }
  1283.  
  1284.  
  1285. void
  1286. __indirect_glIndexPointer(GLenum type, GLsizei stride, const GLvoid * pointer)
  1287. {
  1288.    uint16_t opcode;
  1289.    struct glx_context *gc = __glXGetCurrentContext();
  1290.    __GLXattribute *state = (__GLXattribute *) (gc->client_state_private);
  1291.    struct array_state_vector *arrays = state->array_state;
  1292.    struct array_state *a;
  1293.  
  1294.  
  1295.    if (stride < 0) {
  1296.       __glXSetError(gc, GL_INVALID_VALUE);
  1297.       return;
  1298.    }
  1299.  
  1300.    switch (type) {
  1301.    case GL_UNSIGNED_BYTE:
  1302.       opcode = X_GLrop_Indexubv;
  1303.       break;
  1304.    case GL_SHORT:
  1305.       opcode = X_GLrop_Indexsv;
  1306.       break;
  1307.    case GL_INT:
  1308.       opcode = X_GLrop_Indexiv;
  1309.       break;
  1310.    case GL_FLOAT:
  1311.       opcode = X_GLrop_Indexfv;
  1312.       break;
  1313.    case GL_DOUBLE:
  1314.       opcode = X_GLrop_Indexdv;
  1315.       break;
  1316.    default:
  1317.       __glXSetError(gc, GL_INVALID_ENUM);
  1318.       return;
  1319.    }
  1320.  
  1321.    a = get_array_entry(arrays, GL_INDEX_ARRAY, 0);
  1322.    assert(a != NULL);
  1323.    COMMON_ARRAY_DATA_INIT(a, pointer, type, stride, 1, GL_FALSE, 4, opcode);
  1324.  
  1325.    if (a->enabled) {
  1326.       arrays->array_info_cache_valid = GL_FALSE;
  1327.    }
  1328. }
  1329.  
  1330.  
  1331. void
  1332. __indirect_glEdgeFlagPointer(GLsizei stride, const GLvoid * pointer)
  1333. {
  1334.    struct glx_context *gc = __glXGetCurrentContext();
  1335.    __GLXattribute *state = (__GLXattribute *) (gc->client_state_private);
  1336.    struct array_state_vector *arrays = state->array_state;
  1337.    struct array_state *a;
  1338.  
  1339.  
  1340.    if (stride < 0) {
  1341.       __glXSetError(gc, GL_INVALID_VALUE);
  1342.       return;
  1343.    }
  1344.  
  1345.  
  1346.    a = get_array_entry(arrays, GL_EDGE_FLAG_ARRAY, 0);
  1347.    assert(a != NULL);
  1348.    COMMON_ARRAY_DATA_INIT(a, pointer, GL_UNSIGNED_BYTE, stride, 1, GL_FALSE,
  1349.                           4, X_GLrop_EdgeFlagv);
  1350.  
  1351.    if (a->enabled) {
  1352.       arrays->array_info_cache_valid = GL_FALSE;
  1353.    }
  1354. }
  1355.  
  1356.  
  1357. void
  1358. __indirect_glTexCoordPointer(GLint size, GLenum type, GLsizei stride,
  1359.                              const GLvoid * pointer)
  1360. {
  1361.    static const uint16_t short_ops[5] = {
  1362.       0, X_GLrop_TexCoord1sv, X_GLrop_TexCoord2sv, X_GLrop_TexCoord3sv,
  1363.       X_GLrop_TexCoord4sv
  1364.    };
  1365.    static const uint16_t int_ops[5] = {
  1366.       0, X_GLrop_TexCoord1iv, X_GLrop_TexCoord2iv, X_GLrop_TexCoord3iv,
  1367.       X_GLrop_TexCoord4iv
  1368.    };
  1369.    static const uint16_t float_ops[5] = {
  1370.       0, X_GLrop_TexCoord1fv, X_GLrop_TexCoord2fv, X_GLrop_TexCoord3fv,
  1371.       X_GLrop_TexCoord4fv
  1372.    };
  1373.    static const uint16_t double_ops[5] = {
  1374.       0, X_GLrop_TexCoord1dv, X_GLrop_TexCoord2dv, X_GLrop_TexCoord3dv,
  1375.       X_GLrop_TexCoord4dv
  1376.    };
  1377.  
  1378.    static const uint16_t mshort_ops[5] = {
  1379.       0, X_GLrop_MultiTexCoord1svARB, X_GLrop_MultiTexCoord2svARB,
  1380.       X_GLrop_MultiTexCoord3svARB, X_GLrop_MultiTexCoord4svARB
  1381.    };
  1382.    static const uint16_t mint_ops[5] = {
  1383.       0, X_GLrop_MultiTexCoord1ivARB, X_GLrop_MultiTexCoord2ivARB,
  1384.       X_GLrop_MultiTexCoord3ivARB, X_GLrop_MultiTexCoord4ivARB
  1385.    };
  1386.    static const uint16_t mfloat_ops[5] = {
  1387.       0, X_GLrop_MultiTexCoord1fvARB, X_GLrop_MultiTexCoord2fvARB,
  1388.       X_GLrop_MultiTexCoord3fvARB, X_GLrop_MultiTexCoord4fvARB
  1389.    };
  1390.    static const uint16_t mdouble_ops[5] = {
  1391.       0, X_GLrop_MultiTexCoord1dvARB, X_GLrop_MultiTexCoord2dvARB,
  1392.       X_GLrop_MultiTexCoord3dvARB, X_GLrop_MultiTexCoord4dvARB
  1393.    };
  1394.  
  1395.    uint16_t opcode;
  1396.    struct glx_context *gc = __glXGetCurrentContext();
  1397.    __GLXattribute *state = (__GLXattribute *) (gc->client_state_private);
  1398.    struct array_state_vector *arrays = state->array_state;
  1399.    struct array_state *a;
  1400.    unsigned header_size;
  1401.    unsigned index;
  1402.  
  1403.  
  1404.    if (size < 1 || size > 4 || stride < 0) {
  1405.       __glXSetError(gc, GL_INVALID_VALUE);
  1406.       return;
  1407.    }
  1408.  
  1409.    index = arrays->active_texture_unit;
  1410.    if (index == 0) {
  1411.       switch (type) {
  1412.       case GL_SHORT:
  1413.          opcode = short_ops[size];
  1414.          break;
  1415.       case GL_INT:
  1416.          opcode = int_ops[size];
  1417.          break;
  1418.       case GL_FLOAT:
  1419.          opcode = float_ops[size];
  1420.          break;
  1421.       case GL_DOUBLE:
  1422.          opcode = double_ops[size];
  1423.          break;
  1424.       default:
  1425.          __glXSetError(gc, GL_INVALID_ENUM);
  1426.          return;
  1427.       }
  1428.  
  1429.       header_size = 4;
  1430.    }
  1431.    else {
  1432.       switch (type) {
  1433.       case GL_SHORT:
  1434.          opcode = mshort_ops[size];
  1435.          break;
  1436.       case GL_INT:
  1437.          opcode = mint_ops[size];
  1438.          break;
  1439.       case GL_FLOAT:
  1440.          opcode = mfloat_ops[size];
  1441.          break;
  1442.       case GL_DOUBLE:
  1443.          opcode = mdouble_ops[size];
  1444.          break;
  1445.       default:
  1446.          __glXSetError(gc, GL_INVALID_ENUM);
  1447.          return;
  1448.       }
  1449.  
  1450.       header_size = 8;
  1451.    }
  1452.  
  1453.    a = get_array_entry(arrays, GL_TEXTURE_COORD_ARRAY, index);
  1454.    assert(a != NULL);
  1455.    COMMON_ARRAY_DATA_INIT(a, pointer, type, stride, size, GL_FALSE,
  1456.                           header_size, opcode);
  1457.  
  1458.    if (a->enabled) {
  1459.       arrays->array_info_cache_valid = GL_FALSE;
  1460.    }
  1461. }
  1462.  
  1463.  
  1464. void
  1465. __indirect_glSecondaryColorPointer(GLint size, GLenum type, GLsizei stride,
  1466.                                       const GLvoid * pointer)
  1467. {
  1468.    uint16_t opcode;
  1469.    struct glx_context *gc = __glXGetCurrentContext();
  1470.    __GLXattribute *state = (__GLXattribute *) (gc->client_state_private);
  1471.    struct array_state_vector *arrays = state->array_state;
  1472.    struct array_state *a;
  1473.  
  1474.  
  1475.    if (size != 3 || stride < 0) {
  1476.       __glXSetError(gc, GL_INVALID_VALUE);
  1477.       return;
  1478.    }
  1479.  
  1480.    switch (type) {
  1481.    case GL_BYTE:
  1482.       opcode = 4126;
  1483.       break;
  1484.    case GL_UNSIGNED_BYTE:
  1485.       opcode = 4131;
  1486.       break;
  1487.    case GL_SHORT:
  1488.       opcode = 4127;
  1489.       break;
  1490.    case GL_UNSIGNED_SHORT:
  1491.       opcode = 4132;
  1492.       break;
  1493.    case GL_INT:
  1494.       opcode = 4128;
  1495.       break;
  1496.    case GL_UNSIGNED_INT:
  1497.       opcode = 4133;
  1498.       break;
  1499.    case GL_FLOAT:
  1500.       opcode = 4129;
  1501.       break;
  1502.    case GL_DOUBLE:
  1503.       opcode = 4130;
  1504.       break;
  1505.    default:
  1506.       __glXSetError(gc, GL_INVALID_ENUM);
  1507.       return;
  1508.    }
  1509.  
  1510.    a = get_array_entry(arrays, GL_SECONDARY_COLOR_ARRAY, 0);
  1511.    if (a == NULL) {
  1512.       __glXSetError(gc, GL_INVALID_OPERATION);
  1513.       return;
  1514.    }
  1515.  
  1516.    COMMON_ARRAY_DATA_INIT(a, pointer, type, stride, size, GL_TRUE, 4, opcode);
  1517.  
  1518.    if (a->enabled) {
  1519.       arrays->array_info_cache_valid = GL_FALSE;
  1520.    }
  1521. }
  1522.  
  1523.  
  1524. void
  1525. __indirect_glFogCoordPointer(GLenum type, GLsizei stride,
  1526.                                 const GLvoid * pointer)
  1527. {
  1528.    uint16_t opcode;
  1529.    struct glx_context *gc = __glXGetCurrentContext();
  1530.    __GLXattribute *state = (__GLXattribute *) (gc->client_state_private);
  1531.    struct array_state_vector *arrays = state->array_state;
  1532.    struct array_state *a;
  1533.  
  1534.  
  1535.    if (stride < 0) {
  1536.       __glXSetError(gc, GL_INVALID_VALUE);
  1537.       return;
  1538.    }
  1539.  
  1540.    switch (type) {
  1541.    case GL_FLOAT:
  1542.       opcode = 4124;
  1543.       break;
  1544.    case GL_DOUBLE:
  1545.       opcode = 4125;
  1546.       break;
  1547.    default:
  1548.       __glXSetError(gc, GL_INVALID_ENUM);
  1549.       return;
  1550.    }
  1551.  
  1552.    a = get_array_entry(arrays, GL_FOG_COORD_ARRAY, 0);
  1553.    if (a == NULL) {
  1554.       __glXSetError(gc, GL_INVALID_OPERATION);
  1555.       return;
  1556.    }
  1557.  
  1558.    COMMON_ARRAY_DATA_INIT(a, pointer, type, stride, 1, GL_FALSE, 4, opcode);
  1559.  
  1560.    if (a->enabled) {
  1561.       arrays->array_info_cache_valid = GL_FALSE;
  1562.    }
  1563. }
  1564.  
  1565.  
  1566. void
  1567. __indirect_glVertexAttribPointer(GLuint index, GLint size,
  1568.                                     GLenum type, GLboolean normalized,
  1569.                                     GLsizei stride, const GLvoid * pointer)
  1570. {
  1571.    static const uint16_t short_ops[5] = { 0, 4189, 4190, 4191, 4192 };
  1572.    static const uint16_t float_ops[5] = { 0, 4193, 4194, 4195, 4196 };
  1573.    static const uint16_t double_ops[5] = { 0, 4197, 4198, 4199, 4200 };
  1574.  
  1575.    uint16_t opcode;
  1576.    struct glx_context *gc = __glXGetCurrentContext();
  1577.    __GLXattribute *state = (__GLXattribute *) (gc->client_state_private);
  1578.    struct array_state_vector *arrays = state->array_state;
  1579.    struct array_state *a;
  1580.    unsigned true_immediate_count;
  1581.    unsigned true_immediate_size;
  1582.  
  1583.  
  1584.    if ((size < 1) || (size > 4) || (stride < 0)
  1585.        || (index >= arrays->num_vertex_program_attribs)) {
  1586.       __glXSetError(gc, GL_INVALID_VALUE);
  1587.       return;
  1588.    }
  1589.  
  1590.    if (normalized && (type != GL_FLOAT) && (type != GL_DOUBLE)) {
  1591.       switch (type) {
  1592.       case GL_BYTE:
  1593.          opcode = X_GLrop_VertexAttrib4NbvARB;
  1594.          break;
  1595.       case GL_UNSIGNED_BYTE:
  1596.          opcode = X_GLrop_VertexAttrib4NubvARB;
  1597.          break;
  1598.       case GL_SHORT:
  1599.          opcode = X_GLrop_VertexAttrib4NsvARB;
  1600.          break;
  1601.       case GL_UNSIGNED_SHORT:
  1602.          opcode = X_GLrop_VertexAttrib4NusvARB;
  1603.          break;
  1604.       case GL_INT:
  1605.          opcode = X_GLrop_VertexAttrib4NivARB;
  1606.          break;
  1607.       case GL_UNSIGNED_INT:
  1608.          opcode = X_GLrop_VertexAttrib4NuivARB;
  1609.          break;
  1610.       default:
  1611.          __glXSetError(gc, GL_INVALID_ENUM);
  1612.          return;
  1613.       }
  1614.  
  1615.       true_immediate_count = 4;
  1616.    }
  1617.    else {
  1618.       true_immediate_count = size;
  1619.  
  1620.       switch (type) {
  1621.       case GL_BYTE:
  1622.          opcode = X_GLrop_VertexAttrib4bvARB;
  1623.          true_immediate_count = 4;
  1624.          break;
  1625.       case GL_UNSIGNED_BYTE:
  1626.          opcode = X_GLrop_VertexAttrib4ubvARB;
  1627.          true_immediate_count = 4;
  1628.          break;
  1629.       case GL_SHORT:
  1630.          opcode = short_ops[size];
  1631.          break;
  1632.       case GL_UNSIGNED_SHORT:
  1633.          opcode = X_GLrop_VertexAttrib4usvARB;
  1634.          true_immediate_count = 4;
  1635.          break;
  1636.       case GL_INT:
  1637.          opcode = X_GLrop_VertexAttrib4ivARB;
  1638.          true_immediate_count = 4;
  1639.          break;
  1640.       case GL_UNSIGNED_INT:
  1641.          opcode = X_GLrop_VertexAttrib4uivARB;
  1642.          true_immediate_count = 4;
  1643.          break;
  1644.       case GL_FLOAT:
  1645.          opcode = float_ops[size];
  1646.          break;
  1647.       case GL_DOUBLE:
  1648.          opcode = double_ops[size];
  1649.          break;
  1650.       default:
  1651.          __glXSetError(gc, GL_INVALID_ENUM);
  1652.          return;
  1653.       }
  1654.    }
  1655.  
  1656.    a = get_array_entry(arrays, GL_VERTEX_ATTRIB_ARRAY_POINTER, index);
  1657.    if (a == NULL) {
  1658.       __glXSetError(gc, GL_INVALID_OPERATION);
  1659.       return;
  1660.    }
  1661.  
  1662.    COMMON_ARRAY_DATA_INIT(a, pointer, type, stride, size, normalized, 8,
  1663.                           opcode);
  1664.  
  1665.    true_immediate_size = __glXTypeSize(type) * true_immediate_count;
  1666.    ((uint16_t *) (a)->header)[0] = __GLX_PAD(a->header_size
  1667.                                              + true_immediate_size);
  1668.  
  1669.    if (a->enabled) {
  1670.       arrays->array_info_cache_valid = GL_FALSE;
  1671.    }
  1672. }
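/*
 * A worked sketch of the header-size arithmetic above (illustrative only;
 * it assumes __glXTypeSize returns the byte size of the GL type, e.g. 2
 * for GL_SHORT and 4 for GL_FLOAT, and that the 8 passed to
 * COMMON_ARRAY_DATA_INIT above is what ends up in a->header_size):
 *
 *    glVertexAttribPointer(1, 3, GL_SHORT, GL_FALSE, 0, ptr);
 *       opcode              = short_ops[3]   (3-component short variant)
 *       true_immediate_size = 2 * 3 = 6
 *       header[0]           = __GLX_PAD(8 + 6)  = 16
 *
 *    glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, 0, ptr);
 *       opcode              = float_ops[3]   (3-component float variant)
 *       true_immediate_size = 4 * 3 = 12
 *       header[0]           = __GLX_PAD(8 + 12) = 20
 *
 *    glVertexAttribPointer(1, 3, GL_UNSIGNED_BYTE, GL_TRUE, 0, ptr);
 *       opcode              = X_GLrop_VertexAttrib4NubvARB (always 4 comps)
 *       true_immediate_size = 1 * 4 = 4
 *       header[0]           = __GLX_PAD(8 + 4)  = 12
 *
 * The attribute index (1) and pointer (ptr) are arbitrary placeholders.
 */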
  1673.  
  1674.  
  1675. /**
  1676.  * I don't have 100% confidence that this is correct.  The different rules
  1677.  * about whether or not generic vertex attributes alias "classic" vertex
  1678.  * attributes (i.e., attrib1 ?= primary color) between ARB_vertex_program,
  1679.  * ARB_vertex_shader, and NV_vertex_program are a bit confusing.  My
  1680.  * feeling is that the client-side doesn't have to worry about it.  The
  1681.  * client just sends all the data to the server and lets the server deal
  1682.  * with it.
  1683.  */
  1684. void
  1685. __indirect_glVertexAttribPointerNV(GLuint index, GLint size,
  1686.                                    GLenum type, GLsizei stride,
  1687.                                    const GLvoid * pointer)
  1688. {
  1689.    struct glx_context *gc = __glXGetCurrentContext();
  1690.    GLboolean normalized = GL_FALSE;
  1691.  
  1692.  
  1693.    switch (type) {
  1694.    case GL_UNSIGNED_BYTE:
  1695.       if (size != 4) {
  1696.          __glXSetError(gc, GL_INVALID_VALUE);
  1697.          return;
  1698.       }
  1699.       normalized = GL_TRUE;
  1700.       /* FALLTHROUGH */
  1701.    case GL_SHORT:
  1702.    case GL_FLOAT:
  1703.    case GL_DOUBLE:
  1704.       __indirect_glVertexAttribPointer(index, size, type,
  1705.                                           normalized, stride, pointer);
  1706.       return;
  1707.    default:
  1708.       __glXSetError(gc, GL_INVALID_ENUM);
  1709.       return;
  1710.    }
  1711. }
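/*
 * A minimal client-side sketch of the forwarding above (illustrative
 * only; the attribute index, array name, and vertex count are arbitrary
 * placeholders, not anything defined in this file):
 *
 *    GLubyte colors[64][4];     // hypothetical per-vertex RGBA data
 *
 *    // NV path: GL_UNSIGNED_BYTE must have size == 4 and is forwarded
 *    // with normalized forced to GL_TRUE by the fall-through above.
 *    glVertexAttribPointerNV(3, 4, GL_UNSIGNED_BYTE, 0, colors);
 *
 *    // Equivalent call produced by the forwarding:
 *    glVertexAttribPointer(3, 4, GL_UNSIGNED_BYTE, GL_TRUE, 0, colors);
 *
 * GL_SHORT, GL_FLOAT, and GL_DOUBLE are forwarded unchanged with
 * normalized == GL_FALSE; every other type raises GL_INVALID_ENUM.
 */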
  1712.  
  1713.  
  1714. void
  1715. __indirect_glClientActiveTexture(GLenum texture)
  1716. {
  1717.    struct glx_context *const gc = __glXGetCurrentContext();
  1718.    __GLXattribute *const state =
  1719.       (__GLXattribute *) (gc->client_state_private);
  1720.    struct array_state_vector *const arrays = state->array_state;
  1721.    const GLint unit = (GLint) texture - GL_TEXTURE0;
  1722.  
  1723.  
  1724.    if ((unit < 0) || (unit >= arrays->num_texture_units)) {
  1725.       __glXSetError(gc, GL_INVALID_ENUM);
  1726.       return;
  1727.    }
  1728.  
  1729.    arrays->active_texture_unit = unit;
  1730. }
  1731.  
  1732.  
  1733. /**
  1734.  * Modify the enable state for the selected array
  1735.  */
  1736. GLboolean
  1737. __glXSetArrayEnable(__GLXattribute * state, GLenum key, unsigned index,
  1738.                     GLboolean enable)
  1739. {
  1740.    struct array_state_vector *arrays = state->array_state;
  1741.    struct array_state *a;
  1742.  
  1743.  
  1744.    /* Texture coordinate arrays have an implicit index set when the
  1745.     * application calls glClientActiveTexture.
  1746.     */
  1747.    if (key == GL_TEXTURE_COORD_ARRAY) {
  1748.       index = arrays->active_texture_unit;
  1749.    }
  1750.  
  1751.    a = get_array_entry(arrays, key, index);
  1752.  
  1753.    if ((a != NULL) && (a->enabled != enable)) {
  1754.       a->enabled = enable;
  1755.       arrays->array_info_cache_valid = GL_FALSE;
  1756.    }
  1757.  
  1758.    return (a != NULL);
  1759. }
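/*
 * A minimal sketch of the implicit texture-coordinate index from the
 * application's point of view (illustrative only; it assumes the usual
 * indirect-rendering wiring in which glClientActiveTexture and
 * glEnableClientState reach __indirect_glClientActiveTexture and
 * __glXSetArrayEnable):
 *
 *    glClientActiveTexture(GL_TEXTURE1);
 *    glEnableClientState(GL_TEXTURE_COORD_ARRAY);
 *       // 'index' is ignored by the caller and replaced here with
 *       // active_texture_unit == 1.
 *
 *    glEnableClientState(GL_COLOR_ARRAY);
 *       // non-texture arrays use the index as given.
 *
 * Any change of enable state marks the ARRAY_INFO cache invalid, so the
 * next DrawArrays re-emits the array metadata to the server.
 */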
  1760.  
  1761.  
  1762. void
  1763. __glXArrayDisableAll(__GLXattribute * state)
  1764. {
  1765.    struct array_state_vector *arrays = state->array_state;
  1766.    unsigned i;
  1767.  
  1768.  
  1769.    for (i = 0; i < arrays->num_arrays; i++) {
  1770.       arrays->arrays[i].enabled = GL_FALSE;
  1771.    }
  1772.  
  1773.    arrays->array_info_cache_valid = GL_FALSE;
  1774. }
  1775.  
  1776.  
  1777. /** Store the enable flag of the selected array in \c dest.
  1778.  */
  1779. GLboolean
  1780. __glXGetArrayEnable(const __GLXattribute * const state,
  1781.                     GLenum key, unsigned index, GLintptr * dest)
  1782. {
  1783.    const struct array_state_vector *arrays = state->array_state;
  1784.    const struct array_state *a =
  1785.       get_array_entry((struct array_state_vector *) arrays,
  1786.                       key, index);
  1787.  
  1788.    if (a != NULL) {
  1789.       *dest = (GLintptr) a->enabled;
  1790.    }
  1791.  
  1792.    return (a != NULL);
  1793. }
  1794.  
  1795.  
  1796. /** Store the data type of the selected array in \c dest.
  1797.  */
  1798. GLboolean
  1799. __glXGetArrayType(const __GLXattribute * const state,
  1800.                   GLenum key, unsigned index, GLintptr * dest)
  1801. {
  1802.    const struct array_state_vector *arrays = state->array_state;
  1803.    const struct array_state *a =
  1804.       get_array_entry((struct array_state_vector *) arrays,
  1805.                       key, index);
  1806.  
  1807.    if (a != NULL) {
  1808.       *dest = (GLintptr) a->data_type;
  1809.    }
  1810.  
  1811.    return (a != NULL);
  1812. }
  1813.  
  1814.  
  1815. /** Store the element count (size) of the selected array in \c dest.
  1816.  */
  1817. GLboolean
  1818. __glXGetArraySize(const __GLXattribute * const state,
  1819.                   GLenum key, unsigned index, GLintptr * dest)
  1820. {
  1821.    const struct array_state_vector *arrays = state->array_state;
  1822.    const struct array_state *a =
  1823.       get_array_entry((struct array_state_vector *) arrays,
  1824.                       key, index);
  1825.  
  1826.    if (a != NULL) {
  1827.       *dest = (GLintptr) a->count;
  1828.    }
  1829.  
  1830.    return (a != NULL);
  1831. }
  1832.  
  1833.  
  1834. /** Store the application-specified stride of the selected array in \c dest.
  1835.  */
  1836. GLboolean
  1837. __glXGetArrayStride(const __GLXattribute * const state,
  1838.                     GLenum key, unsigned index, GLintptr * dest)
  1839. {
  1840.    const struct array_state_vector *arrays = state->array_state;
  1841.    const struct array_state *a =
  1842.       get_array_entry((struct array_state_vector *) arrays,
  1843.                       key, index);
  1844.  
  1845.    if (a != NULL) {
  1846.       *dest = (GLintptr) a->user_stride;
  1847.    }
  1848.  
  1849.    return (a != NULL);
  1850. }
  1851.  
  1852.  
  1853. /** Store the application's data pointer for the selected array in \c dest.
  1854.  */
  1855. GLboolean
  1856. __glXGetArrayPointer(const __GLXattribute * const state,
  1857.                      GLenum key, unsigned index, void **dest)
  1858. {
  1859.    const struct array_state_vector *arrays = state->array_state;
  1860.    const struct array_state *a =
  1861.       get_array_entry((struct array_state_vector *) arrays,
  1862.                       key, index);
  1863.  
  1864.  
  1865.    if (a != NULL) {
  1866.       *dest = (void *) (a->data);
  1867.    }
  1868.  
  1869.    return (a != NULL);
  1870. }
  1871.  
  1872.  
  1873. /** Store the normalized flag of the selected array in \c dest.
  1874.  */
  1875. GLboolean
  1876. __glXGetArrayNormalized(const __GLXattribute * const state,
  1877.                         GLenum key, unsigned index, GLintptr * dest)
  1878. {
  1879.    const struct array_state_vector *arrays = state->array_state;
  1880.    const struct array_state *a =
  1881.       get_array_entry((struct array_state_vector *) arrays,
  1882.                       key, index);
  1883.  
  1884.  
  1885.    if (a != NULL) {
  1886.       *dest = (GLintptr) a->normalized;
  1887.    }
  1888.  
  1889.    return (a != NULL);
  1890. }
  1891.  
  1892.  
  1893. /** Return the currently active client texture unit.
  1894.  */
  1895. GLuint
  1896. __glXGetActiveTextureUnit(const __GLXattribute * const state)
  1897. {
  1898.    return state->array_state->active_texture_unit;
  1899. }
  1900.  
  1901.  
  1902. void
  1903. __glXPushArrayState(__GLXattribute * state)
  1904. {
  1905.    struct array_state_vector *arrays = state->array_state;
  1906.    struct array_stack_state *stack =
  1907.       &arrays->stack[(arrays->stack_index * arrays->num_arrays)];
  1908.    unsigned i;
  1909.  
  1910.    /* XXX are we pushing _all_ the necessary fields? */
  1911.    for (i = 0; i < arrays->num_arrays; i++) {
  1912.       stack[i].data = arrays->arrays[i].data;
  1913.       stack[i].data_type = arrays->arrays[i].data_type;
  1914.       stack[i].user_stride = arrays->arrays[i].user_stride;
  1915.       stack[i].count = arrays->arrays[i].count;
  1916.       stack[i].key = arrays->arrays[i].key;
  1917.       stack[i].index = arrays->arrays[i].index;
  1918.       stack[i].enabled = arrays->arrays[i].enabled;
  1919.    }
  1920.  
  1921.    arrays->active_texture_unit_stack[arrays->stack_index] =
  1922.       arrays->active_texture_unit;
  1923.  
  1924.    arrays->stack_index++;
  1925. }
  1926.  
  1927.  
  1928. void
  1929. __glXPopArrayState(__GLXattribute * state)
  1930. {
  1931.    struct array_state_vector *arrays = state->array_state;
  1932.    struct array_stack_state *stack;
  1933.    unsigned i;
  1934.  
  1935.  
  1936.    arrays->stack_index--;
  1937.    stack = &arrays->stack[(arrays->stack_index * arrays->num_arrays)];
  1938.  
  1939.    for (i = 0; i < arrays->num_arrays; i++) {
  1940.       switch (stack[i].key) {
  1941.       case GL_NORMAL_ARRAY:
  1942.          __indirect_glNormalPointer(stack[i].data_type,
  1943.                                     stack[i].user_stride, stack[i].data);
  1944.          break;
  1945.       case GL_COLOR_ARRAY:
  1946.          __indirect_glColorPointer(stack[i].count,
  1947.                                    stack[i].data_type,
  1948.                                    stack[i].user_stride, stack[i].data);
  1949.          break;
  1950.       case GL_INDEX_ARRAY:
  1951.          __indirect_glIndexPointer(stack[i].data_type,
  1952.                                    stack[i].user_stride, stack[i].data);
  1953.          break;
  1954.       case GL_EDGE_FLAG_ARRAY:
  1955.          __indirect_glEdgeFlagPointer(stack[i].user_stride, stack[i].data);
  1956.          break;
  1957.       case GL_TEXTURE_COORD_ARRAY:
  1958.          arrays->active_texture_unit = stack[i].index;
  1959.          __indirect_glTexCoordPointer(stack[i].count,
  1960.                                       stack[i].data_type,
  1961.                                       stack[i].user_stride, stack[i].data);
  1962.          break;
  1963.       case GL_SECONDARY_COLOR_ARRAY:
  1964.          __indirect_glSecondaryColorPointer(stack[i].count,
  1965.                                                stack[i].data_type,
  1966.                                                stack[i].user_stride,
  1967.                                                stack[i].data);
  1968.          break;
  1969.       case GL_FOG_COORDINATE_ARRAY:
  1970.          __indirect_glFogCoordPointer(stack[i].data_type,
  1971.                                          stack[i].user_stride, stack[i].data);
  1972.          break;
  1973.  
  1974.       }
  1975.  
  1976.       __glXSetArrayEnable(state, stack[i].key, stack[i].index,
  1977.                           stack[i].enabled);
  1978.    }
  1979.  
  1980.    arrays->active_texture_unit =
  1981.       arrays->active_texture_unit_stack[arrays->stack_index];
  1982. }
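/*
 * A minimal sketch of the push/pop round trip these two functions back
 * (illustrative only; it assumes the usual wiring in which
 * glPushClientAttrib(GL_CLIENT_VERTEX_ARRAY_BIT) and glPopClientAttrib
 * reach __glXPushArrayState and __glXPopArrayState on an indirect
 * context; the array names are arbitrary placeholders):
 *
 *    glColorPointer(4, GL_FLOAT, 0, colors);
 *    glEnableClientState(GL_COLOR_ARRAY);
 *
 *    glPushClientAttrib(GL_CLIENT_VERTEX_ARRAY_BIT);   // snapshot taken
 *    glColorPointer(3, GL_UNSIGNED_BYTE, 0, temp_colors);
 *    glDisableClientState(GL_COLOR_ARRAY);
 *    glPopClientAttrib();   // pointer, type, stride, enable flag, and
 *                           // the active client texture unit come back
 */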
  1983.