/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 *
 */

#include "util/u_format.h"
#include "util/u_inlines.h"
#include "translate/translate.h"

#include "nouveau_fence.h"
#include "nv_object.xml.h"
#include "nv30/nv30-40_3d.xml.h"
#include "nv30/nv30_context.h"
#include "nv30/nv30_format.h"

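/* Emit a single vertex attribute as immediate data: the element is read from
 * its buffer, unpacked to floats and written with the VTX_ATTR_nF methods.
 * Used when a vertex element has zero stride, i.e. a constant attribute. */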
static void
nv30_emit_vtxattr(struct nv30_context *nv30, struct pipe_vertex_buffer *vb,
                  struct pipe_vertex_element *ve, unsigned attr)
{
   const unsigned nc = util_format_get_nr_components(ve->src_format);
   struct nouveau_pushbuf *push = nv30->base.pushbuf;
   struct nv04_resource *res = nv04_resource(vb->buffer);
   const struct util_format_description *desc =
      util_format_description(ve->src_format);
   const void *data;
   float v[4];

   data = nouveau_resource_map_offset(&nv30->base, res, vb->buffer_offset +
                                      ve->src_offset, NOUVEAU_BO_RD);

   desc->unpack_rgba_float(v, 0, data, 0, 1, 1);

   switch (nc) {
   case 4:
      BEGIN_NV04(push, NV30_3D(VTX_ATTR_4F(attr)), 4);
      PUSH_DATAf(push, v[0]);
      PUSH_DATAf(push, v[1]);
      PUSH_DATAf(push, v[2]);
      PUSH_DATAf(push, v[3]);
      break;
   case 3:
      BEGIN_NV04(push, NV30_3D(VTX_ATTR_3F(attr)), 3);
      PUSH_DATAf(push, v[0]);
      PUSH_DATAf(push, v[1]);
      PUSH_DATAf(push, v[2]);
      break;
   case 2:
      BEGIN_NV04(push, NV30_3D(VTX_ATTR_2F(attr)), 2);
      PUSH_DATAf(push, v[0]);
      PUSH_DATAf(push, v[1]);
      break;
   case 1:
      BEGIN_NV04(push, NV30_3D(VTX_ATTR_1F(attr)), 1);
      PUSH_DATAf(push, v[0]);
      break;
   default:
      assert(0);
      break;
   }
}

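/* Compute the byte range of a vertex buffer that is actually referenced by
 * the current draw, based on the recorded min/max vertex indices. */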
static INLINE void
nv30_vbuf_range(struct nv30_context *nv30, int vbi,
                uint32_t *base, uint32_t *size)
{
   assert(nv30->vbo_max_index != ~0);
   *base = nv30->vbo_min_index * nv30->vtxbuf[vbi].stride;
   *size = (nv30->vbo_max_index -
            nv30->vbo_min_index + 1) * nv30->vtxbuf[vbi].stride;
}

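/* Decide, per vertex buffer, how its data will reach the GPU: buffers not
 * mapped by the GPU are either pushed through the FIFO (vbo_fifo), uploaded
 * from user memory (vbo_user), or migrated to GART. */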
static void
nv30_prevalidate_vbufs(struct nv30_context *nv30)
{
   struct pipe_vertex_buffer *vb;
   struct nv04_resource *buf;
   int i;
   uint32_t base, size;

   nv30->vbo_fifo = nv30->vbo_user = 0;

   for (i = 0; i < nv30->num_vtxbufs; i++) {
      vb = &nv30->vtxbuf[i];
      if (!vb->stride || !vb->buffer) /* NOTE: user_buffer not implemented */
         continue;
      buf = nv04_resource(vb->buffer);

      /* NOTE: user buffers with temporary storage count as mapped by GPU */
      if (!nouveau_resource_mapped_by_gpu(vb->buffer)) {
         if (nv30->vbo_push_hint) {
            nv30->vbo_fifo = ~0;
            continue;
         } else {
            if (buf->status & NOUVEAU_BUFFER_STATUS_USER_MEMORY) {
               nv30->vbo_user |= 1 << i;
               assert(vb->stride > vb->buffer_offset);
               nv30_vbuf_range(nv30, i, &base, &size);
               nouveau_user_buffer_upload(&nv30->base, buf, base, size);
            } else {
               nouveau_buffer_migrate(&nv30->base, buf, NOUVEAU_BO_GART);
            }
            nv30->base.vbo_dirty = TRUE;
         }
      }
   }
}

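/* Re-upload the ranges of user vertex buffers referenced by the current draw
 * and point the hardware vertex buffer slots at the temporary copies;
 * zero-stride elements are emitted as immediate attributes instead. */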
static void
nv30_update_user_vbufs(struct nv30_context *nv30)
{
   struct nouveau_pushbuf *push = nv30->base.pushbuf;
   uint32_t base, offset, size;
   int i;
   uint32_t written = 0;

   for (i = 0; i < nv30->vertex->num_elements; i++) {
      struct pipe_vertex_element *ve = &nv30->vertex->pipe[i];
      const int b = ve->vertex_buffer_index;
      struct pipe_vertex_buffer *vb = &nv30->vtxbuf[b];
      struct nv04_resource *buf = nv04_resource(vb->buffer);

      if (!(nv30->vbo_user & (1 << b)))
         continue;

      if (!vb->stride) {
         nv30_emit_vtxattr(nv30, vb, ve, i);
         continue;
      }
      nv30_vbuf_range(nv30, b, &base, &size);

      if (!(written & (1 << b))) {
         written |= 1 << b;
         nouveau_user_buffer_upload(&nv30->base, buf, base, size);
      }

      offset = vb->buffer_offset + ve->src_offset;

      BEGIN_NV04(push, NV30_3D(VTXBUF(i)), 1);
      PUSH_RESRC(push, NV30_3D(VTXBUF(i)), BUFCTX_VTXTMP, buf, offset,
                       NOUVEAU_BO_LOW | NOUVEAU_BO_RD,
                       0, NV30_3D_VTXBUF_DMA1);
   }
   nv30->base.vbo_dirty = TRUE;
}

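/* Drop the temporary GPU storage of user vertex buffers after the draw. */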
static INLINE void
nv30_release_user_vbufs(struct nv30_context *nv30)
{
   uint32_t vbo_user = nv30->vbo_user;

   while (vbo_user) {
      int i = ffs(vbo_user) - 1;
      vbo_user &= ~(1 << i);

      nouveau_buffer_release_gpu_storage(nv04_resource(nv30->vtxbuf[i].buffer));
   }

   nouveau_bufctx_reset(nv30->bufctx, BUFCTX_VTXTMP);
}

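/* Validate vertex state: program a VTXFMT entry for each vertex element and
 * bind the corresponding vertex buffer ranges, or fall back to immediate
 * attributes / FIFO pushing as decided during prevalidation. */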
void
nv30_vbo_validate(struct nv30_context *nv30)
{
   struct nouveau_pushbuf *push = nv30->base.pushbuf;
   struct nv30_vertex_stateobj *vertex = nv30->vertex;
   struct pipe_vertex_element *ve;
   struct pipe_vertex_buffer *vb;
   unsigned i, redefine;

   nouveau_bufctx_reset(nv30->bufctx, BUFCTX_VTXBUF);
   if (!nv30->vertex || nv30->draw_flags)
      return;

   if (unlikely(vertex->need_conversion)) {
      nv30->vbo_fifo = ~0;
      nv30->vbo_user = 0;
   } else {
      nv30_prevalidate_vbufs(nv30);
   }

   if (!PUSH_SPACE(push, 128))
      return;

   redefine = MAX2(vertex->num_elements, nv30->state.num_vtxelts);
   BEGIN_NV04(push, NV30_3D(VTXFMT(0)), redefine);

   for (i = 0; i < vertex->num_elements; i++) {
      ve = &vertex->pipe[i];
      vb = &nv30->vtxbuf[ve->vertex_buffer_index];

      if (likely(vb->stride) || nv30->vbo_fifo)
         PUSH_DATA (push, (vb->stride << 8) | vertex->element[i].state);
      else
         PUSH_DATA (push, NV30_3D_VTXFMT_TYPE_V32_FLOAT);
   }

   for (; i < nv30->state.num_vtxelts; i++) {
      PUSH_DATA (push, NV30_3D_VTXFMT_TYPE_V32_FLOAT);
   }

   for (i = 0; i < vertex->num_elements; i++) {
      struct nv04_resource *res;
      unsigned offset;
      boolean user;

      ve = &vertex->pipe[i];
      vb = &nv30->vtxbuf[ve->vertex_buffer_index];
      user = (nv30->vbo_user & (1 << ve->vertex_buffer_index));

      res = nv04_resource(vb->buffer);

      if (nv30->vbo_fifo || unlikely(vb->stride == 0)) {
         if (!nv30->vbo_fifo)
            nv30_emit_vtxattr(nv30, vb, ve, i);
         continue;
      }

      offset = ve->src_offset + vb->buffer_offset;

      BEGIN_NV04(push, NV30_3D(VTXBUF(i)), 1);
      PUSH_RESRC(push, NV30_3D(VTXBUF(i)), user ? BUFCTX_VTXTMP : BUFCTX_VTXBUF,
                       res, offset, NOUVEAU_BO_LOW | NOUVEAU_BO_RD,
                       0, NV30_3D_VTXBUF_DMA1);
   }

   nv30->state.num_vtxelts = vertex->num_elements;
}

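/* Create a vertex-elements state object: resolve each element to a hardware
 * vertex format, falling back to a float format (and flagging need_conversion)
 * when the source format has no direct hardware equivalent, and build a
 * translate key describing the converted layout. */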
static void *
nv30_vertex_state_create(struct pipe_context *pipe, unsigned num_elements,
                         const struct pipe_vertex_element *elements)
{
    struct nv30_vertex_stateobj *so;
    struct translate_key transkey;
    unsigned i;

    assert(num_elements);

    so = MALLOC(sizeof(*so) + sizeof(*so->element) * num_elements);
    if (!so)
        return NULL;
    memcpy(so->pipe, elements, sizeof(*elements) * num_elements);
    so->num_elements = num_elements;
    so->need_conversion = FALSE;

    transkey.nr_elements = 0;
    transkey.output_stride = 0;

    for (i = 0; i < num_elements; i++) {
        const struct pipe_vertex_element *ve = &elements[i];
        const unsigned vbi = ve->vertex_buffer_index;
        enum pipe_format fmt = ve->src_format;

        so->element[i].state = nv30_vtxfmt(pipe->screen, fmt)->hw;
        if (!so->element[i].state) {
            switch (util_format_get_nr_components(fmt)) {
            case 1: fmt = PIPE_FORMAT_R32_FLOAT; break;
            case 2: fmt = PIPE_FORMAT_R32G32_FLOAT; break;
            case 3: fmt = PIPE_FORMAT_R32G32B32_FLOAT; break;
            case 4: fmt = PIPE_FORMAT_R32G32B32A32_FLOAT; break;
            default:
                assert(0);
                FREE(so);
                return NULL;
            }
            so->element[i].state = nv30_vtxfmt(pipe->screen, fmt)->hw;
            so->need_conversion = TRUE;
        }

        if (1) {
            unsigned j = transkey.nr_elements++;

            transkey.element[j].type = TRANSLATE_ELEMENT_NORMAL;
            transkey.element[j].input_format = ve->src_format;
            transkey.element[j].input_buffer = vbi;
            transkey.element[j].input_offset = ve->src_offset;
            transkey.element[j].instance_divisor = ve->instance_divisor;

            transkey.element[j].output_format = fmt;
            transkey.element[j].output_offset = transkey.output_stride;
            transkey.output_stride += (util_format_get_stride(fmt, 1) + 3) & ~3;
        }
    }

    so->translate = translate_create(&transkey);
    so->vtx_size = transkey.output_stride / 4;
    so->vtx_per_packet_max = NV04_PFIFO_MAX_PACKET_LEN / MAX2(so->vtx_size, 1);
    return so;
}

static void
nv30_vertex_state_delete(struct pipe_context *pipe, void *hwcso)
{
   struct nv30_vertex_stateobj *so = hwcso;

   if (so->translate)
      so->translate->release(so->translate);
   FREE(hwcso);
}

static void
nv30_vertex_state_bind(struct pipe_context *pipe, void *hwcso)
{
   struct nv30_context *nv30 = nv30_context(pipe);

   nv30->vertex = hwcso;
   nv30->dirty |= NV30_NEW_VERTEX;
}

static void
nv30_draw_arrays(struct nv30_context *nv30,
                 unsigned mode, unsigned start, unsigned count,
                 unsigned instance_count)
{
   struct nouveau_pushbuf *push = nv30->base.pushbuf;
   unsigned prim;

   prim = nv30_prim_gl(mode);

   BEGIN_NV04(push, NV30_3D(VERTEX_BEGIN_END), 1);
   PUSH_DATA (push, prim);
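   /* Each VB_VERTEX_BATCH dword encodes ((count - 1) << 24) | start, i.e. at
    * most 256 vertices per dword, and a single method carries at most 2047
    * dwords, hence the splitting below. */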
   while (count) {
      const unsigned mpush = 2047 * 256;
      unsigned npush  = (count > mpush) ? mpush : count;
      unsigned wpush  = ((npush + 255) & ~255) >> 8;

      count -= npush;

      BEGIN_NI04(push, NV30_3D(VB_VERTEX_BATCH), wpush);
      while (npush >= 256) {
         PUSH_DATA (push, 0xff000000 | start);
         start += 256;
         npush -= 256;
      }

      if (npush)
         PUSH_DATA (push, ((npush - 1) << 24) | start);
   }
   BEGIN_NV04(push, NV30_3D(VERTEX_BEGIN_END), 1);
   PUSH_DATA (push, NV30_3D_VERTEX_BEGIN_END_STOP);
}

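/* Inline index submission: 8- and 16-bit indices are packed two per dword
 * into VB_ELEMENT_U16 (an odd leading index goes through VB_ELEMENT_U32),
 * and 32-bit indices go through VB_ELEMENT_U32 directly; 32-bit indices known
 * to fit in 16 bits are packed the same way as 16-bit ones. */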
static void
nv30_draw_elements_inline_u08(struct nouveau_pushbuf *push, const uint8_t *map,
                              unsigned start, unsigned count)
{
   map += start;

   if (count & 1) {
      BEGIN_NV04(push, NV30_3D(VB_ELEMENT_U32), 1);
      PUSH_DATA (push, *map++);
   }

   count >>= 1;
   while (count) {
      unsigned npush = MIN2(count, NV04_PFIFO_MAX_PACKET_LEN);
      count -= npush;

      BEGIN_NI04(push, NV30_3D(VB_ELEMENT_U16), npush);
      while (npush--) {
         PUSH_DATA (push, (map[1] << 16) | map[0]);
         map += 2;
      }
   }
}

static void
nv30_draw_elements_inline_u16(struct nouveau_pushbuf *push, const uint16_t *map,
                              unsigned start, unsigned count)
{
   map += start;

   if (count & 1) {
      BEGIN_NV04(push, NV30_3D(VB_ELEMENT_U32), 1);
      PUSH_DATA (push, *map++);
   }

   count >>= 1;
   while (count) {
      unsigned npush = MIN2(count, NV04_PFIFO_MAX_PACKET_LEN);
      count -= npush;

      BEGIN_NI04(push, NV30_3D(VB_ELEMENT_U16), npush);
      while (npush--) {
         PUSH_DATA (push, (map[1] << 16) | map[0]);
         map += 2;
      }
   }
}

static void
nv30_draw_elements_inline_u32(struct nouveau_pushbuf *push, const uint32_t *map,
                              unsigned start, unsigned count)
{
   map += start;

   while (count) {
      const unsigned nr = MIN2(count, NV04_PFIFO_MAX_PACKET_LEN);

      BEGIN_NI04(push, NV30_3D(VB_ELEMENT_U32), nr);
      PUSH_DATAp(push, map, nr);

      map += nr;
      count -= nr;
   }
}

static void
nv30_draw_elements_inline_u32_short(struct nouveau_pushbuf *push,
                                    const uint32_t *map,
                                    unsigned start, unsigned count)
{
   map += start;

   if (count & 1) {
      BEGIN_NV04(push, NV30_3D(VB_ELEMENT_U32), 1);
      PUSH_DATA (push, *map++);
   }

   count >>= 1;
   while (count) {
      unsigned npush = MIN2(count, NV04_PFIFO_MAX_PACKET_LEN);
      count -= npush;

      BEGIN_NI04(push, NV30_3D(VB_ELEMENT_U16), npush);
      while (npush--) {
         PUSH_DATA (push, (map[1] << 16) | map[0]);
         map += 2;
      }
   }
}

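/* Indexed draw dispatch: on NV40-class hardware with a GPU-resident index
 * buffer and 16/32-bit indices, draw through the hardware index buffer
 * (IDXBUF); otherwise map the indices and push them inline. */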
static void
nv30_draw_elements(struct nv30_context *nv30, boolean shorten,
                   unsigned mode, unsigned start, unsigned count,
                   unsigned instance_count, int32_t index_bias)
{
   const unsigned index_size = nv30->idxbuf.index_size;
   struct nouveau_pushbuf *push = nv30->base.pushbuf;
   struct nouveau_object *eng3d = nv30->screen->eng3d;
   unsigned prim = nv30_prim_gl(mode);

#if 0 /*XXX*/
   if (index_bias != nv30->state.index_bias) {
      BEGIN_NV04(push, NV30_3D(VB_ELEMENT_BASE), 1);
      PUSH_DATA (push, index_bias);
      nv30->state.index_bias = index_bias;
   }
#endif

   if (eng3d->oclass == NV40_3D_CLASS && index_size > 1 &&
       nv30->idxbuf.buffer) {
      struct nv04_resource *res = nv04_resource(nv30->idxbuf.buffer);
      unsigned offset = nv30->idxbuf.offset;

      assert(nouveau_resource_mapped_by_gpu(&res->base));

      BEGIN_NV04(push, NV30_3D(IDXBUF_OFFSET), 2);
      PUSH_RESRC(push, NV30_3D(IDXBUF_OFFSET), BUFCTX_IDXBUF, res, offset,
                       NOUVEAU_BO_LOW | NOUVEAU_BO_RD, 0, 0);
      PUSH_MTHD (push, NV30_3D(IDXBUF_FORMAT), BUFCTX_IDXBUF, res->bo,
                       (index_size == 2) ? 0x00000010 : 0x00000000,
                       res->domain | NOUVEAU_BO_RD,
                       0, NV30_3D_IDXBUF_FORMAT_DMA1);
      BEGIN_NV04(push, NV30_3D(VERTEX_BEGIN_END), 1);
      PUSH_DATA (push, prim);
      while (count) {
         const unsigned mpush = 2047 * 256;
         unsigned npush  = (count > mpush) ? mpush : count;
         unsigned wpush  = ((npush + 255) & ~255) >> 8;

         count -= npush;

         BEGIN_NI04(push, NV30_3D(VB_INDEX_BATCH), wpush);
         while (npush >= 256) {
            PUSH_DATA (push, 0xff000000 | start);
            start += 256;
            npush -= 256;
         }

         if (npush)
            PUSH_DATA (push, ((npush - 1) << 24) | start);
      }
      BEGIN_NV04(push, NV30_3D(VERTEX_BEGIN_END), 1);
      PUSH_DATA (push, NV30_3D_VERTEX_BEGIN_END_STOP);
      PUSH_RESET(push, BUFCTX_IDXBUF);
   } else {
      const void *data;
      if (nv30->idxbuf.buffer)
         data = nouveau_resource_map_offset(&nv30->base,
                                            nv04_resource(nv30->idxbuf.buffer),
                                            nv30->idxbuf.offset, NOUVEAU_BO_RD);
      else
         data = nv30->idxbuf.user_buffer;
      if (!data)
         return;

      BEGIN_NV04(push, NV30_3D(VERTEX_BEGIN_END), 1);
      PUSH_DATA (push, prim);
      switch (index_size) {
      case 1:
         nv30_draw_elements_inline_u08(push, data, start, count);
         break;
      case 2:
         nv30_draw_elements_inline_u16(push, data, start, count);
         break;
      case 4:
         if (shorten)
            nv30_draw_elements_inline_u32_short(push, data, start, count);
         else
            nv30_draw_elements_inline_u32(push, data, start, count);
         break;
      default:
         assert(0);
         return;
      }
      BEGIN_NV04(push, NV30_3D(VERTEX_BEGIN_END), 1);
      PUSH_DATA (push, NV30_3D_VERTEX_BEGIN_END_STOP);
   }
}


static void
nv30_draw_vbo(struct pipe_context *pipe, const struct pipe_draw_info *info)
{
   struct nv30_context *nv30 = nv30_context(pipe);
   struct nouveau_pushbuf *push = nv30->base.pushbuf;
   int i;

   /* When only a few vertices are picked from a large user buffer, pushing
    * vertices through the FIFO is better; when the index count is large and
    * we expect repeated vertices, suggest an upload instead.
    */
   nv30->vbo_push_hint = /* the 64 is heuristic */
      !(info->indexed &&
        ((info->max_index - info->min_index + 64) < info->count));

   nv30->vbo_min_index = info->min_index;
   nv30->vbo_max_index = info->max_index;

   if (nv30->vbo_push_hint != !!nv30->vbo_fifo)
      nv30->dirty |= NV30_NEW_ARRAYS;

   push->user_priv = &nv30->bufctx;
   if (nv30->vbo_user && !(nv30->dirty & (NV30_NEW_VERTEX | NV30_NEW_ARRAYS)))
      nv30_update_user_vbufs(nv30);

   nv30_state_validate(nv30, TRUE);
   if (nv30->draw_flags) {
      nv30_render_vbo(pipe, info);
      return;
   } else
   if (nv30->vbo_fifo) {
      nv30_push_vbo(nv30, info);
      return;
   }

   for (i = 0; i < nv30->num_vtxbufs && !nv30->base.vbo_dirty; ++i) {
      if (!nv30->vtxbuf[i].buffer)
         continue;
      if (nv30->vtxbuf[i].buffer->flags & PIPE_RESOURCE_FLAG_MAP_COHERENT)
         nv30->base.vbo_dirty = TRUE;
   }

   if (!nv30->base.vbo_dirty && nv30->idxbuf.buffer &&
       nv30->idxbuf.buffer->flags & PIPE_RESOURCE_FLAG_MAP_COHERENT)
      nv30->base.vbo_dirty = TRUE;

   if (nv30->base.vbo_dirty) {
      BEGIN_NV04(push, NV30_3D(VTX_CACHE_INVALIDATE_1710), 1);
      PUSH_DATA (push, 0);
      nv30->base.vbo_dirty = FALSE;
   }

   if (!info->indexed) {
      nv30_draw_arrays(nv30,
                       info->mode, info->start, info->count,
                       info->instance_count);
   } else {
      boolean shorten = info->max_index <= 65535;

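      /* Keep the hardware primitive-restart state in sync with the draw info;
       * a restart index above 0xffff forces the 32-bit inline index path. */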
      if (info->primitive_restart != nv30->state.prim_restart) {
         if (info->primitive_restart) {
            BEGIN_NV04(push, NV40_3D(PRIM_RESTART_ENABLE), 2);
            PUSH_DATA (push, 1);
            PUSH_DATA (push, info->restart_index);

            if (info->restart_index > 65535)
               shorten = FALSE;
         } else {
            BEGIN_NV04(push, NV40_3D(PRIM_RESTART_ENABLE), 1);
            PUSH_DATA (push, 0);
         }
         nv30->state.prim_restart = info->primitive_restart;
      } else
      if (info->primitive_restart) {
         BEGIN_NV04(push, NV40_3D(PRIM_RESTART_INDEX), 1);
         PUSH_DATA (push, info->restart_index);

         if (info->restart_index > 65535)
            shorten = FALSE;
      }

      nv30_draw_elements(nv30, shorten,
                         info->mode, info->start, info->count,
                         info->instance_count, info->index_bias);
   }

   nv30_state_release(nv30);
   nv30_release_user_vbufs(nv30);
}

void
nv30_vbo_init(struct pipe_context *pipe)
{
   pipe->create_vertex_elements_state = nv30_vertex_state_create;
   pipe->delete_vertex_elements_state = nv30_vertex_state_delete;
   pipe->bind_vertex_elements_state = nv30_vertex_state_bind;
   pipe->draw_vbo = nv30_draw_vbo;
}