/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 *
 */

#include "draw/draw_context.h"
#include "draw/draw_vertex.h"
#include "draw/draw_pipe.h"
#include "draw/draw_vbuf.h"
#include "draw/draw_private.h"

#include "nv_object.xml.h"
#include "nv30/nv30-40_3d.xml.h"
#include "nv30/nv30_context.h"
#include "nv30/nv30_format.h"

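/* Software-TNL fallback: a vbuf_render backend for gallium's draw module.
 * The draw module runs vertex processing on the CPU and hands the
 * post-transform vertices back here for submission to the 3D engine.
 */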
struct nv30_render {
   struct vbuf_render base;
   struct nv30_context *nv30;

   struct pipe_transfer *transfer;
   struct pipe_resource *buffer;
   unsigned offset;
   unsigned length;

   struct vertex_info vertex_info;

   struct nouveau_heap *vertprog;
   uint32_t vtxprog[16][4];
   uint32_t vtxfmt[16];
   uint32_t vtxptr[16];
   uint32_t prim;
};

static INLINE struct nv30_render *
nv30_render(struct vbuf_render *render)
{
   return (struct nv30_render *)render;
}

static const struct vertex_info *
nv30_render_get_vertex_info(struct vbuf_render *render)
{
   return &nv30_render(render)->vertex_info;
}

static boolean
nv30_render_allocate_vertices(struct vbuf_render *render,
                              ushort vertex_size, ushort nr_vertices)
{
   struct nv30_render *r = nv30_render(render);
   struct nv30_context *nv30 = r->nv30;

   r->length = (uint32_t)vertex_size * (uint32_t)nr_vertices;

   if (r->offset + r->length >= render->max_vertex_buffer_bytes) {
      pipe_resource_reference(&r->buffer, NULL);
      r->buffer = pipe_buffer_create(&nv30->screen->base.base,
                                     PIPE_BIND_VERTEX_BUFFER, PIPE_USAGE_STREAM,
                                     render->max_vertex_buffer_bytes);
      if (!r->buffer)
         return FALSE;

      r->offset = 0;
   }

   return TRUE;
}

static void *
nv30_render_map_vertices(struct vbuf_render *render)
{
   struct nv30_render *r = nv30_render(render);
   char *map = pipe_buffer_map_range(
         &r->nv30->base.pipe, r->buffer,
         r->offset, r->length,
         PIPE_TRANSFER_WRITE |
         PIPE_TRANSFER_DISCARD_RANGE,
         &r->transfer);
   assert(map);
   return map;
}

static void
nv30_render_unmap_vertices(struct vbuf_render *render,
                           ushort min_index, ushort max_index)
{
   struct nv30_render *r = nv30_render(render);
   pipe_buffer_unmap(&r->nv30->base.pipe, r->transfer);
   r->transfer = NULL;
}

static void
nv30_render_set_primitive(struct vbuf_render *render, unsigned prim)
{
   struct nv30_render *r = nv30_render(render);

   r->prim = nv30_prim_gl(prim);
}

static void
nv30_render_draw_elements(struct vbuf_render *render,
                          const ushort *indices, uint count)
{
   struct nv30_render *r = nv30_render(render);
   struct nv30_context *nv30 = r->nv30;
   struct nouveau_pushbuf *push = nv30->screen->base.pushbuf;
   unsigned i;

   /* point the hardware vertex fetch at the staging buffer */
   BEGIN_NV04(push, NV30_3D(VTXBUF(0)), r->vertex_info.num_attribs);
   for (i = 0; i < r->vertex_info.num_attribs; i++) {
      PUSH_RESRC(push, NV30_3D(VTXBUF(i)), BUFCTX_VTXTMP,
                       nv04_resource(r->buffer), r->offset + r->vtxptr[i],
                       NOUVEAU_BO_LOW | NOUVEAU_BO_RD, 0, NV30_3D_VTXBUF_DMA1);
   }

   if (!nv30_state_validate(nv30, FALSE))
      return;

   BEGIN_NV04(push, NV30_3D(VERTEX_BEGIN_END), 1);
   PUSH_DATA (push, r->prim);

   /* emit a single leading index on its own if the count is odd ... */
   if (count & 1) {
      BEGIN_NV04(push, NV30_3D(VB_ELEMENT_U32), 1);
      PUSH_DATA (push, *indices++);
   }

   /* ... then the rest packed two 16-bit indices per dword */
   count >>= 1;
   while (count) {
      unsigned npush = MIN2(count, NV04_PFIFO_MAX_PACKET_LEN);
      count -= npush;

      BEGIN_NI04(push, NV30_3D(VB_ELEMENT_U16), npush);
      while (npush--) {
         PUSH_DATA(push, (indices[1] << 16) | indices[0]);
         indices += 2;
      }
   }

   BEGIN_NV04(push, NV30_3D(VERTEX_BEGIN_END), 1);
   PUSH_DATA (push, NV30_3D_VERTEX_BEGIN_END_STOP);
   PUSH_RESET(push, BUFCTX_VTXTMP);
}

static void
nv30_render_draw_arrays(struct vbuf_render *render, unsigned start, uint nr)
{
   struct nv30_render *r = nv30_render(render);
   struct nv30_context *nv30 = r->nv30;
   struct nouveau_pushbuf *push = nv30->base.pushbuf;
   unsigned fn = nr >> 8, pn = nr & 0xff;
   unsigned ps = fn + (pn ? 1 : 0);
   unsigned i;

   BEGIN_NV04(push, NV30_3D(VTXBUF(0)), r->vertex_info.num_attribs);
   for (i = 0; i < r->vertex_info.num_attribs; i++) {
      PUSH_RESRC(push, NV30_3D(VTXBUF(i)), BUFCTX_VTXTMP,
                       nv04_resource(r->buffer), r->offset + r->vtxptr[i],
                       NOUVEAU_BO_LOW | NOUVEAU_BO_RD, 0, NV30_3D_VTXBUF_DMA1);
   }

   if (!nv30_state_validate(nv30, FALSE))
      return;

   BEGIN_NV04(push, NV30_3D(VERTEX_BEGIN_END), 1);
   PUSH_DATA (push, r->prim);

   /* VB_VERTEX_BATCH takes ((count - 1) << 24) | start, so emit full
    * batches of 256 vertices followed by the partial remainder
    */
   BEGIN_NI04(push, NV30_3D(VB_VERTEX_BATCH), ps);
   while (fn--) {
      PUSH_DATA (push, 0xff000000 | start);
      start += 256;
   }

   if (pn)
      PUSH_DATA (push, ((pn - 1) << 24) | start);

   BEGIN_NV04(push, NV30_3D(VERTEX_BEGIN_END), 1);
   PUSH_DATA (push, NV30_3D_VERTEX_BEGIN_END_STOP);
   PUSH_RESET(push, BUFCTX_VTXTMP);
}

static void
nv30_render_release_vertices(struct vbuf_render *render)
{
   struct nv30_render *r = nv30_render(render);
   r->offset += r->length;
}

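/* routing of TGSI output semantics to hardware vertex result slots:
 * vp30/vp40 are the nv30/nv40 output register indices, ow40 the
 * corresponding output-write enable bit on nv40
 */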
static const struct {
   unsigned emit;
   unsigned interp;
   unsigned vp30;
   unsigned vp40;
   unsigned ow40;
} vroute [] = {
   [TGSI_SEMANTIC_POSITION] = { EMIT_4F, INTERP_PERSPECTIVE, 0, 0, 0x00000000 },
   [TGSI_SEMANTIC_COLOR   ] = { EMIT_4F, INTERP_LINEAR     , 3, 1, 0x00000001 },
   [TGSI_SEMANTIC_BCOLOR  ] = { EMIT_4F, INTERP_LINEAR     , 1, 3, 0x00000004 },
   [TGSI_SEMANTIC_FOG     ] = { EMIT_4F, INTERP_PERSPECTIVE, 5, 5, 0x00000010 },
   [TGSI_SEMANTIC_PSIZE   ] = { EMIT_1F_PSIZE, INTERP_POS  , 6, 6, 0x00000020 },
   [TGSI_SEMANTIC_TEXCOORD] = { EMIT_4F, INTERP_PERSPECTIVE, 8, 7, 0x00004000 },
};

static boolean
vroute_add(struct nv30_render *r, uint attrib, uint sem, uint *idx)
{
   struct nv30_screen *screen = r->nv30->screen;
   struct nv30_fragprog *fp = r->nv30->fragprog.program;
   struct vertex_info *vinfo = &r->vertex_info;
   enum pipe_format format;
   uint emit = EMIT_OMIT;
   uint result = *idx;

   if (sem == TGSI_SEMANTIC_GENERIC) {
      uint num_texcoords = (screen->eng3d->oclass < NV40_3D_CLASS) ? 8 : 10;
      for (result = 0; result < num_texcoords; result++) {
         if (fp->texcoord[result] == *idx + 8) {
            sem = TGSI_SEMANTIC_TEXCOORD;
            emit = vroute[sem].emit;
            break;
         }
      }
   } else {
      emit = vroute[sem].emit;
   }

   if (emit == EMIT_OMIT)
      return FALSE;

   draw_emit_vertex_attr(vinfo, emit, vroute[sem].interp, attrib);
   format = draw_translate_vinfo_format(emit);

   r->vtxfmt[attrib] = nv30_vtxfmt(&screen->base.base, format)->hw;
   r->vtxptr[attrib] = vinfo->size;
   vinfo->size += draw_translate_vinfo_size(emit);

   /* pre-encoded passthrough vertex program instruction copying input
    * attribute 'attrib' to the routed output register
    */
   if (screen->eng3d->oclass < NV40_3D_CLASS) {
      r->vtxprog[attrib][0] = 0x001f38d8;
      r->vtxprog[attrib][1] = 0x0080001b | (attrib << 9);
      r->vtxprog[attrib][2] = 0x0836106c;
      r->vtxprog[attrib][3] = 0x2000f800 | (result + vroute[sem].vp30) << 2;
   } else {
      r->vtxprog[attrib][0] = 0x401f9c6c;
      r->vtxprog[attrib][1] = 0x0040000d | (attrib << 8);
      r->vtxprog[attrib][2] = 0x8106c083;
      r->vtxprog[attrib][3] = 0x6041ff80 | (result + vroute[sem].vp40) << 2;
   }

   if (result < 8)
      *idx = vroute[sem].ow40 << result;
   else {
      assert(sem == TGSI_SEMANTIC_TEXCOORD);
      *idx = 0x00001000 << (result - 8);
   }
   return TRUE;
}

static boolean
nv30_render_validate(struct nv30_context *nv30)
{
   struct nv30_render *r = nv30_render(nv30->draw->render);
   struct nv30_rasterizer_stateobj *rast = nv30->rast;
   struct pipe_screen *pscreen = &nv30->screen->base.base;
   struct nouveau_pushbuf *push = nv30->screen->base.pushbuf;
   struct nouveau_object *eng3d = nv30->screen->eng3d;
   struct nv30_vertprog *vp = nv30->vertprog.program;
   struct vertex_info *vinfo = &r->vertex_info;
   unsigned vp_attribs = 0;
   unsigned vp_results = 0;
   unsigned attrib = 0;
   unsigned pntc;
   int i;

   /* allocate space for the passthrough vertex program, evicting other
    * programs from the heap if necessary
    */
   if (!r->vertprog) {
      struct nouveau_heap *heap = nv30_screen(pscreen)->vp_exec_heap;
      if (nouveau_heap_alloc(heap, 16, &r->vertprog, &r->vertprog)) {
         while (heap->next && heap->size < 16) {
            struct nouveau_heap **evict = heap->next->priv;
            nouveau_heap_free(evict);
         }

         if (nouveau_heap_alloc(heap, 16, &r->vertprog, &r->vertprog))
            return FALSE;
      }
   }

   vinfo->num_attribs = 0;
   vinfo->size = 0;

   /* setup routing for all necessary vp outputs */
   for (i = 0; i < vp->info.num_outputs && attrib < 16; i++) {
      uint semantic = vp->info.output_semantic_name[i];
      uint index = vp->info.output_semantic_index[i];
      if (vroute_add(r, attrib, semantic, &index)) {
         vp_attribs |= (1 << attrib++);
         vp_results |= index;
      }
   }

   /* setup routing for replaced point coords not written by vp */
   if (rast && rast->pipe.point_quad_rasterization)
      pntc = rast->pipe.sprite_coord_enable & 0x000002ff;
   else
      pntc = 0;

   while (pntc && attrib < 16) {
      uint index = ffs(pntc) - 1; pntc &= ~(1 << index);
      if (vroute_add(r, attrib, TGSI_SEMANTIC_TEXCOORD, &index)) {
         vp_attribs |= (1 << attrib++);
         vp_results |= index;
      }
   }

   /* modify vertex format for correct stride, and stub out unused ones */
   BEGIN_NV04(push, NV30_3D(VP_UPLOAD_FROM_ID), 1);
   PUSH_DATA (push, r->vertprog->start);
   r->vtxprog[attrib - 1][3] |= 1;   /* mark the final instruction as program end */
   for (i = 0; i < attrib; i++) {
      BEGIN_NV04(push, NV30_3D(VP_UPLOAD_INST(0)), 4);
      PUSH_DATAp(push, r->vtxprog[i], 4);
      r->vtxfmt[i] |= vinfo->size << 8;
   }
   for (; i < 16; i++)
      r->vtxfmt[i]  = NV30_3D_VTXFMT_TYPE_V32_FLOAT;

   /* identity viewport transform and depth range; the draw module has
    * already applied the real transform on the CPU
    */
   BEGIN_NV04(push, NV30_3D(VIEWPORT_TRANSLATE_X), 8);
   PUSH_DATAf(push, 0.0);
   PUSH_DATAf(push, 0.0);
   PUSH_DATAf(push, 0.0);
   PUSH_DATAf(push, 0.0);
   PUSH_DATAf(push, 1.0);
   PUSH_DATAf(push, 1.0);
   PUSH_DATAf(push, 1.0);
   PUSH_DATAf(push, 1.0);
   BEGIN_NV04(push, NV30_3D(DEPTH_RANGE_NEAR), 2);
   PUSH_DATAf(push, 0.0);
   PUSH_DATAf(push, 1.0);

   BEGIN_NV04(push, NV30_3D(VTXFMT(0)), 16);
   PUSH_DATAp(push, r->vtxfmt, 16);

   BEGIN_NV04(push, NV30_3D(VP_START_FROM_ID), 1);
   PUSH_DATA (push, r->vertprog->start);
   BEGIN_NV04(push, NV30_3D(ENGINE), 1);
   PUSH_DATA (push, 0x00000103);
   if (eng3d->oclass >= NV40_3D_CLASS) {
      BEGIN_NV04(push, NV40_3D(VP_ATTRIB_EN), 2);
      PUSH_DATA (push, vp_attribs);
      PUSH_DATA (push, vp_results);
   }

   vinfo->size /= 4;
   return TRUE;
}

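/* entry point for draws that take the software fallback path */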
void
nv30_render_vbo(struct pipe_context *pipe, const struct pipe_draw_info *info)
{
   struct nv30_context *nv30 = nv30_context(pipe);
   struct draw_context *draw = nv30->draw;
   struct pipe_transfer *transfer[PIPE_MAX_ATTRIBS] = {NULL};
   struct pipe_transfer *transferi = NULL;
   int i;

   nv30_render_validate(nv30);

   /* propagate state the draw module needs that changed since the last
    * software-rendered draw
    */
   if (nv30->draw_dirty & NV30_NEW_VIEWPORT)
      draw_set_viewport_states(draw, 0, 1, &nv30->viewport);
   if (nv30->draw_dirty & NV30_NEW_RASTERIZER)
      draw_set_rasterizer_state(draw, &nv30->rast->pipe, NULL);
   if (nv30->draw_dirty & NV30_NEW_CLIP)
      draw_set_clip_state(draw, &nv30->clip);
   if (nv30->draw_dirty & NV30_NEW_ARRAYS) {
      draw_set_vertex_buffers(draw, 0, nv30->num_vtxbufs, nv30->vtxbuf);
      draw_set_vertex_elements(draw, nv30->vertex->num_elements, nv30->vertex->pipe);
   }
   if (nv30->draw_dirty & NV30_NEW_FRAGPROG) {
      struct nv30_fragprog *fp = nv30->fragprog.program;
      if (!fp->draw)
         fp->draw = draw_create_fragment_shader(draw, &fp->pipe);
      draw_bind_fragment_shader(draw, fp->draw);
   }
   if (nv30->draw_dirty & NV30_NEW_VERTPROG) {
      struct nv30_vertprog *vp = nv30->vertprog.program;
      if (!vp->draw)
         vp->draw = draw_create_vertex_shader(draw, &vp->pipe);
      draw_bind_vertex_shader(draw, vp->draw);
   }
   if (nv30->draw_dirty & NV30_NEW_VERTCONST) {
      if (nv30->vertprog.constbuf) {
         void *map = nv04_resource(nv30->vertprog.constbuf)->data;
         draw_set_mapped_constant_buffer(draw, PIPE_SHADER_VERTEX, 0,
                                         map, nv30->vertprog.constbuf_nr * 16);
      } else {
         draw_set_mapped_constant_buffer(draw, PIPE_SHADER_VERTEX, 0, NULL, 0);
      }
   }

   /* map vertex (and index) buffers so the draw module can read them */
   for (i = 0; i < nv30->num_vtxbufs; i++) {
      const void *map = nv30->vtxbuf[i].user_buffer;
      if (!map) {
         if (nv30->vtxbuf[i].buffer)
            map = pipe_buffer_map(pipe, nv30->vtxbuf[i].buffer,
                                  PIPE_TRANSFER_UNSYNCHRONIZED |
                                  PIPE_TRANSFER_READ, &transfer[i]);
      }
      draw_set_mapped_vertex_buffer(draw, i, map, ~0);
   }

   if (info->indexed) {
      const void *map = nv30->idxbuf.user_buffer;
      if (!map)
         map = pipe_buffer_map(pipe, nv30->idxbuf.buffer,
                               PIPE_TRANSFER_UNSYNCHRONIZED |
                               PIPE_TRANSFER_READ, &transferi);
      draw_set_indexes(draw,
                       (ubyte *) map + nv30->idxbuf.offset,
                       nv30->idxbuf.index_size, ~0);
   } else {
      draw_set_indexes(draw, NULL, 0, 0);
   }

   draw_vbo(draw, info);
   draw_flush(draw);

   if (info->indexed && transferi)
      pipe_buffer_unmap(pipe, transferi);
   for (i = 0; i < nv30->num_vtxbufs; i++)
      if (transfer[i])
         pipe_buffer_unmap(pipe, transfer[i]);

   nv30->draw_dirty = 0;
   nv30_state_release(nv30);
}

static void
nv30_render_destroy(struct vbuf_render *render)
{
   struct nv30_render *r = nv30_render(render);

   if (r->transfer)
      pipe_buffer_unmap(&r->nv30->base.pipe, r->transfer);
   pipe_resource_reference(&r->buffer, NULL);
   nouveau_heap_free(&r->vertprog);
   FREE(render);
}

static struct vbuf_render *
nv30_render_create(struct nv30_context *nv30)
{
   struct nv30_render *r = CALLOC_STRUCT(nv30_render);
   if (!r)
      return NULL;

   r->nv30 = nv30;
   /* offset == max_vertex_buffer_bytes forces a fresh buffer allocation
    * on the first allocate_vertices() call
    */
   r->offset = 1 * 1024 * 1024;

   r->base.max_indices = 16 * 1024;
   r->base.max_vertex_buffer_bytes = r->offset;

   r->base.get_vertex_info = nv30_render_get_vertex_info;
   r->base.allocate_vertices = nv30_render_allocate_vertices;
   r->base.map_vertices = nv30_render_map_vertices;
   r->base.unmap_vertices = nv30_render_unmap_vertices;
   r->base.set_primitive = nv30_render_set_primitive;
   r->base.draw_elements = nv30_render_draw_elements;
   r->base.draw_arrays = nv30_render_draw_arrays;
   r->base.release_vertices = nv30_render_release_vertices;
   r->base.destroy = nv30_render_destroy;
   return &r->base;
}

void
nv30_draw_init(struct pipe_context *pipe)
{
   struct nv30_context *nv30 = nv30_context(pipe);
   struct vbuf_render *render;
   struct draw_context *draw;
   struct draw_stage *stage;

   draw = draw_create(pipe);
   if (!draw)
      return;

   render = nv30_render_create(nv30);
   if (!render) {
      draw_destroy(draw);
      return;
   }

   stage = draw_vbuf_stage(draw, render);
   if (!stage) {
      render->destroy(render);
      draw_destroy(draw);
      return;
   }

   draw_set_render(draw, render);
   draw_set_rasterize_stage(draw, stage);
   draw_wide_line_threshold(draw, 10000000.f);
   draw_wide_point_threshold(draw, 10000000.f);
   draw_wide_point_sprites(draw, TRUE);
   nv30->draw = draw;
}