Subversion Repositories Kolibri OS

Rev

Blame | Last modification | View Log | RSS feed

  1. /*
  2.  * Copyright 2010 Christoph Bumiller
  3.  *
  4.  * Permission is hereby granted, free of charge, to any person obtaining a
  5.  * copy of this software and associated documentation files (the "Software"),
  6.  * to deal in the Software without restriction, including without limitation
  7.  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8.  * and/or sell copies of the Software, and to permit persons to whom the
  9.  * Software is furnished to do so, subject to the following conditions:
  10.  *
  11.  * The above copyright notice and this permission notice shall be included in
  12.  * all copies or substantial portions of the Software.
  13.  *
  14.  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15.  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16.  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  17.  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18.  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19.  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20.  * OTHER DEALINGS IN THE SOFTWARE.
  21.  */
  22.  
  23. #include "pipe/p_defines.h"
  24. #include "util/u_helpers.h"
  25. #include "util/u_inlines.h"
  26. #include "util/u_transfer.h"
  27. #include "util/format_srgb.h"
  28.  
  29. #include "tgsi/tgsi_parse.h"
  30.  
  31. #include "nv50/nv50_stateobj.h"
  32. #include "nv50/nv50_context.h"
  33.  
  34. #include "nv50/nv50_3d.xml.h"
  35. #include "nv50/nv50_texture.xml.h"
  36.  
  37. #include "nouveau_gldefs.h"
  38.  
  39. /* Caveats:
  40.  *  ! pipe_sampler_state.normalized_coords is ignored - rectangle textures will
  41.  *     use non-normalized coordinates, everything else won't
  42.  *    (The relevant bit is in the TIC entry and not the TSC entry.)
  43.  *
  44.  *  ! pipe_sampler_state.seamless_cube_map is ignored - seamless filtering is
  45.  *     always activated on NVA0 +
  46.  *    (Give me the global bit, otherwise it's not worth the CPU work.)
  47.  *
  48.  *  ! pipe_sampler_state.border_color is not swizzled according to the texture
  49.  *     swizzle in pipe_sampler_view
  50.  *    (This will be ugly with indirect independent texture/sampler access,
  51.  *     we'd have to emulate the logic in the shader. GL doesn't have that,
  52.  *     D3D doesn't have swizzle, if we knew what we were implementing we'd be
  53.  *     good.)
  54.  *
  55.  *  ! pipe_rasterizer_state.line_last_pixel is ignored - it is never drawn
  56.  *
  57.  *  ! pipe_rasterizer_state.flatshade_first also applies to QUADS
  58.  *    (There's a GL query for that, forcing an exception is just ridiculous.)
  59.  *
  60.  *  ! pipe_rasterizer_state.sprite_coord_enable is masked with 0xff on NVC0
  61.  *    (The hardware only has 8 slots meant for TexCoord and we have to assign
  62.  *     in advance to maintain elegant separate shader objects.)
  63.  */
  64.  
  65. static INLINE uint32_t
  66. nv50_colormask(unsigned mask)
  67. {
  68.    uint32_t ret = 0;
  69.  
  70.    if (mask & PIPE_MASK_R)
  71.       ret |= 0x0001;
  72.    if (mask & PIPE_MASK_G)
  73.       ret |= 0x0010;
  74.    if (mask & PIPE_MASK_B)
  75.       ret |= 0x0100;
  76.    if (mask & PIPE_MASK_A)
  77.       ret |= 0x1000;
  78.  
  79.    return ret;
  80. }
  81.  
/* Expand to a switch case mapping PIPE_BLENDFACTOR_a to NV50_BLEND_FACTOR_b. */
#define NV50_BLEND_FACTOR_CASE(a, b) \
   case PIPE_BLENDFACTOR_##a: return NV50_BLEND_FACTOR_##b
  84.  
/* Translate a gallium PIPE_BLENDFACTOR_* value to the hardware blend
 * factor encoding; unrecognized values fall back to ZERO.
 */
static INLINE uint32_t
nv50_blend_fac(unsigned factor)
{
   switch (factor) {
   NV50_BLEND_FACTOR_CASE(ONE, ONE);
   NV50_BLEND_FACTOR_CASE(SRC_COLOR, SRC_COLOR);
   NV50_BLEND_FACTOR_CASE(SRC_ALPHA, SRC_ALPHA);
   NV50_BLEND_FACTOR_CASE(DST_ALPHA, DST_ALPHA);
   NV50_BLEND_FACTOR_CASE(DST_COLOR, DST_COLOR);
   NV50_BLEND_FACTOR_CASE(SRC_ALPHA_SATURATE, SRC_ALPHA_SATURATE);
   NV50_BLEND_FACTOR_CASE(CONST_COLOR, CONSTANT_COLOR);
   NV50_BLEND_FACTOR_CASE(CONST_ALPHA, CONSTANT_ALPHA);
   NV50_BLEND_FACTOR_CASE(SRC1_COLOR, SRC1_COLOR);
   NV50_BLEND_FACTOR_CASE(SRC1_ALPHA, SRC1_ALPHA);
   NV50_BLEND_FACTOR_CASE(ZERO, ZERO);
   NV50_BLEND_FACTOR_CASE(INV_SRC_COLOR, ONE_MINUS_SRC_COLOR);
   NV50_BLEND_FACTOR_CASE(INV_SRC_ALPHA, ONE_MINUS_SRC_ALPHA);
   NV50_BLEND_FACTOR_CASE(INV_DST_ALPHA, ONE_MINUS_DST_ALPHA);
   NV50_BLEND_FACTOR_CASE(INV_DST_COLOR, ONE_MINUS_DST_COLOR);
   NV50_BLEND_FACTOR_CASE(INV_CONST_COLOR, ONE_MINUS_CONSTANT_COLOR);
   NV50_BLEND_FACTOR_CASE(INV_CONST_ALPHA, ONE_MINUS_CONSTANT_ALPHA);
   NV50_BLEND_FACTOR_CASE(INV_SRC1_COLOR, ONE_MINUS_SRC1_COLOR);
   NV50_BLEND_FACTOR_CASE(INV_SRC1_ALPHA, ONE_MINUS_SRC1_ALPHA);
   default:
      return NV50_BLEND_FACTOR_ZERO;
   }
}
  112.  
  113. static void *
  114. nv50_blend_state_create(struct pipe_context *pipe,
  115.                         const struct pipe_blend_state *cso)
  116. {
  117.    struct nv50_blend_stateobj *so = CALLOC_STRUCT(nv50_blend_stateobj);
  118.    int i;
  119.    boolean emit_common_func = cso->rt[0].blend_enable;
  120.    uint32_t ms;
  121.  
  122.    if (nv50_context(pipe)->screen->tesla->oclass >= NVA3_3D_CLASS) {
  123.       SB_BEGIN_3D(so, BLEND_INDEPENDENT, 1);
  124.       SB_DATA    (so, cso->independent_blend_enable);
  125.    }
  126.  
  127.    so->pipe = *cso;
  128.  
  129.    SB_BEGIN_3D(so, COLOR_MASK_COMMON, 1);
  130.    SB_DATA    (so, !cso->independent_blend_enable);
  131.  
  132.    SB_BEGIN_3D(so, BLEND_ENABLE_COMMON, 1);
  133.    SB_DATA    (so, !cso->independent_blend_enable);
  134.  
  135.    if (cso->independent_blend_enable) {
  136.       SB_BEGIN_3D(so, BLEND_ENABLE(0), 8);
  137.       for (i = 0; i < 8; ++i) {
  138.          SB_DATA(so, cso->rt[i].blend_enable);
  139.          if (cso->rt[i].blend_enable)
  140.             emit_common_func = TRUE;
  141.       }
  142.  
  143.       if (nv50_context(pipe)->screen->tesla->oclass >= NVA3_3D_CLASS) {
  144.          emit_common_func = FALSE;
  145.  
  146.          for (i = 0; i < 8; ++i) {
  147.             if (!cso->rt[i].blend_enable)
  148.                continue;
  149.             SB_BEGIN_3D_(so, NVA3_3D_IBLEND_EQUATION_RGB(i), 6);
  150.             SB_DATA     (so, nvgl_blend_eqn(cso->rt[i].rgb_func));
  151.             SB_DATA     (so, nv50_blend_fac(cso->rt[i].rgb_src_factor));
  152.             SB_DATA     (so, nv50_blend_fac(cso->rt[i].rgb_dst_factor));
  153.             SB_DATA     (so, nvgl_blend_eqn(cso->rt[i].alpha_func));
  154.             SB_DATA     (so, nv50_blend_fac(cso->rt[i].alpha_src_factor));
  155.             SB_DATA     (so, nv50_blend_fac(cso->rt[i].alpha_dst_factor));
  156.          }
  157.       }
  158.    } else {
  159.       SB_BEGIN_3D(so, BLEND_ENABLE(0), 1);
  160.       SB_DATA    (so, cso->rt[0].blend_enable);
  161.    }
  162.  
  163.    if (emit_common_func) {
  164.       SB_BEGIN_3D(so, BLEND_EQUATION_RGB, 5);
  165.       SB_DATA    (so, nvgl_blend_eqn(cso->rt[0].rgb_func));
  166.       SB_DATA    (so, nv50_blend_fac(cso->rt[0].rgb_src_factor));
  167.       SB_DATA    (so, nv50_blend_fac(cso->rt[0].rgb_dst_factor));
  168.       SB_DATA    (so, nvgl_blend_eqn(cso->rt[0].alpha_func));
  169.       SB_DATA    (so, nv50_blend_fac(cso->rt[0].alpha_src_factor));
  170.       SB_BEGIN_3D(so, BLEND_FUNC_DST_ALPHA, 1);
  171.       SB_DATA    (so, nv50_blend_fac(cso->rt[0].alpha_dst_factor));
  172.    }
  173.  
  174.    if (cso->logicop_enable) {
  175.       SB_BEGIN_3D(so, LOGIC_OP_ENABLE, 2);
  176.       SB_DATA    (so, 1);
  177.       SB_DATA    (so, nvgl_logicop_func(cso->logicop_func));
  178.    } else {
  179.       SB_BEGIN_3D(so, LOGIC_OP_ENABLE, 1);
  180.       SB_DATA    (so, 0);
  181.    }
  182.  
  183.    if (cso->independent_blend_enable) {
  184.       SB_BEGIN_3D(so, COLOR_MASK(0), 8);
  185.       for (i = 0; i < 8; ++i)
  186.          SB_DATA(so, nv50_colormask(cso->rt[i].colormask));
  187.    } else {
  188.       SB_BEGIN_3D(so, COLOR_MASK(0), 1);
  189.       SB_DATA    (so, nv50_colormask(cso->rt[0].colormask));
  190.    }
  191.  
  192.    ms = 0;
  193.    if (cso->alpha_to_coverage)
  194.       ms |= NV50_3D_MULTISAMPLE_CTRL_ALPHA_TO_COVERAGE;
  195.    if (cso->alpha_to_one)
  196.       ms |= NV50_3D_MULTISAMPLE_CTRL_ALPHA_TO_ONE;
  197.  
  198.    SB_BEGIN_3D(so, MULTISAMPLE_CTRL, 1);
  199.    SB_DATA    (so, ms);
  200.  
  201.    assert(so->size <= (sizeof(so->state) / sizeof(so->state[0])));
  202.    return so;
  203. }
  204.  
  205. static void
  206. nv50_blend_state_bind(struct pipe_context *pipe, void *hwcso)
  207. {
  208.    struct nv50_context *nv50 = nv50_context(pipe);
  209.  
  210.    nv50->blend = hwcso;
  211.    nv50->dirty |= NV50_NEW_BLEND;
  212. }
  213.  
/* Destroy a blend CSO; it holds no references, so a plain free suffices. */
static void
nv50_blend_state_delete(struct pipe_context *pipe, void *hwcso)
{
   FREE(hwcso);
}
  219.  
/* NOTE: ignoring line_last_pixel */
/* Translate a gallium rasterizer CSO into a pre-built nv50 command stream
 * snippet.  Returns NULL on allocation failure.
 */
static void *
nv50_rasterizer_state_create(struct pipe_context *pipe,
                             const struct pipe_rasterizer_state *cso)
{
   struct nv50_rasterizer_stateobj *so;
   uint32_t reg;

   so = CALLOC_STRUCT(nv50_rasterizer_stateobj);
   if (!so)
      return NULL;
   so->pipe = *cso;

#ifndef NV50_SCISSORS_CLIPPING
   for (int i = 0; i < NV50_MAX_VIEWPORTS; i++) {
      SB_BEGIN_3D(so, SCISSOR_ENABLE(i), 1);
      SB_DATA    (so, cso->scissor);
   }
#endif

   SB_BEGIN_3D(so, SHADE_MODEL, 1);
   SB_DATA    (so, cso->flatshade ? NV50_3D_SHADE_MODEL_FLAT :
                                    NV50_3D_SHADE_MODEL_SMOOTH);
   /* gallium specifies "first", the hardware bit means "last" - invert. */
   SB_BEGIN_3D(so, PROVOKING_VERTEX_LAST, 1);
   SB_DATA    (so, !cso->flatshade_first);
   SB_BEGIN_3D(so, VERTEX_TWO_SIDE_ENABLE, 1);
   SB_DATA    (so, cso->light_twoside);

   /* NOTE(review): looks like one enable bit per RT/component nibble -
    * confirm against the FRAG_COLOR_CLAMP_EN register description.
    */
   SB_BEGIN_3D(so, FRAG_COLOR_CLAMP_EN, 1);
   SB_DATA    (so, cso->clamp_fragment_color ? 0x11111111 : 0x00000000);

   SB_BEGIN_3D(so, MULTISAMPLE_ENABLE, 1);
   SB_DATA    (so, cso->multisample);

   SB_BEGIN_3D(so, LINE_WIDTH, 1);
   SB_DATA    (so, fui(cso->line_width));
   SB_BEGIN_3D(so, LINE_SMOOTH_ENABLE, 1);
   SB_DATA    (so, cso->line_smooth);

   SB_BEGIN_3D(so, LINE_STIPPLE_ENABLE, 1);
   if (cso->line_stipple_enable) {
      SB_DATA    (so, 1);
      /* Pattern and repeat factor share one register: pattern in the
       * upper bits, factor in the low byte.
       */
      SB_BEGIN_3D(so, LINE_STIPPLE, 1);
      SB_DATA    (so, (cso->line_stipple_pattern << 8) |
                  cso->line_stipple_factor);
   } else {
      SB_DATA    (so, 0);
   }

   /* Only bake a constant point size if it is not per-vertex; otherwise
    * the vertex shader output controls it.
    */
   if (!cso->point_size_per_vertex) {
      SB_BEGIN_3D(so, POINT_SIZE, 1);
      SB_DATA    (so, fui(cso->point_size));
   }
   SB_BEGIN_3D(so, POINT_SPRITE_ENABLE, 1);
   SB_DATA    (so, cso->point_quad_rasterization);
   SB_BEGIN_3D(so, POINT_SMOOTH_ENABLE, 1);
   SB_DATA    (so, cso->point_smooth);

   SB_BEGIN_3D(so, POLYGON_MODE_FRONT, 3);
   SB_DATA    (so, nvgl_polygon_mode(cso->fill_front));
   SB_DATA    (so, nvgl_polygon_mode(cso->fill_back));
   SB_DATA    (so, cso->poly_smooth);

   SB_BEGIN_3D(so, CULL_FACE_ENABLE, 3);
   SB_DATA    (so, cso->cull_face != PIPE_FACE_NONE);
   SB_DATA    (so, cso->front_ccw ? NV50_3D_FRONT_FACE_CCW :
                                    NV50_3D_FRONT_FACE_CW);
   switch (cso->cull_face) {
   case PIPE_FACE_FRONT_AND_BACK:
      SB_DATA(so, NV50_3D_CULL_FACE_FRONT_AND_BACK);
      break;
   case PIPE_FACE_FRONT:
      SB_DATA(so, NV50_3D_CULL_FACE_FRONT);
      break;
   case PIPE_FACE_BACK:
   default:
     SB_DATA(so, NV50_3D_CULL_FACE_BACK);
     break;
   }

   SB_BEGIN_3D(so, POLYGON_STIPPLE_ENABLE, 1);
   SB_DATA    (so, cso->poly_stipple_enable);
   SB_BEGIN_3D(so, POLYGON_OFFSET_POINT_ENABLE, 3);
   SB_DATA    (so, cso->offset_point);
   SB_DATA    (so, cso->offset_line);
   SB_DATA    (so, cso->offset_tri);

   if (cso->offset_point || cso->offset_line || cso->offset_tri) {
      SB_BEGIN_3D(so, POLYGON_OFFSET_FACTOR, 1);
      SB_DATA    (so, fui(cso->offset_scale));
      /* NOTE(review): units are doubled here; presumably the hardware
       * unit is half of gallium's - confirm.
       */
      SB_BEGIN_3D(so, POLYGON_OFFSET_UNITS, 1);
      SB_DATA    (so, fui(cso->offset_units * 2.0f));
      SB_BEGIN_3D(so, POLYGON_OFFSET_CLAMP, 1);
      SB_DATA    (so, fui(cso->offset_clamp));
   }

   /* depth_clip disabled means depth clamping at both clip planes. */
   if (cso->depth_clip) {
      reg = 0;
   } else {
      reg =
         NV50_3D_VIEW_VOLUME_CLIP_CTRL_DEPTH_CLAMP_NEAR |
         NV50_3D_VIEW_VOLUME_CLIP_CTRL_DEPTH_CLAMP_FAR |
         NV50_3D_VIEW_VOLUME_CLIP_CTRL_UNK12_UNK1;
   }
#ifndef NV50_SCISSORS_CLIPPING
   reg |=
      NV50_3D_VIEW_VOLUME_CLIP_CTRL_UNK7 |
      NV50_3D_VIEW_VOLUME_CLIP_CTRL_UNK12_UNK1;
#endif
   SB_BEGIN_3D(so, VIEW_VOLUME_CLIP_CTRL, 1);
   SB_DATA    (so, reg);

   SB_BEGIN_3D(so, DEPTH_CLIP_NEGATIVE_Z, 1);
   SB_DATA    (so, cso->clip_halfz);

   SB_BEGIN_3D(so, PIXEL_CENTER_INTEGER, 1);
   SB_DATA    (so, !cso->half_pixel_center);

   assert(so->size <= (sizeof(so->state) / sizeof(so->state[0])));
   return (void *)so;
}
  341.  
  342. static void
  343. nv50_rasterizer_state_bind(struct pipe_context *pipe, void *hwcso)
  344. {
  345.    struct nv50_context *nv50 = nv50_context(pipe);
  346.  
  347.    nv50->rast = hwcso;
  348.    nv50->dirty |= NV50_NEW_RASTERIZER;
  349. }
  350.  
/* Destroy a rasterizer CSO; it holds no references, a plain free suffices. */
static void
nv50_rasterizer_state_delete(struct pipe_context *pipe, void *hwcso)
{
   FREE(hwcso);
}
  356.  
  357. static void *
  358. nv50_zsa_state_create(struct pipe_context *pipe,
  359.                       const struct pipe_depth_stencil_alpha_state *cso)
  360. {
  361.    struct nv50_zsa_stateobj *so = CALLOC_STRUCT(nv50_zsa_stateobj);
  362.  
  363.    so->pipe = *cso;
  364.  
  365.    SB_BEGIN_3D(so, DEPTH_WRITE_ENABLE, 1);
  366.    SB_DATA    (so, cso->depth.writemask);
  367.    SB_BEGIN_3D(so, DEPTH_TEST_ENABLE, 1);
  368.    if (cso->depth.enabled) {
  369.       SB_DATA    (so, 1);
  370.       SB_BEGIN_3D(so, DEPTH_TEST_FUNC, 1);
  371.       SB_DATA    (so, nvgl_comparison_op(cso->depth.func));
  372.    } else {
  373.       SB_DATA    (so, 0);
  374.    }
  375.  
  376.    if (cso->stencil[0].enabled) {
  377.       SB_BEGIN_3D(so, STENCIL_ENABLE, 5);
  378.       SB_DATA    (so, 1);
  379.       SB_DATA    (so, nvgl_stencil_op(cso->stencil[0].fail_op));
  380.       SB_DATA    (so, nvgl_stencil_op(cso->stencil[0].zfail_op));
  381.       SB_DATA    (so, nvgl_stencil_op(cso->stencil[0].zpass_op));
  382.       SB_DATA    (so, nvgl_comparison_op(cso->stencil[0].func));
  383.       SB_BEGIN_3D(so, STENCIL_FRONT_MASK, 2);
  384.       SB_DATA    (so, cso->stencil[0].writemask);
  385.       SB_DATA    (so, cso->stencil[0].valuemask);
  386.    } else {
  387.       SB_BEGIN_3D(so, STENCIL_ENABLE, 1);
  388.       SB_DATA    (so, 0);
  389.    }
  390.  
  391.    if (cso->stencil[1].enabled) {
  392.       assert(cso->stencil[0].enabled);
  393.       SB_BEGIN_3D(so, STENCIL_TWO_SIDE_ENABLE, 5);
  394.       SB_DATA    (so, 1);
  395.       SB_DATA    (so, nvgl_stencil_op(cso->stencil[1].fail_op));
  396.       SB_DATA    (so, nvgl_stencil_op(cso->stencil[1].zfail_op));
  397.       SB_DATA    (so, nvgl_stencil_op(cso->stencil[1].zpass_op));
  398.       SB_DATA    (so, nvgl_comparison_op(cso->stencil[1].func));
  399.       SB_BEGIN_3D(so, STENCIL_BACK_MASK, 2);
  400.       SB_DATA    (so, cso->stencil[1].writemask);
  401.       SB_DATA    (so, cso->stencil[1].valuemask);
  402.    } else {
  403.       SB_BEGIN_3D(so, STENCIL_TWO_SIDE_ENABLE, 1);
  404.       SB_DATA    (so, 0);
  405.    }
  406.  
  407.    SB_BEGIN_3D(so, ALPHA_TEST_ENABLE, 1);
  408.    if (cso->alpha.enabled) {
  409.       SB_DATA    (so, 1);
  410.       SB_BEGIN_3D(so, ALPHA_TEST_REF, 2);
  411.       SB_DATA    (so, fui(cso->alpha.ref_value));
  412.       SB_DATA    (so, nvgl_comparison_op(cso->alpha.func));
  413.    } else {
  414.       SB_DATA    (so, 0);
  415.    }
  416.  
  417.    assert(so->size <= (sizeof(so->state) / sizeof(so->state[0])));
  418.    return (void *)so;
  419. }
  420.  
  421. static void
  422. nv50_zsa_state_bind(struct pipe_context *pipe, void *hwcso)
  423. {
  424.    struct nv50_context *nv50 = nv50_context(pipe);
  425.  
  426.    nv50->zsa = hwcso;
  427.    nv50->dirty |= NV50_NEW_ZSA;
  428. }
  429.  
/* Destroy a zsa CSO; it holds no references, so a plain free suffices. */
static void
nv50_zsa_state_delete(struct pipe_context *pipe, void *hwcso)
{
   FREE(hwcso);
}
  435.  
  436. /* ====================== SAMPLERS AND TEXTURES ================================
  437.  */
  438.  
/* Expand to a switch case mapping PIPE_TEX_WRAP_n to NV50_TSC_WRAP_n. */
#define NV50_TSC_WRAP_CASE(n) \
    case PIPE_TEX_WRAP_##n: return NV50_TSC_WRAP_##n
  441.  
/* Translate a PIPE_TEX_WRAP_* mode to the hardware TSC wrap encoding;
 * unknown modes log an error and fall back to REPEAT.
 */
static INLINE unsigned
nv50_tsc_wrap_mode(unsigned wrap)
{
   switch (wrap) {
   NV50_TSC_WRAP_CASE(REPEAT);
   NV50_TSC_WRAP_CASE(MIRROR_REPEAT);
   NV50_TSC_WRAP_CASE(CLAMP_TO_EDGE);
   NV50_TSC_WRAP_CASE(CLAMP_TO_BORDER);
   NV50_TSC_WRAP_CASE(CLAMP);
   NV50_TSC_WRAP_CASE(MIRROR_CLAMP_TO_EDGE);
   NV50_TSC_WRAP_CASE(MIRROR_CLAMP_TO_BORDER);
   NV50_TSC_WRAP_CASE(MIRROR_CLAMP);
   default:
       NOUVEAU_ERR("unknown wrap mode: %d\n", wrap);
       return NV50_TSC_WRAP_REPEAT;
   }
}
  459.  
  460. void *
  461. nv50_sampler_state_create(struct pipe_context *pipe,
  462.                           const struct pipe_sampler_state *cso)
  463. {
  464.    struct nv50_tsc_entry *so = MALLOC_STRUCT(nv50_tsc_entry);
  465.    float f[2];
  466.  
  467.    so->id = -1;
  468.  
  469.    so->tsc[0] = (0x00026000 |
  470.                  (nv50_tsc_wrap_mode(cso->wrap_s) << 0) |
  471.                  (nv50_tsc_wrap_mode(cso->wrap_t) << 3) |
  472.                  (nv50_tsc_wrap_mode(cso->wrap_r) << 6));
  473.  
  474.    switch (cso->mag_img_filter) {
  475.    case PIPE_TEX_FILTER_LINEAR:
  476.       so->tsc[1] = NV50_TSC_1_MAGF_LINEAR;
  477.       break;
  478.    case PIPE_TEX_FILTER_NEAREST:
  479.    default:
  480.       so->tsc[1] = NV50_TSC_1_MAGF_NEAREST;
  481.       break;
  482.    }
  483.  
  484.    switch (cso->min_img_filter) {
  485.    case PIPE_TEX_FILTER_LINEAR:
  486.       so->tsc[1] |= NV50_TSC_1_MINF_LINEAR;
  487.       break;
  488.    case PIPE_TEX_FILTER_NEAREST:
  489.    default:
  490.       so->tsc[1] |= NV50_TSC_1_MINF_NEAREST;
  491.       break;
  492.    }
  493.  
  494.    switch (cso->min_mip_filter) {
  495.    case PIPE_TEX_MIPFILTER_LINEAR:
  496.       so->tsc[1] |= NV50_TSC_1_MIPF_LINEAR;
  497.       break;
  498.    case PIPE_TEX_MIPFILTER_NEAREST:
  499.       so->tsc[1] |= NV50_TSC_1_MIPF_NEAREST;
  500.       break;
  501.    case PIPE_TEX_MIPFILTER_NONE:
  502.    default:
  503.       so->tsc[1] |= NV50_TSC_1_MIPF_NONE;
  504.       break;
  505.    }
  506.  
  507.    if (nouveau_screen(pipe->screen)->class_3d >= NVE4_3D_CLASS) {
  508.       if (cso->seamless_cube_map)
  509.          so->tsc[1] |= NVE4_TSC_1_CUBE_SEAMLESS;
  510.       if (!cso->normalized_coords)
  511.          so->tsc[1] |= NVE4_TSC_1_FORCE_NONNORMALIZED_COORDS;
  512.    }
  513.  
  514.    if (cso->max_anisotropy >= 16)
  515.       so->tsc[0] |= (7 << 20);
  516.    else
  517.    if (cso->max_anisotropy >= 12)
  518.       so->tsc[0] |= (6 << 20);
  519.    else {
  520.       so->tsc[0] |= (cso->max_anisotropy >> 1) << 20;
  521.  
  522.       if (cso->max_anisotropy >= 4)
  523.          so->tsc[1] |= NV50_TSC_1_UNKN_ANISO_35;
  524.       else
  525.       if (cso->max_anisotropy >= 2)
  526.          so->tsc[1] |= NV50_TSC_1_UNKN_ANISO_15;
  527.    }
  528.  
  529.    if (cso->compare_mode == PIPE_TEX_COMPARE_R_TO_TEXTURE) {
  530.       /* NOTE: must be deactivated for non-shadow textures */
  531.       so->tsc[0] |= (1 << 9);
  532.       so->tsc[0] |= (nvgl_comparison_op(cso->compare_func) & 0x7) << 10;
  533.    }
  534.  
  535.    f[0] = CLAMP(cso->lod_bias, -16.0f, 15.0f);
  536.    so->tsc[1] |= ((int)(f[0] * 256.0f) & 0x1fff) << 12;
  537.  
  538.    f[0] = CLAMP(cso->min_lod, 0.0f, 15.0f);
  539.    f[1] = CLAMP(cso->max_lod, 0.0f, 15.0f);
  540.    so->tsc[2] =
  541.       (((int)(f[1] * 256.0f) & 0xfff) << 12) | ((int)(f[0] * 256.0f) & 0xfff);
  542.  
  543.    so->tsc[2] |=
  544.       util_format_linear_float_to_srgb_8unorm(cso->border_color.f[0]) << 24;
  545.    so->tsc[3] =
  546.       util_format_linear_float_to_srgb_8unorm(cso->border_color.f[1]) << 12;
  547.    so->tsc[3] |=
  548.       util_format_linear_float_to_srgb_8unorm(cso->border_color.f[2]) << 20;
  549.  
  550.    so->tsc[4] = fui(cso->border_color.f[0]);
  551.    so->tsc[5] = fui(cso->border_color.f[1]);
  552.    so->tsc[6] = fui(cso->border_color.f[2]);
  553.    so->tsc[7] = fui(cso->border_color.f[3]);
  554.  
  555.    return (void *)so;
  556. }
  557.  
  558. static void
  559. nv50_sampler_state_delete(struct pipe_context *pipe, void *hwcso)
  560. {
  561.    unsigned s, i;
  562.  
  563.    for (s = 0; s < 3; ++s) {
  564.       assert(nv50_context(pipe)->num_samplers[s] <= PIPE_MAX_SAMPLERS);
  565.       for (i = 0; i < nv50_context(pipe)->num_samplers[s]; ++i)
  566.          if (nv50_context(pipe)->samplers[s][i] == hwcso)
  567.             nv50_context(pipe)->samplers[s][i] = NULL;
  568.    }
  569.  
  570.    nv50_screen_tsc_free(nv50_context(pipe)->screen, nv50_tsc_entry(hwcso));
  571.  
  572.    FREE(hwcso);
  573. }
  574.  
/* Replace the sampler (TSC) bindings of shader stage s with the nr given
 * ones, unlocking every entry that gets replaced or dropped.
 */
static INLINE void
nv50_stage_sampler_states_bind(struct nv50_context *nv50, int s,
                               unsigned nr, void **hwcso)
{
   unsigned i;

   assert(nr <= PIPE_MAX_SAMPLERS);
   for (i = 0; i < nr; ++i) {
      struct nv50_tsc_entry *old = nv50->samplers[s][i];

      /* Install the new entry before unlocking the old one. */
      nv50->samplers[s][i] = nv50_tsc_entry(hwcso[i]);
      if (old)
         nv50_screen_tsc_unlock(nv50->screen, old);
   }
   assert(nv50->num_samplers[s] <= PIPE_MAX_SAMPLERS);
   /* Unbind any previously bound samplers past the new count. */
   for (; i < nv50->num_samplers[s]; ++i) {
      if (nv50->samplers[s][i]) {
         nv50_screen_tsc_unlock(nv50->screen, nv50->samplers[s][i]);
         nv50->samplers[s][i] = NULL;
      }
   }

   nv50->num_samplers[s] = nr;

   nv50->dirty |= NV50_NEW_SAMPLERS;
}
  601.  
/* Vertex shader samplers live in stage slot 0. */
static void
nv50_vp_sampler_states_bind(struct pipe_context *pipe, unsigned nr, void **s)
{
   nv50_stage_sampler_states_bind(nv50_context(pipe), 0, nr, s);
}
  607.  
/* Fragment shader samplers live in stage slot 2. */
static void
nv50_fp_sampler_states_bind(struct pipe_context *pipe, unsigned nr, void **s)
{
   nv50_stage_sampler_states_bind(nv50_context(pipe), 2, nr, s);
}
  613.  
/* Geometry shader samplers live in stage slot 1. */
static void
nv50_gp_sampler_states_bind(struct pipe_context *pipe, unsigned nr, void **s)
{
   nv50_stage_sampler_states_bind(nv50_context(pipe), 1, nr, s);
}
  619.  
  620. static void
  621. nv50_bind_sampler_states(struct pipe_context *pipe,
  622.                          unsigned shader, unsigned start,
  623.                          unsigned num_samplers, void **samplers)
  624. {
  625.    assert(start == 0);
  626.    switch (shader) {
  627.    case PIPE_SHADER_VERTEX:
  628.       nv50_vp_sampler_states_bind(pipe, num_samplers, samplers);
  629.       break;
  630.    case PIPE_SHADER_GEOMETRY:
  631.       nv50_gp_sampler_states_bind(pipe, num_samplers, samplers);
  632.       break;
  633.    case PIPE_SHADER_FRAGMENT:
  634.       nv50_fp_sampler_states_bind(pipe, num_samplers, samplers);
  635.       break;
  636.    }
  637. }
  638.  
  639.  
  640.  
/* NOTE: only called when not referenced anywhere, won't be bound */
static void
nv50_sampler_view_destroy(struct pipe_context *pipe,
                          struct pipe_sampler_view *view)
{
   /* Drop the reference to the underlying resource. */
   pipe_resource_reference(&view->texture, NULL);

   /* Release the view's slot in the screen's TIC table. */
   nv50_screen_tic_free(nv50_context(pipe)->screen, nv50_tic_entry(view));

   FREE(nv50_tic_entry(view));
}
  652.  
/* Replace the texture (TIC) bindings of shader stage s with the nr given
 * views, unlocking and unreferencing every view that is dropped or
 * replaced.
 */
static INLINE void
nv50_stage_set_sampler_views(struct nv50_context *nv50, int s,
                             unsigned nr,
                             struct pipe_sampler_view **views)
{
   unsigned i;

   assert(nr <= PIPE_MAX_SAMPLERS);
   for (i = 0; i < nr; ++i) {
      struct nv50_tic_entry *old = nv50_tic_entry(nv50->textures[s][i]);
      if (old)
         nv50_screen_tic_unlock(nv50->screen, old);

      pipe_sampler_view_reference(&nv50->textures[s][i], views[i]);
   }

   /* Unbind any previously bound views past the new count. */
   assert(nv50->num_textures[s] <= PIPE_MAX_SAMPLERS);
   for (i = nr; i < nv50->num_textures[s]; ++i) {
      struct nv50_tic_entry *old = nv50_tic_entry(nv50->textures[s][i]);
      if (!old)
         continue;
      nv50_screen_tic_unlock(nv50->screen, old);

      pipe_sampler_view_reference(&nv50->textures[s][i], NULL);
   }

   nv50->num_textures[s] = nr;

   /* Texture bufctx entries are rebuilt on next validation. */
   nouveau_bufctx_reset(nv50->bufctx_3d, NV50_BIND_TEXTURES);

   nv50->dirty |= NV50_NEW_TEXTURES;
}
  685.  
  686. static void
  687. nv50_set_sampler_views(struct pipe_context *pipe, unsigned shader,
  688.                        unsigned start, unsigned nr,
  689.                        struct pipe_sampler_view **views)
  690. {
  691.    assert(start == 0);
  692.    switch (shader) {
  693.    case PIPE_SHADER_VERTEX:
  694.       nv50_stage_set_sampler_views(nv50_context(pipe), 0, nr, views);
  695.       break;
  696.    case PIPE_SHADER_GEOMETRY:
  697.       nv50_stage_set_sampler_views(nv50_context(pipe), 1, nr, views);
  698.       break;
  699.    case PIPE_SHADER_FRAGMENT:
  700.       nv50_stage_set_sampler_views(nv50_context(pipe), 2, nr, views);
  701.       break;
  702.    default:
  703.       ;
  704.    }
  705. }
  706.  
  707.  
  708.  
  709. /* ============================= SHADERS =======================================
  710.  */
  711.  
  712. static void *
  713. nv50_sp_state_create(struct pipe_context *pipe,
  714.                      const struct pipe_shader_state *cso, unsigned type)
  715. {
  716.    struct nv50_program *prog;
  717.  
  718.    prog = CALLOC_STRUCT(nv50_program);
  719.    if (!prog)
  720.       return NULL;
  721.  
  722.    prog->type = type;
  723.    prog->pipe.tokens = tgsi_dup_tokens(cso->tokens);
  724.  
  725.    if (cso->stream_output.num_outputs)
  726.       prog->pipe.stream_output = cso->stream_output;
  727.  
  728.    return (void *)prog;
  729. }
  730.  
/* Common shader-program CSO destructor: release GPU resources, then the
 * token copy made in nv50_sp_state_create, then the object itself.
 */
static void
nv50_sp_state_delete(struct pipe_context *pipe, void *hwcso)
{
   struct nv50_program *prog = (struct nv50_program *)hwcso;

   nv50_program_destroy(nv50_context(pipe), prog);

   FREE((void *)prog->pipe.tokens);
   FREE(prog);
}
  741.  
/* pipe_context::create_vs_state */
static void *
nv50_vp_state_create(struct pipe_context *pipe,
                     const struct pipe_shader_state *cso)
{
   return nv50_sp_state_create(pipe, cso, PIPE_SHADER_VERTEX);
}
  748.  
  749. static void
  750. nv50_vp_state_bind(struct pipe_context *pipe, void *hwcso)
  751. {
  752.     struct nv50_context *nv50 = nv50_context(pipe);
  753.  
  754.     nv50->vertprog = hwcso;
  755.     nv50->dirty |= NV50_NEW_VERTPROG;
  756. }
  757.  
/* pipe_context::create_fs_state */
static void *
nv50_fp_state_create(struct pipe_context *pipe,
                     const struct pipe_shader_state *cso)
{
   return nv50_sp_state_create(pipe, cso, PIPE_SHADER_FRAGMENT);
}
  764.  
  765. static void
  766. nv50_fp_state_bind(struct pipe_context *pipe, void *hwcso)
  767. {
  768.     struct nv50_context *nv50 = nv50_context(pipe);
  769.  
  770.     nv50->fragprog = hwcso;
  771.     nv50->dirty |= NV50_NEW_FRAGPROG;
  772. }
  773.  
/* pipe_context::create_gs_state */
static void *
nv50_gp_state_create(struct pipe_context *pipe,
                     const struct pipe_shader_state *cso)
{
   return nv50_sp_state_create(pipe, cso, PIPE_SHADER_GEOMETRY);
}
  780.  
  781. static void
  782. nv50_gp_state_bind(struct pipe_context *pipe, void *hwcso)
  783. {
  784.     struct nv50_context *nv50 = nv50_context(pipe);
  785.  
  786.     nv50->gmtyprog = hwcso;
  787.     nv50->dirty |= NV50_NEW_GMTYPROG;
  788. }
  789.  
/* pipe_context::set_constant_buffer: bind (or unbind, cb == NULL) the
 * constant buffer at the given slot of the given shader stage.  Both
 * user-memory and resource-backed buffers are supported.
 */
static void
nv50_set_constant_buffer(struct pipe_context *pipe, uint shader, uint index,
                         struct pipe_constant_buffer *cb)
{
   struct nv50_context *nv50 = nv50_context(pipe);
   struct pipe_resource *res = cb ? cb->buffer : NULL;
   const unsigned s = nv50_context_shader_stage(shader);
   const unsigned i = index;

   /* Compute constbufs are not handled by the 3D state tracker path. */
   if (shader == PIPE_SHADER_COMPUTE)
      return;

   assert(i < NV50_MAX_PIPE_CONSTBUFS);
   /* A user buffer slot stores a raw pointer in u.data (no reference);
    * a resource slot must additionally drop its bufctx entry.
    */
   if (nv50->constbuf[s][i].user)
      nv50->constbuf[s][i].u.buf = NULL;
   else
   if (nv50->constbuf[s][i].u.buf)
      nouveau_bufctx_reset(nv50->bufctx_3d, NV50_BIND_CB(s, i));

   pipe_resource_reference(&nv50->constbuf[s][i].u.buf, res);

   nv50->constbuf[s][i].user = (cb && cb->user_buffer) ? TRUE : FALSE;
   if (nv50->constbuf[s][i].user) {
      nv50->constbuf[s][i].u.data = cb->user_buffer;
      nv50->constbuf[s][i].size = cb->buffer_size;
      nv50->constbuf_valid[s] |= 1 << i;
   } else
   if (res) {
      nv50->constbuf[s][i].offset = cb->buffer_offset;
      /* NOTE(review): size rounded up to 0x100, presumably a hardware
       * upload-granularity requirement - confirm.
       */
      nv50->constbuf[s][i].size = align(cb->buffer_size, 0x100);
      nv50->constbuf_valid[s] |= 1 << i;
   } else {
      nv50->constbuf_valid[s] &= ~(1 << i);
   }
   nv50->constbuf_dirty[s] |= 1 << i;

   nv50->dirty |= NV50_NEW_CONSTBUF;
}
  828.  
  829. /* =============================================================================
  830.  */
  831.  
  832. static void
  833. nv50_set_blend_color(struct pipe_context *pipe,
  834.                      const struct pipe_blend_color *bcol)
  835. {
  836.    struct nv50_context *nv50 = nv50_context(pipe);
  837.  
  838.    nv50->blend_colour = *bcol;
  839.    nv50->dirty |= NV50_NEW_BLEND_COLOUR;
  840. }
  841.  
  842. static void
  843. nv50_set_stencil_ref(struct pipe_context *pipe,
  844.                      const struct pipe_stencil_ref *sr)
  845. {
  846.    struct nv50_context *nv50 = nv50_context(pipe);
  847.  
  848.    nv50->stencil_ref = *sr;
  849.    nv50->dirty |= NV50_NEW_STENCIL_REF;
  850. }
  851.  
  852. static void
  853. nv50_set_clip_state(struct pipe_context *pipe,
  854.                     const struct pipe_clip_state *clip)
  855. {
  856.    struct nv50_context *nv50 = nv50_context(pipe);
  857.  
  858.    memcpy(nv50->clip.ucp, clip->ucp, sizeof(clip->ucp));
  859.  
  860.    nv50->dirty |= NV50_NEW_CLIP;
  861. }
  862.  
  863. static void
  864. nv50_set_sample_mask(struct pipe_context *pipe, unsigned sample_mask)
  865. {
  866.    struct nv50_context *nv50 = nv50_context(pipe);
  867.  
  868.    nv50->sample_mask = sample_mask;
  869.    nv50->dirty |= NV50_NEW_SAMPLE_MASK;
  870. }
  871.  
  872. static void
  873. nv50_set_min_samples(struct pipe_context *pipe, unsigned min_samples)
  874. {
  875.    struct nv50_context *nv50 = nv50_context(pipe);
  876.  
  877.    if (nv50->min_samples != min_samples) {
  878.       nv50->min_samples = min_samples;
  879.       nv50->dirty |= NV50_NEW_MIN_SAMPLES;
  880.    }
  881. }
  882.  
  883. static void
  884. nv50_set_framebuffer_state(struct pipe_context *pipe,
  885.                            const struct pipe_framebuffer_state *fb)
  886. {
  887.    struct nv50_context *nv50 = nv50_context(pipe);
  888.    unsigned i;
  889.  
  890.    nouveau_bufctx_reset(nv50->bufctx_3d, NV50_BIND_FB);
  891.  
  892.    for (i = 0; i < fb->nr_cbufs; ++i)
  893.       pipe_surface_reference(&nv50->framebuffer.cbufs[i], fb->cbufs[i]);
  894.    for (; i < nv50->framebuffer.nr_cbufs; ++i)
  895.       pipe_surface_reference(&nv50->framebuffer.cbufs[i], NULL);
  896.  
  897.    nv50->framebuffer.nr_cbufs = fb->nr_cbufs;
  898.  
  899.    nv50->framebuffer.width = fb->width;
  900.    nv50->framebuffer.height = fb->height;
  901.  
  902.    pipe_surface_reference(&nv50->framebuffer.zsbuf, fb->zsbuf);
  903.  
  904.    nv50->dirty |= NV50_NEW_FRAMEBUFFER;
  905. }
  906.  
  907. static void
  908. nv50_set_polygon_stipple(struct pipe_context *pipe,
  909.                          const struct pipe_poly_stipple *stipple)
  910. {
  911.    struct nv50_context *nv50 = nv50_context(pipe);
  912.  
  913.    nv50->stipple = *stipple;
  914.    nv50->dirty |= NV50_NEW_STIPPLE;
  915. }
  916.  
  917. static void
  918. nv50_set_scissor_states(struct pipe_context *pipe,
  919.                         unsigned start_slot,
  920.                         unsigned num_scissors,
  921.                         const struct pipe_scissor_state *scissor)
  922. {
  923.    struct nv50_context *nv50 = nv50_context(pipe);
  924.    int i;
  925.  
  926.    assert(start_slot + num_scissors <= NV50_MAX_VIEWPORTS);
  927.    for (i = 0; i < num_scissors; i++) {
  928.       if (!memcmp(&nv50->scissors[start_slot + i], &scissor[i], sizeof(*scissor)))
  929.          continue;
  930.       nv50->scissors[start_slot + i] = scissor[i];
  931.       nv50->scissors_dirty |= 1 << (start_slot + i);
  932.       nv50->dirty |= NV50_NEW_SCISSOR;
  933.    }
  934. }
  935.  
  936. static void
  937. nv50_set_viewport_states(struct pipe_context *pipe,
  938.                          unsigned start_slot,
  939.                          unsigned num_viewports,
  940.                          const struct pipe_viewport_state *vpt)
  941. {
  942.    struct nv50_context *nv50 = nv50_context(pipe);
  943.    int i;
  944.  
  945.    assert(start_slot + num_viewports <= NV50_MAX_VIEWPORTS);
  946.    for (i = 0; i < num_viewports; i++) {
  947.       if (!memcmp(&nv50->viewports[start_slot + i], &vpt[i], sizeof(*vpt)))
  948.          continue;
  949.       nv50->viewports[start_slot + i] = vpt[i];
  950.       nv50->viewports_dirty |= 1 << (start_slot + i);
  951.       nv50->dirty |= NV50_NEW_VIEWPORT;
  952.    }
  953. }
  954.  
  955. static void
  956. nv50_set_vertex_buffers(struct pipe_context *pipe,
  957.                         unsigned start_slot, unsigned count,
  958.                         const struct pipe_vertex_buffer *vb)
  959. {
  960.    struct nv50_context *nv50 = nv50_context(pipe);
  961.    unsigned i;
  962.  
  963.    util_set_vertex_buffers_count(nv50->vtxbuf, &nv50->num_vtxbufs, vb,
  964.                                  start_slot, count);
  965.  
  966.    if (!vb) {
  967.       nv50->vbo_user &= ~(((1ull << count) - 1) << start_slot);
  968.       nv50->vbo_constant &= ~(((1ull << count) - 1) << start_slot);
  969.       return;
  970.    }
  971.  
  972.    for (i = 0; i < count; ++i) {
  973.       unsigned dst_index = start_slot + i;
  974.  
  975.       if (!vb[i].buffer && vb[i].user_buffer) {
  976.          nv50->vbo_user |= 1 << dst_index;
  977.          if (!vb[i].stride)
  978.             nv50->vbo_constant |= 1 << dst_index;
  979.          else
  980.             nv50->vbo_constant &= ~(1 << dst_index);
  981.       } else {
  982.          nv50->vbo_user &= ~(1 << dst_index);
  983.          nv50->vbo_constant &= ~(1 << dst_index);
  984.       }
  985.    }
  986.  
  987.    nouveau_bufctx_reset(nv50->bufctx_3d, NV50_BIND_VERTEX);
  988.  
  989.    nv50->dirty |= NV50_NEW_ARRAYS;
  990. }
  991.  
static void
nv50_set_index_buffer(struct pipe_context *pipe,
                      const struct pipe_index_buffer *ib)
{
   struct nv50_context *nv50 = nv50_context(pipe);

   /* Drop the bufctx reference to the previously bound index buffer
    * before (possibly) taking a reference to a new one below. */
   if (nv50->idxbuf.buffer)
      nouveau_bufctx_reset(nv50->bufctx_3d, NV50_BIND_INDEX);

   if (ib) {
      pipe_resource_reference(&nv50->idxbuf.buffer, ib->buffer);
      nv50->idxbuf.index_size = ib->index_size;
      if (ib->buffer) {
         /* GPU-resident index buffer: record offset and pin it for reads. */
         nv50->idxbuf.offset = ib->offset;
         BCTX_REFN(nv50->bufctx_3d, INDEX, nv04_resource(ib->buffer), RD);
      } else {
         /* CPU index data; uploaded at draw time. */
         nv50->idxbuf.user_buffer = ib->user_buffer;
      }
   } else {
      /* Unbind: release the old resource reference. */
      pipe_resource_reference(&nv50->idxbuf.buffer, NULL);
   }
}
  1014.  
  1015. static void
  1016. nv50_vertex_state_bind(struct pipe_context *pipe, void *hwcso)
  1017. {
  1018.    struct nv50_context *nv50 = nv50_context(pipe);
  1019.  
  1020.    nv50->vertex = hwcso;
  1021.    nv50->dirty |= NV50_NEW_VERTEX;
  1022. }
  1023.  
static struct pipe_stream_output_target *
nv50_so_target_create(struct pipe_context *pipe,
                      struct pipe_resource *res,
                      unsigned offset, unsigned size)
{
   struct nv04_resource *buf = (struct nv04_resource *)res;
   struct nv50_so_target *targ = MALLOC_STRUCT(nv50_so_target);
   if (!targ)
      return NULL;

   /* NVA0+ can read back the streamed-out byte count, which is needed to
    * resume (append to) a target; allocate a query object to hold it. */
   if (nouveau_context(pipe)->screen->class_3d >= NVA0_3D_CLASS) {
      targ->pq = pipe->create_query(pipe,
                                    NVA0_QUERY_STREAM_OUTPUT_BUFFER_OFFSET, 0);
      if (!targ->pq) {
         FREE(targ);
         return NULL;
      }
   } else {
      targ->pq = NULL;
   }
   /* No data streamed out yet; cleared when the target is (re)bound. */
   targ->clean = TRUE;

   targ->pipe.buffer_size = size;
   targ->pipe.buffer_offset = offset;
   targ->pipe.context = pipe;
   /* Initialize to NULL so pipe_resource_reference doesn't unref garbage. */
   targ->pipe.buffer = NULL;
   pipe_resource_reference(&targ->pipe.buffer, res);
   pipe_reference_init(&targ->pipe.reference, 1);

   assert(buf->base.target == PIPE_BUFFER);
   /* Mark the bound byte range as containing valid data. */
   util_range_add(&buf->valid_buffer_range, offset, offset + size);

   return &targ->pipe;
}
  1058.  
  1059. static void
  1060. nv50_so_target_destroy(struct pipe_context *pipe,
  1061.                        struct pipe_stream_output_target *ptarg)
  1062. {
  1063.    struct nv50_so_target *targ = nv50_so_target(ptarg);
  1064.    if (targ->pq)
  1065.       pipe->destroy_query(pipe, targ->pq);
  1066.    pipe_resource_reference(&targ->pipe.buffer, NULL);
  1067.    FREE(targ);
  1068. }
  1069.  
static void
nv50_set_stream_output_targets(struct pipe_context *pipe,
                               unsigned num_targets,
                               struct pipe_stream_output_target **targets,
                               const unsigned *offsets)
{
   struct nv50_context *nv50 = nv50_context(pipe);
   unsigned i;
   /* Only the first save needs to serialize against in-flight work. */
   boolean serialize = TRUE;
   /* Only NVA0+ can query the streamed-out offset, i.e. resume a target. */
   const boolean can_resume = nv50->screen->base.class_3d >= NVA0_3D_CLASS;

   assert(num_targets <= 4);

   for (i = 0; i < num_targets; ++i) {
      const boolean changed = nv50->so_target[i] != targets[i];
      /* offset == -1 means "append to existing contents" (resume). */
      const boolean append = (offsets[i] == (unsigned)-1);
      if (!changed && append)
         continue;
      nv50->so_targets_dirty |= 1 << i;

      /* Save the current offset of an outgoing target so it can be
       * resumed later; must happen before dropping the reference. */
      if (can_resume && changed && nv50->so_target[i]) {
         nva0_so_target_save_offset(pipe, nv50->so_target[i], i, serialize);
         serialize = FALSE;
      }

      /* A non-append bind restarts writing from the target's start. */
      if (targets[i] && !append)
         nv50_so_target(targets[i])->clean = TRUE;

      pipe_so_target_reference(&nv50->so_target[i], targets[i]);
   }
   /* Unbind any targets past the new count. */
   for (; i < nv50->num_so_targets; ++i) {
      if (can_resume && nv50->so_target[i]) {
         nva0_so_target_save_offset(pipe, nv50->so_target[i], i, serialize);
         serialize = FALSE;
      }
      pipe_so_target_reference(&nv50->so_target[i], NULL);
      nv50->so_targets_dirty |= 1 << i;
   }
   nv50->num_so_targets = num_targets;

   if (nv50->so_targets_dirty)
      nv50->dirty |= NV50_NEW_STRMOUT;
}
  1113.  
  1114. void
  1115. nv50_init_state_functions(struct nv50_context *nv50)
  1116. {
  1117.    struct pipe_context *pipe = &nv50->base.pipe;
  1118.  
  1119.    pipe->create_blend_state = nv50_blend_state_create;
  1120.    pipe->bind_blend_state = nv50_blend_state_bind;
  1121.    pipe->delete_blend_state = nv50_blend_state_delete;
  1122.  
  1123.    pipe->create_rasterizer_state = nv50_rasterizer_state_create;
  1124.    pipe->bind_rasterizer_state = nv50_rasterizer_state_bind;
  1125.    pipe->delete_rasterizer_state = nv50_rasterizer_state_delete;
  1126.  
  1127.    pipe->create_depth_stencil_alpha_state = nv50_zsa_state_create;
  1128.    pipe->bind_depth_stencil_alpha_state = nv50_zsa_state_bind;
  1129.    pipe->delete_depth_stencil_alpha_state = nv50_zsa_state_delete;
  1130.  
  1131.    pipe->create_sampler_state = nv50_sampler_state_create;
  1132.    pipe->delete_sampler_state = nv50_sampler_state_delete;
  1133.    pipe->bind_sampler_states   = nv50_bind_sampler_states;
  1134.  
  1135.    pipe->create_sampler_view = nv50_create_sampler_view;
  1136.    pipe->sampler_view_destroy = nv50_sampler_view_destroy;
  1137.    pipe->set_sampler_views = nv50_set_sampler_views;
  1138.  
  1139.    pipe->create_vs_state = nv50_vp_state_create;
  1140.    pipe->create_fs_state = nv50_fp_state_create;
  1141.    pipe->create_gs_state = nv50_gp_state_create;
  1142.    pipe->bind_vs_state = nv50_vp_state_bind;
  1143.    pipe->bind_fs_state = nv50_fp_state_bind;
  1144.    pipe->bind_gs_state = nv50_gp_state_bind;
  1145.    pipe->delete_vs_state = nv50_sp_state_delete;
  1146.    pipe->delete_fs_state = nv50_sp_state_delete;
  1147.    pipe->delete_gs_state = nv50_sp_state_delete;
  1148.  
  1149.    pipe->set_blend_color = nv50_set_blend_color;
  1150.    pipe->set_stencil_ref = nv50_set_stencil_ref;
  1151.    pipe->set_clip_state = nv50_set_clip_state;
  1152.    pipe->set_sample_mask = nv50_set_sample_mask;
  1153.    pipe->set_min_samples = nv50_set_min_samples;
  1154.    pipe->set_constant_buffer = nv50_set_constant_buffer;
  1155.    pipe->set_framebuffer_state = nv50_set_framebuffer_state;
  1156.    pipe->set_polygon_stipple = nv50_set_polygon_stipple;
  1157.    pipe->set_scissor_states = nv50_set_scissor_states;
  1158.    pipe->set_viewport_states = nv50_set_viewport_states;
  1159.  
  1160.    pipe->create_vertex_elements_state = nv50_vertex_state_create;
  1161.    pipe->delete_vertex_elements_state = nv50_vertex_state_delete;
  1162.    pipe->bind_vertex_elements_state = nv50_vertex_state_bind;
  1163.  
  1164.    pipe->set_vertex_buffers = nv50_set_vertex_buffers;
  1165.    pipe->set_index_buffer = nv50_set_index_buffer;
  1166.  
  1167.    pipe->create_stream_output_target = nv50_so_target_create;
  1168.    pipe->stream_output_target_destroy = nv50_so_target_destroy;
  1169.    pipe->set_stream_output_targets = nv50_set_stream_output_targets;
  1170.  
  1171.    nv50->sample_mask = ~0;
  1172.    nv50->min_samples = 1;
  1173. }
  1174.