Subversion Repositories Kolibri OS

Rev

Blame | Last modification | View Log | RSS feed

  1. /*
  2.  * Copyright 2010 Christoph Bumiller
  3.  *
  4.  * Permission is hereby granted, free of charge, to any person obtaining a
  5.  * copy of this software and associated documentation files (the "Software"),
  6.  * to deal in the Software without restriction, including without limitation
  7.  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8.  * and/or sell copies of the Software, and to permit persons to whom the
  9.  * Software is furnished to do so, subject to the following conditions:
  10.  *
  11.  * The above copyright notice and this permission notice shall be included in
  12.  * all copies or substantial portions of the Software.
  13.  *
  14.  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15.  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16.  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  17.  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18.  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19.  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20.  * OTHER DEALINGS IN THE SOFTWARE.
  21.  */
  22.  
  23. #include "pipe/p_defines.h"
  24. #include "util/u_helpers.h"
  25. #include "util/u_inlines.h"
  26. #include "util/u_transfer.h"
  27.  
  28. #include "tgsi/tgsi_parse.h"
  29.  
  30. #include "nvc0/nvc0_stateobj.h"
  31. #include "nvc0/nvc0_context.h"
  32.  
  33. #include "nvc0/nvc0_3d.xml.h"
  34. #include "nv50/nv50_texture.xml.h"
  35.  
  36. #include "nouveau_gldefs.h"
  37.  
  38. static INLINE uint32_t
  39. nvc0_colormask(unsigned mask)
  40. {
  41.     uint32_t ret = 0;
  42.  
  43.     if (mask & PIPE_MASK_R)
  44.         ret |= 0x0001;
  45.     if (mask & PIPE_MASK_G)
  46.         ret |= 0x0010;
  47.     if (mask & PIPE_MASK_B)
  48.         ret |= 0x0100;
  49.     if (mask & PIPE_MASK_A)
  50.         ret |= 0x1000;
  51.  
  52.     return ret;
  53. }
  54.  
/* Expand to a case mapping PIPE_BLENDFACTOR_<a> to NV50_BLEND_FACTOR_<b>. */
#define NVC0_BLEND_FACTOR_CASE(a, b) \
   case PIPE_BLENDFACTOR_##a: return NV50_BLEND_FACTOR_##b

/* Translate a gallium blend factor to the hardware enumerant.
 * Unknown factors fall back to ZERO.
 */
static INLINE uint32_t
nvc0_blend_fac(unsigned factor)
{
   switch (factor) {
   NVC0_BLEND_FACTOR_CASE(ONE, ONE);
   NVC0_BLEND_FACTOR_CASE(SRC_COLOR, SRC_COLOR);
   NVC0_BLEND_FACTOR_CASE(SRC_ALPHA, SRC_ALPHA);
   NVC0_BLEND_FACTOR_CASE(DST_ALPHA, DST_ALPHA);
   NVC0_BLEND_FACTOR_CASE(DST_COLOR, DST_COLOR);
   NVC0_BLEND_FACTOR_CASE(SRC_ALPHA_SATURATE, SRC_ALPHA_SATURATE);
   NVC0_BLEND_FACTOR_CASE(CONST_COLOR, CONSTANT_COLOR);
   NVC0_BLEND_FACTOR_CASE(CONST_ALPHA, CONSTANT_ALPHA);
   NVC0_BLEND_FACTOR_CASE(SRC1_COLOR, SRC1_COLOR);
   NVC0_BLEND_FACTOR_CASE(SRC1_ALPHA, SRC1_ALPHA);
   NVC0_BLEND_FACTOR_CASE(ZERO, ZERO);
   NVC0_BLEND_FACTOR_CASE(INV_SRC_COLOR, ONE_MINUS_SRC_COLOR);
   NVC0_BLEND_FACTOR_CASE(INV_SRC_ALPHA, ONE_MINUS_SRC_ALPHA);
   NVC0_BLEND_FACTOR_CASE(INV_DST_ALPHA, ONE_MINUS_DST_ALPHA);
   NVC0_BLEND_FACTOR_CASE(INV_DST_COLOR, ONE_MINUS_DST_COLOR);
   NVC0_BLEND_FACTOR_CASE(INV_CONST_COLOR, ONE_MINUS_CONSTANT_COLOR);
   NVC0_BLEND_FACTOR_CASE(INV_CONST_ALPHA, ONE_MINUS_CONSTANT_ALPHA);
   NVC0_BLEND_FACTOR_CASE(INV_SRC1_COLOR, ONE_MINUS_SRC1_COLOR);
   NVC0_BLEND_FACTOR_CASE(INV_SRC1_ALPHA, ONE_MINUS_SRC1_ALPHA);
   default:
      return NV50_BLEND_FACTOR_ZERO;
   }
}
  85.  
  86. static void *
  87. nvc0_blend_state_create(struct pipe_context *pipe,
  88.                         const struct pipe_blend_state *cso)
  89. {
  90.    struct nvc0_blend_stateobj *so = CALLOC_STRUCT(nvc0_blend_stateobj);
  91.    int i;
  92.    int r; /* reference */
  93.    uint32_t ms;
  94.    uint8_t blend_en = 0;
  95.    boolean indep_masks = FALSE;
  96.    boolean indep_funcs = FALSE;
  97.  
  98.    so->pipe = *cso;
  99.  
  100.    /* check which states actually have differing values */
  101.    if (cso->independent_blend_enable) {
  102.       for (r = 0; r < 8 && !cso->rt[r].blend_enable; ++r);
  103.       blend_en |= 1 << r;
  104.       for (i = r + 1; i < 8; ++i) {
  105.          if (!cso->rt[i].blend_enable)
  106.             continue;
  107.          blend_en |= 1 << i;
  108.          if (cso->rt[i].rgb_func != cso->rt[r].rgb_func ||
  109.              cso->rt[i].rgb_src_factor != cso->rt[r].rgb_src_factor ||
  110.              cso->rt[i].rgb_dst_factor != cso->rt[r].rgb_dst_factor ||
  111.              cso->rt[i].alpha_func != cso->rt[r].alpha_func ||
  112.              cso->rt[i].alpha_src_factor != cso->rt[r].alpha_src_factor ||
  113.              cso->rt[i].alpha_dst_factor != cso->rt[r].alpha_dst_factor) {
  114.             indep_funcs = TRUE;
  115.             break;
  116.          }
  117.       }
  118.       for (; i < 8; ++i)
  119.          blend_en |= (cso->rt[i].blend_enable ? 1 : 0) << i;
  120.  
  121.       for (i = 1; i < 8; ++i) {
  122.          if (cso->rt[i].colormask != cso->rt[0].colormask) {
  123.             indep_masks = TRUE;
  124.             break;
  125.          }
  126.       }
  127.    } else {
  128.       r = 0;
  129.       if (cso->rt[0].blend_enable)
  130.          blend_en = 0xff;
  131.    }
  132.  
  133.    if (cso->logicop_enable) {
  134.       SB_BEGIN_3D(so, LOGIC_OP_ENABLE, 2);
  135.       SB_DATA    (so, 1);
  136.       SB_DATA    (so, nvgl_logicop_func(cso->logicop_func));
  137.  
  138.       SB_IMMED_3D(so, MACRO_BLEND_ENABLES, 0);
  139.    } else {
  140.       SB_IMMED_3D(so, LOGIC_OP_ENABLE, 0);
  141.  
  142.       SB_IMMED_3D(so, BLEND_INDEPENDENT, indep_funcs);
  143.       SB_IMMED_3D(so, MACRO_BLEND_ENABLES, blend_en);
  144.       if (indep_funcs) {
  145.          for (i = 0; i < 8; ++i) {
  146.             if (cso->rt[i].blend_enable) {
  147.                SB_BEGIN_3D(so, IBLEND_EQUATION_RGB(i), 6);
  148.                SB_DATA    (so, nvgl_blend_eqn(cso->rt[i].rgb_func));
  149.                SB_DATA    (so, nvc0_blend_fac(cso->rt[i].rgb_src_factor));
  150.                SB_DATA    (so, nvc0_blend_fac(cso->rt[i].rgb_dst_factor));
  151.                SB_DATA    (so, nvgl_blend_eqn(cso->rt[i].alpha_func));
  152.                SB_DATA    (so, nvc0_blend_fac(cso->rt[i].alpha_src_factor));
  153.                SB_DATA    (so, nvc0_blend_fac(cso->rt[i].alpha_dst_factor));
  154.             }
  155.          }
  156.       } else
  157.       if (blend_en) {
  158.          SB_BEGIN_3D(so, BLEND_EQUATION_RGB, 5);
  159.          SB_DATA    (so, nvgl_blend_eqn(cso->rt[r].rgb_func));
  160.          SB_DATA    (so, nvc0_blend_fac(cso->rt[r].rgb_src_factor));
  161.          SB_DATA    (so, nvc0_blend_fac(cso->rt[r].rgb_dst_factor));
  162.          SB_DATA    (so, nvgl_blend_eqn(cso->rt[r].alpha_func));
  163.          SB_DATA    (so, nvc0_blend_fac(cso->rt[r].alpha_src_factor));
  164.          SB_BEGIN_3D(so, BLEND_FUNC_DST_ALPHA, 1);
  165.          SB_DATA    (so, nvc0_blend_fac(cso->rt[r].alpha_dst_factor));
  166.       }
  167.  
  168.       SB_IMMED_3D(so, COLOR_MASK_COMMON, !indep_masks);
  169.       if (indep_masks) {
  170.          SB_BEGIN_3D(so, COLOR_MASK(0), 8);
  171.          for (i = 0; i < 8; ++i)
  172.             SB_DATA(so, nvc0_colormask(cso->rt[i].colormask));
  173.       } else {
  174.          SB_BEGIN_3D(so, COLOR_MASK(0), 1);
  175.          SB_DATA    (so, nvc0_colormask(cso->rt[0].colormask));
  176.       }
  177.    }
  178.  
  179.    ms = 0;
  180.    if (cso->alpha_to_coverage)
  181.       ms |= NVC0_3D_MULTISAMPLE_CTRL_ALPHA_TO_COVERAGE;
  182.    if (cso->alpha_to_one)
  183.       ms |= NVC0_3D_MULTISAMPLE_CTRL_ALPHA_TO_ONE;
  184.  
  185.    SB_BEGIN_3D(so, MULTISAMPLE_CTRL, 1);
  186.    SB_DATA    (so, ms);
  187.  
  188.    assert(so->size <= (sizeof(so->state) / sizeof(so->state[0])));
  189.    return so;
  190. }
  191.  
  192. static void
  193. nvc0_blend_state_bind(struct pipe_context *pipe, void *hwcso)
  194. {
  195.     struct nvc0_context *nvc0 = nvc0_context(pipe);
  196.  
  197.     nvc0->blend = hwcso;
  198.     nvc0->dirty |= NVC0_NEW_BLEND;
  199. }
  200.  
/* Destroy a blend state object; it holds no GPU references, just free it. */
static void
nvc0_blend_state_delete(struct pipe_context *pipe, void *hwcso)
{
    FREE(hwcso);
}
  206.  
/* NOTE: ignoring line_last_pixel */
/* Create a rasterizer state object: translate the gallium rasterizer CSO
 * into a pre-encoded command stream replayed on bind.
 * Returns NULL on allocation failure.
 */
static void *
nvc0_rasterizer_state_create(struct pipe_context *pipe,
                             const struct pipe_rasterizer_state *cso)
{
    struct nvc0_rasterizer_stateobj *so;
    uint32_t reg;

    so = CALLOC_STRUCT(nvc0_rasterizer_stateobj);
    if (!so)
        return NULL;
    so->pipe = *cso;

    /* Scissor enables are handled in scissor state, we will not want to
     * always emit 16 commands, one for each scissor rectangle, here.
     */

    SB_BEGIN_3D(so, SHADE_MODEL, 1);
    SB_DATA    (so, cso->flatshade ? NVC0_3D_SHADE_MODEL_FLAT :
                                     NVC0_3D_SHADE_MODEL_SMOOTH);
    SB_IMMED_3D(so, PROVOKING_VERTEX_LAST, !cso->flatshade_first);
    SB_IMMED_3D(so, VERTEX_TWO_SIDE_ENABLE, cso->light_twoside);

    SB_IMMED_3D(so, VERT_COLOR_CLAMP_EN, cso->clamp_vertex_color);
    SB_BEGIN_3D(so, FRAG_COLOR_CLAMP_EN, 1);
    /* NOTE(review): 0x11111111 looks like one clamp-enable nibble per RT —
     * confirm against the 3D class docs */
    SB_DATA    (so, cso->clamp_fragment_color ? 0x11111111 : 0x00000000);

    SB_IMMED_3D(so, MULTISAMPLE_ENABLE, cso->multisample);

    SB_IMMED_3D(so, LINE_SMOOTH_ENABLE, cso->line_smooth);
    /* smooth and aliased line widths are separate methods */
    if (cso->line_smooth)
       SB_BEGIN_3D(so, LINE_WIDTH_SMOOTH, 1);
    else
       SB_BEGIN_3D(so, LINE_WIDTH_ALIASED, 1);
    SB_DATA    (so, fui(cso->line_width));

    SB_IMMED_3D(so, LINE_STIPPLE_ENABLE, cso->line_stipple_enable);
    if (cso->line_stipple_enable) {
        SB_BEGIN_3D(so, LINE_STIPPLE_PATTERN, 1);
        /* pattern in the upper bits, repeat factor in the lower byte */
        SB_DATA    (so, (cso->line_stipple_pattern << 8) |
                         cso->line_stipple_factor);

    }

    SB_IMMED_3D(so, VP_POINT_SIZE, cso->point_size_per_vertex);
    /* constant point size only needs programming when not per-vertex */
    if (!cso->point_size_per_vertex) {
       SB_BEGIN_3D(so, POINT_SIZE, 1);
       SB_DATA    (so, fui(cso->point_size));
    }

    reg = (cso->sprite_coord_mode == PIPE_SPRITE_COORD_UPPER_LEFT) ?
       NVC0_3D_POINT_COORD_REPLACE_COORD_ORIGIN_UPPER_LEFT :
       NVC0_3D_POINT_COORD_REPLACE_COORD_ORIGIN_LOWER_LEFT;

    SB_BEGIN_3D(so, POINT_COORD_REPLACE, 1);
    SB_DATA    (so, ((cso->sprite_coord_enable & 0xff) << 3) | reg);
    SB_IMMED_3D(so, POINT_SPRITE_ENABLE, cso->point_quad_rasterization);
    SB_IMMED_3D(so, POINT_SMOOTH_ENABLE, cso->point_smooth);

    SB_BEGIN_3D(so, MACRO_POLYGON_MODE_FRONT, 1);
    SB_DATA    (so, nvgl_polygon_mode(cso->fill_front));
    SB_BEGIN_3D(so, MACRO_POLYGON_MODE_BACK, 1);
    SB_DATA    (so, nvgl_polygon_mode(cso->fill_back));
    SB_IMMED_3D(so, POLYGON_SMOOTH_ENABLE, cso->poly_smooth);

    SB_BEGIN_3D(so, CULL_FACE_ENABLE, 3);
    SB_DATA    (so, cso->cull_face != PIPE_FACE_NONE);
    SB_DATA    (so, cso->front_ccw ? NVC0_3D_FRONT_FACE_CCW :
                                     NVC0_3D_FRONT_FACE_CW);
    switch (cso->cull_face) {
    case PIPE_FACE_FRONT_AND_BACK:
       SB_DATA(so, NVC0_3D_CULL_FACE_FRONT_AND_BACK);
       break;
    case PIPE_FACE_FRONT:
       SB_DATA(so, NVC0_3D_CULL_FACE_FRONT);
       break;
    case PIPE_FACE_BACK:
    default:
       SB_DATA(so, NVC0_3D_CULL_FACE_BACK);
       break;
    }

    SB_IMMED_3D(so, POLYGON_STIPPLE_ENABLE, cso->poly_stipple_enable);
    SB_BEGIN_3D(so, POLYGON_OFFSET_POINT_ENABLE, 3);
    SB_DATA    (so, cso->offset_point);
    SB_DATA    (so, cso->offset_line);
    SB_DATA    (so, cso->offset_tri);

    if (cso->offset_point || cso->offset_line || cso->offset_tri) {
        SB_BEGIN_3D(so, POLYGON_OFFSET_FACTOR, 1);
        SB_DATA    (so, fui(cso->offset_scale));
        SB_BEGIN_3D(so, POLYGON_OFFSET_UNITS, 1);
        /* NOTE(review): units are doubled here — presumably the hw unit
         * scale differs from gallium's; confirm before changing */
        SB_DATA    (so, fui(cso->offset_units * 2.0f));
        SB_BEGIN_3D(so, POLYGON_OFFSET_CLAMP, 1);
        SB_DATA    (so, fui(cso->offset_clamp));
    }

    /* depth clipping vs. depth clamping */
    if (cso->depth_clip)
       reg = NVC0_3D_VIEW_VOLUME_CLIP_CTRL_UNK1_UNK1;
    else
       reg =
          NVC0_3D_VIEW_VOLUME_CLIP_CTRL_UNK1_UNK1 |
          NVC0_3D_VIEW_VOLUME_CLIP_CTRL_DEPTH_CLAMP_NEAR |
          NVC0_3D_VIEW_VOLUME_CLIP_CTRL_DEPTH_CLAMP_FAR |
          NVC0_3D_VIEW_VOLUME_CLIP_CTRL_UNK12_UNK2;

    SB_BEGIN_3D(so, VIEW_VOLUME_CLIP_CTRL, 1);
    SB_DATA    (so, reg);

    SB_IMMED_3D(so, DEPTH_CLIP_NEGATIVE_Z, cso->clip_halfz);

    SB_IMMED_3D(so, PIXEL_CENTER_INTEGER, !cso->half_pixel_center);

    /* make sure we did not overflow the stateobj's command buffer */
    assert(so->size <= (sizeof(so->state) / sizeof(so->state[0])));
    return (void *)so;
}
  323.  
  324. static void
  325. nvc0_rasterizer_state_bind(struct pipe_context *pipe, void *hwcso)
  326. {
  327.    struct nvc0_context *nvc0 = nvc0_context(pipe);
  328.  
  329.    nvc0->rast = hwcso;
  330.    nvc0->dirty |= NVC0_NEW_RASTERIZER;
  331. }
  332.  
/* Destroy a rasterizer state object; no GPU references held, just free. */
static void
nvc0_rasterizer_state_delete(struct pipe_context *pipe, void *hwcso)
{
   FREE(hwcso);
}
  338.  
  339. static void *
  340. nvc0_zsa_state_create(struct pipe_context *pipe,
  341.                       const struct pipe_depth_stencil_alpha_state *cso)
  342. {
  343.    struct nvc0_zsa_stateobj *so = CALLOC_STRUCT(nvc0_zsa_stateobj);
  344.  
  345.    so->pipe = *cso;
  346.  
  347.    SB_IMMED_3D(so, DEPTH_TEST_ENABLE, cso->depth.enabled);
  348.    if (cso->depth.enabled) {
  349.       SB_IMMED_3D(so, DEPTH_WRITE_ENABLE, cso->depth.writemask);
  350.       SB_BEGIN_3D(so, DEPTH_TEST_FUNC, 1);
  351.       SB_DATA    (so, nvgl_comparison_op(cso->depth.func));
  352.    }
  353.  
  354.    if (cso->stencil[0].enabled) {
  355.       SB_BEGIN_3D(so, STENCIL_ENABLE, 5);
  356.       SB_DATA    (so, 1);
  357.       SB_DATA    (so, nvgl_stencil_op(cso->stencil[0].fail_op));
  358.       SB_DATA    (so, nvgl_stencil_op(cso->stencil[0].zfail_op));
  359.       SB_DATA    (so, nvgl_stencil_op(cso->stencil[0].zpass_op));
  360.       SB_DATA    (so, nvgl_comparison_op(cso->stencil[0].func));
  361.       SB_BEGIN_3D(so, STENCIL_FRONT_FUNC_MASK, 2);
  362.       SB_DATA    (so, cso->stencil[0].valuemask);
  363.       SB_DATA    (so, cso->stencil[0].writemask);
  364.    } else {
  365.       SB_IMMED_3D(so, STENCIL_ENABLE, 0);
  366.    }
  367.  
  368.    if (cso->stencil[1].enabled) {
  369.       assert(cso->stencil[0].enabled);
  370.       SB_BEGIN_3D(so, STENCIL_TWO_SIDE_ENABLE, 5);
  371.       SB_DATA    (so, 1);
  372.       SB_DATA    (so, nvgl_stencil_op(cso->stencil[1].fail_op));
  373.       SB_DATA    (so, nvgl_stencil_op(cso->stencil[1].zfail_op));
  374.       SB_DATA    (so, nvgl_stencil_op(cso->stencil[1].zpass_op));
  375.       SB_DATA    (so, nvgl_comparison_op(cso->stencil[1].func));
  376.       SB_BEGIN_3D(so, STENCIL_BACK_MASK, 2);
  377.       SB_DATA    (so, cso->stencil[1].writemask);
  378.       SB_DATA    (so, cso->stencil[1].valuemask);
  379.    } else
  380.    if (cso->stencil[0].enabled) {
  381.       SB_IMMED_3D(so, STENCIL_TWO_SIDE_ENABLE, 0);
  382.    }
  383.  
  384.    SB_IMMED_3D(so, ALPHA_TEST_ENABLE, cso->alpha.enabled);
  385.    if (cso->alpha.enabled) {
  386.       SB_BEGIN_3D(so, ALPHA_TEST_REF, 2);
  387.       SB_DATA    (so, fui(cso->alpha.ref_value));
  388.       SB_DATA    (so, nvgl_comparison_op(cso->alpha.func));
  389.    }
  390.  
  391.    assert(so->size <= (sizeof(so->state) / sizeof(so->state[0])));
  392.    return (void *)so;
  393. }
  394.  
  395. static void
  396. nvc0_zsa_state_bind(struct pipe_context *pipe, void *hwcso)
  397. {
  398.    struct nvc0_context *nvc0 = nvc0_context(pipe);
  399.  
  400.    nvc0->zsa = hwcso;
  401.    nvc0->dirty |= NVC0_NEW_ZSA;
  402. }
  403.  
/* Destroy a depth/stencil/alpha state object; no GPU references held. */
static void
nvc0_zsa_state_delete(struct pipe_context *pipe, void *hwcso)
{
   FREE(hwcso);
}
  409.  
  410. /* ====================== SAMPLERS AND TEXTURES ================================
  411.  */
  412.  
/* Expand to a case mapping PIPE_TEX_WRAP_<n> to the identically named
 * NV50_TSC_WRAP_<n> hardware value. */
#define NV50_TSC_WRAP_CASE(n) \
    case PIPE_TEX_WRAP_##n: return NV50_TSC_WRAP_##n
  415.  
  416. static void
  417. nvc0_sampler_state_delete(struct pipe_context *pipe, void *hwcso)
  418. {
  419.    unsigned s, i;
  420.  
  421.    for (s = 0; s < 5; ++s)
  422.       for (i = 0; i < nvc0_context(pipe)->num_samplers[s]; ++i)
  423.          if (nvc0_context(pipe)->samplers[s][i] == hwcso)
  424.             nvc0_context(pipe)->samplers[s][i] = NULL;
  425.  
  426.    nvc0_screen_tsc_free(nvc0_context(pipe)->screen, nv50_tsc_entry(hwcso));
  427.  
  428.    FREE(hwcso);
  429. }
  430.  
  431. static INLINE void
  432. nvc0_stage_sampler_states_bind(struct nvc0_context *nvc0, int s,
  433.                                unsigned nr, void **hwcso)
  434. {
  435.    unsigned i;
  436.  
  437.    for (i = 0; i < nr; ++i) {
  438.       struct nv50_tsc_entry *old = nvc0->samplers[s][i];
  439.  
  440.       if (hwcso[i] == old)
  441.          continue;
  442.       nvc0->samplers_dirty[s] |= 1 << i;
  443.  
  444.       nvc0->samplers[s][i] = nv50_tsc_entry(hwcso[i]);
  445.       if (old)
  446.          nvc0_screen_tsc_unlock(nvc0->screen, old);
  447.    }
  448.    for (; i < nvc0->num_samplers[s]; ++i) {
  449.       if (nvc0->samplers[s][i]) {
  450.          nvc0_screen_tsc_unlock(nvc0->screen, nvc0->samplers[s][i]);
  451.          nvc0->samplers[s][i] = NULL;
  452.       }
  453.    }
  454.  
  455.    nvc0->num_samplers[s] = nr;
  456.  
  457.    nvc0->dirty |= NVC0_NEW_SAMPLERS;
  458. }
  459.  
/* Bind a range [start, start + nr) of sampler states for stage s.
 * A NULL cso array unbinds the whole range.  num_samplers[s] is updated to
 * one past the highest bound slot when the range touches the current top.
 */
static void
nvc0_stage_sampler_states_bind_range(struct nvc0_context *nvc0,
                                     const unsigned s,
                                     unsigned start, unsigned nr, void **cso)
{
   const unsigned end = start + nr;
   int last_valid = -1;
   unsigned i;

   if (cso) {
      for (i = start; i < end; ++i) {
         const unsigned p = i - start;
         if (cso[p])
            last_valid = i;
         if (cso[p] == nvc0->samplers[s][i])
            continue;
         nvc0->samplers_dirty[s] |= 1 << i;

         if (nvc0->samplers[s][i])
            nvc0_screen_tsc_unlock(nvc0->screen, nvc0->samplers[s][i]);
         nvc0->samplers[s][i] = cso[p];
      }
   } else {
      /* unbind the whole range */
      for (i = start; i < end; ++i) {
         if (nvc0->samplers[s][i]) {
            nvc0_screen_tsc_unlock(nvc0->screen, nvc0->samplers[s][i]);
            nvc0->samplers[s][i] = NULL;
            nvc0->samplers_dirty[s] |= 1 << i;
         }
      }
   }

   if (nvc0->num_samplers[s] <= end) {
      if (last_valid < 0) {
         /* range was cleared: scan back for the new highest bound slot */
         for (i = start; i && !nvc0->samplers[s][i - 1]; --i);
         nvc0->num_samplers[s] = i;
      } else {
         nvc0->num_samplers[s] = last_valid + 1;
      }
   }
}
  501.  
  502. static void
  503. nvc0_bind_sampler_states(struct pipe_context *pipe, unsigned shader,
  504.                          unsigned start, unsigned nr, void **s)
  505. {
  506.    switch (shader) {
  507.    case PIPE_SHADER_VERTEX:
  508.       assert(start == 0);
  509.       nvc0_stage_sampler_states_bind(nvc0_context(pipe), 0, nr, s);
  510.       break;
  511.    case PIPE_SHADER_GEOMETRY:
  512.       assert(start == 0);
  513.       nvc0_stage_sampler_states_bind(nvc0_context(pipe), 3, nr, s);
  514.       break;
  515.    case PIPE_SHADER_FRAGMENT:
  516.       assert(start == 0);
  517.       nvc0_stage_sampler_states_bind(nvc0_context(pipe), 4, nr, s);
  518.       break;
  519.    case PIPE_SHADER_COMPUTE:
  520.       nvc0_stage_sampler_states_bind_range(nvc0_context(pipe), 5,
  521.                                            start, nr, s);
  522.       nvc0_context(pipe)->dirty_cp |= NVC0_NEW_CP_SAMPLERS;
  523.       break;
  524.    }
  525. }
  526.  
  527.  
  528. /* NOTE: only called when not referenced anywhere, won't be bound */
  529. static void
  530. nvc0_sampler_view_destroy(struct pipe_context *pipe,
  531.                           struct pipe_sampler_view *view)
  532. {
  533.    pipe_resource_reference(&view->texture, NULL);
  534.  
  535.    nvc0_screen_tic_free(nvc0_context(pipe)->screen, nv50_tic_entry(view));
  536.  
  537.    FREE(nv50_tic_entry(view));
  538. }
  539.  
/* Bind the first nr sampler views for stage s (slots [0, nr)), releasing
 * references to any previously bound views beyond the new count.
 */
static INLINE void
nvc0_stage_set_sampler_views(struct nvc0_context *nvc0, int s,
                             unsigned nr,
                             struct pipe_sampler_view **views)
{
   unsigned i;

   for (i = 0; i < nr; ++i) {
      struct nv50_tic_entry *old = nv50_tic_entry(nvc0->textures[s][i]);

      if (views[i] == nvc0->textures[s][i])
         continue;
      nvc0->textures_dirty[s] |= 1 << i;

      if (old) {
         /* drop the old view's validation binding and unlock its TIC entry */
         nouveau_bufctx_reset(nvc0->bufctx_3d, NVC0_BIND_TEX(s, i));
         nvc0_screen_tic_unlock(nvc0->screen, old);
      }

      pipe_sampler_view_reference(&nvc0->textures[s][i], views[i]);
   }

   /* release views left over from a previously larger binding count */
   for (i = nr; i < nvc0->num_textures[s]; ++i) {
      struct nv50_tic_entry *old = nv50_tic_entry(nvc0->textures[s][i]);
      if (old) {
         nouveau_bufctx_reset(nvc0->bufctx_3d, NVC0_BIND_TEX(s, i));
         nvc0_screen_tic_unlock(nvc0->screen, old);
         pipe_sampler_view_reference(&nvc0->textures[s][i], NULL);
      }
   }

   nvc0->num_textures[s] = nr;

   nvc0->dirty |= NVC0_NEW_TEXTURES;
}
  575.  
/* Bind a range [start, start + nr) of sampler views for stage s.
 * Stage 5 is compute and uses the compute bufctx/bind points; a NULL views
 * array unbinds the whole range.  num_textures[s] is updated to one past
 * the highest bound slot when the range touches the current top.
 */
static void
nvc0_stage_set_sampler_views_range(struct nvc0_context *nvc0, const unsigned s,
                                   unsigned start, unsigned nr,
                                   struct pipe_sampler_view **views)
{
   struct nouveau_bufctx *bctx = (s == 5) ? nvc0->bufctx_cp : nvc0->bufctx_3d;
   const unsigned end = start + nr;
   const unsigned bin = (s == 5) ? NVC0_BIND_CP_TEX(0) : NVC0_BIND_TEX(s, 0);
   int last_valid = -1;
   unsigned i;

   if (views) {
      for (i = start; i < end; ++i) {
         const unsigned p = i - start;
         if (views[p])
            last_valid = i;
         if (views[p] == nvc0->textures[s][i])
            continue;
         nvc0->textures_dirty[s] |= 1 << i;

         if (nvc0->textures[s][i]) {
            struct nv50_tic_entry *old = nv50_tic_entry(nvc0->textures[s][i]);
            /* drop the old validation binding and unlock its TIC entry */
            nouveau_bufctx_reset(bctx, bin + i);
            nvc0_screen_tic_unlock(nvc0->screen, old);
         }
         pipe_sampler_view_reference(&nvc0->textures[s][i], views[p]);
      }
   } else {
      /* unbind the whole range */
      for (i = start; i < end; ++i) {
         struct nv50_tic_entry *old = nv50_tic_entry(nvc0->textures[s][i]);
         if (!old)
            continue;
         nvc0->textures_dirty[s] |= 1 << i;

         nvc0_screen_tic_unlock(nvc0->screen, old);
         pipe_sampler_view_reference(&nvc0->textures[s][i], NULL);
         nouveau_bufctx_reset(bctx, bin + i);
      }
   }

   if (nvc0->num_textures[s] <= end) {
      if (last_valid < 0) {
         /* range was cleared: scan back for the new highest bound slot */
         for (i = start; i && !nvc0->textures[s][i - 1]; --i);
         nvc0->num_textures[s] = i;
      } else {
         nvc0->num_textures[s] = last_valid + 1;
      }
   }
}
  625.  
  626. static void
  627. nvc0_set_sampler_views(struct pipe_context *pipe, unsigned shader,
  628.                        unsigned start, unsigned nr,
  629.                        struct pipe_sampler_view **views)
  630. {
  631.    assert(start == 0);
  632.    switch (shader) {
  633.    case PIPE_SHADER_VERTEX:
  634.       nvc0_stage_set_sampler_views(nvc0_context(pipe), 0, nr, views);
  635.       break;
  636.    case PIPE_SHADER_GEOMETRY:
  637.       nvc0_stage_set_sampler_views(nvc0_context(pipe), 3, nr, views);
  638.       break;
  639.    case PIPE_SHADER_FRAGMENT:
  640.       nvc0_stage_set_sampler_views(nvc0_context(pipe), 4, nr, views);
  641.       break;
  642.    case PIPE_SHADER_COMPUTE:
  643.       nvc0_stage_set_sampler_views_range(nvc0_context(pipe), 5,
  644.                                          start, nr, views);
  645.       nvc0_context(pipe)->dirty_cp |= NVC0_NEW_CP_TEXTURES;
  646.       break;
  647.    default:
  648.       ;
  649.    }
  650. }
  651.  
  652.  
  653. /* ============================= SHADERS =======================================
  654.  */
  655.  
  656. static void *
  657. nvc0_sp_state_create(struct pipe_context *pipe,
  658.                      const struct pipe_shader_state *cso, unsigned type)
  659. {
  660.    struct nvc0_program *prog;
  661.  
  662.    prog = CALLOC_STRUCT(nvc0_program);
  663.    if (!prog)
  664.       return NULL;
  665.  
  666.    prog->type = type;
  667.  
  668.    if (cso->tokens)
  669.       prog->pipe.tokens = tgsi_dup_tokens(cso->tokens);
  670.  
  671.    if (cso->stream_output.num_outputs)
  672.       prog->pipe.stream_output = cso->stream_output;
  673.  
  674.    return (void *)prog;
  675. }
  676.  
  677. static void
  678. nvc0_sp_state_delete(struct pipe_context *pipe, void *hwcso)
  679. {
  680.    struct nvc0_program *prog = (struct nvc0_program *)hwcso;
  681.  
  682.    nvc0_program_destroy(nvc0_context(pipe), prog);
  683.  
  684.    FREE((void *)prog->pipe.tokens);
  685.    FREE(prog);
  686. }
  687.  
/* Create a vertex program (thin wrapper over nvc0_sp_state_create). */
static void *
nvc0_vp_state_create(struct pipe_context *pipe,
                     const struct pipe_shader_state *cso)
{
   return nvc0_sp_state_create(pipe, cso, PIPE_SHADER_VERTEX);
}
  694.  
  695. static void
  696. nvc0_vp_state_bind(struct pipe_context *pipe, void *hwcso)
  697. {
  698.     struct nvc0_context *nvc0 = nvc0_context(pipe);
  699.  
  700.     nvc0->vertprog = hwcso;
  701.     nvc0->dirty |= NVC0_NEW_VERTPROG;
  702. }
  703.  
/* Create a fragment program (thin wrapper over nvc0_sp_state_create). */
static void *
nvc0_fp_state_create(struct pipe_context *pipe,
                     const struct pipe_shader_state *cso)
{
   return nvc0_sp_state_create(pipe, cso, PIPE_SHADER_FRAGMENT);
}
  710.  
  711. static void
  712. nvc0_fp_state_bind(struct pipe_context *pipe, void *hwcso)
  713. {
  714.     struct nvc0_context *nvc0 = nvc0_context(pipe);
  715.  
  716.     nvc0->fragprog = hwcso;
  717.     nvc0->dirty |= NVC0_NEW_FRAGPROG;
  718. }
  719.  
/* Create a geometry program (thin wrapper over nvc0_sp_state_create). */
static void *
nvc0_gp_state_create(struct pipe_context *pipe,
                     const struct pipe_shader_state *cso)
{
   return nvc0_sp_state_create(pipe, cso, PIPE_SHADER_GEOMETRY);
}
  726.  
  727. static void
  728. nvc0_gp_state_bind(struct pipe_context *pipe, void *hwcso)
  729. {
  730.     struct nvc0_context *nvc0 = nvc0_context(pipe);
  731.  
  732.     nvc0->gmtyprog = hwcso;
  733.     nvc0->dirty |= NVC0_NEW_GMTYPROG;
  734. }
  735.  
  736. static void *
  737. nvc0_cp_state_create(struct pipe_context *pipe,
  738.                      const struct pipe_compute_state *cso)
  739. {
  740.    struct nvc0_program *prog;
  741.  
  742.    prog = CALLOC_STRUCT(nvc0_program);
  743.    if (!prog)
  744.       return NULL;
  745.    prog->type = PIPE_SHADER_COMPUTE;
  746.  
  747.    prog->cp.smem_size = cso->req_local_mem;
  748.    prog->cp.lmem_size = cso->req_private_mem;
  749.    prog->parm_size = cso->req_input_mem;
  750.  
  751.    prog->pipe.tokens = tgsi_dup_tokens((const struct tgsi_token *)cso->prog);
  752.  
  753.    return (void *)prog;
  754. }
  755.  
  756. static void
  757. nvc0_cp_state_bind(struct pipe_context *pipe, void *hwcso)
  758. {
  759.     struct nvc0_context *nvc0 = nvc0_context(pipe);
  760.  
  761.     nvc0->compprog = hwcso;
  762.     nvc0->dirty_cp |= NVC0_NEW_CP_PROGRAM;
  763. }
  764.  
/* Bind a constant buffer at (shader stage, index); cb may be NULL to unbind.
 * cb->user_buffer supplies CPU data directly; otherwise cb->buffer is a GPU
 * resource bound at cb->buffer_offset.  Note u.buf and u.data share a union.
 */
static void
nvc0_set_constant_buffer(struct pipe_context *pipe, uint shader, uint index,
                         struct pipe_constant_buffer *cb)
{
   struct nvc0_context *nvc0 = nvc0_context(pipe);
   struct pipe_resource *res = cb ? cb->buffer : NULL;
   const unsigned s = nvc0_shader_stage(shader);
   const unsigned i = index;

   if (unlikely(shader == PIPE_SHADER_COMPUTE)) {
      assert(!cb || !cb->user_buffer); /* user constbufs not handled here */
      if (nvc0->constbuf[s][i].u.buf)
         nouveau_bufctx_reset(nvc0->bufctx_cp, NVC0_BIND_CP_CB(i));

      nvc0->dirty_cp |= NVC0_NEW_CP_CONSTBUF;
   } else {
      /* only clear a bufctx binding for a real resource; a user pointer
       * occupies the same union slot but holds no reference */
      if (nvc0->constbuf[s][i].user)
         nvc0->constbuf[s][i].u.buf = NULL;
      else
      if (nvc0->constbuf[s][i].u.buf)
         nouveau_bufctx_reset(nvc0->bufctx_3d, NVC0_BIND_CB(s, i));

      nvc0->dirty |= NVC0_NEW_CONSTBUF;
   }
   nvc0->constbuf_dirty[s] |= 1 << i;

   pipe_resource_reference(&nvc0->constbuf[s][i].u.buf, res);

   nvc0->constbuf[s][i].user = (cb && cb->user_buffer) ? TRUE : FALSE;
   if (nvc0->constbuf[s][i].user) {
      nvc0->constbuf[s][i].u.data = cb->user_buffer;
      nvc0->constbuf[s][i].size = cb->buffer_size;
      nvc0->constbuf_valid[s] |= 1 << i;
   } else
   if (cb) {
      nvc0->constbuf[s][i].offset = cb->buffer_offset;
      /* NOTE(review): size rounded up to 0x100 — presumably a hw bind
       * granularity requirement; confirm against constbuf validation */
      nvc0->constbuf[s][i].size = align(cb->buffer_size, 0x100);
      nvc0->constbuf_valid[s] |= 1 << i;
   }
   else {
      nvc0->constbuf_valid[s] &= ~(1 << i);
   }
}
  808.  
  809. /* =============================================================================
  810.  */
  811.  
  812. static void
  813. nvc0_set_blend_color(struct pipe_context *pipe,
  814.                      const struct pipe_blend_color *bcol)
  815. {
  816.     struct nvc0_context *nvc0 = nvc0_context(pipe);
  817.  
  818.     nvc0->blend_colour = *bcol;
  819.     nvc0->dirty |= NVC0_NEW_BLEND_COLOUR;
  820. }
  821.  
  822. static void
  823. nvc0_set_stencil_ref(struct pipe_context *pipe,
  824.                      const struct pipe_stencil_ref *sr)
  825. {
  826.     struct nvc0_context *nvc0 = nvc0_context(pipe);
  827.  
  828.     nvc0->stencil_ref = *sr;
  829.     nvc0->dirty |= NVC0_NEW_STENCIL_REF;
  830. }
  831.  
  832. static void
  833. nvc0_set_clip_state(struct pipe_context *pipe,
  834.                     const struct pipe_clip_state *clip)
  835. {
  836.     struct nvc0_context *nvc0 = nvc0_context(pipe);
  837.  
  838.     memcpy(nvc0->clip.ucp, clip->ucp, sizeof(clip->ucp));
  839.  
  840.     nvc0->dirty |= NVC0_NEW_CLIP;
  841. }
  842.  
  843. static void
  844. nvc0_set_sample_mask(struct pipe_context *pipe, unsigned sample_mask)
  845. {
  846.     struct nvc0_context *nvc0 = nvc0_context(pipe);
  847.  
  848.     nvc0->sample_mask = sample_mask;
  849.     nvc0->dirty |= NVC0_NEW_SAMPLE_MASK;
  850. }
  851.  
  852. static void
  853. nvc0_set_min_samples(struct pipe_context *pipe, unsigned min_samples)
  854. {
  855.    struct nvc0_context *nvc0 = nvc0_context(pipe);
  856.  
  857.    if (nvc0->min_samples != min_samples) {
  858.       nvc0->min_samples = min_samples;
  859.       nvc0->dirty |= NVC0_NEW_MIN_SAMPLES;
  860.    }
  861. }
  862.  
  863. static void
  864. nvc0_set_framebuffer_state(struct pipe_context *pipe,
  865.                            const struct pipe_framebuffer_state *fb)
  866. {
  867.     struct nvc0_context *nvc0 = nvc0_context(pipe);
  868.     unsigned i;
  869.  
  870.     nouveau_bufctx_reset(nvc0->bufctx_3d, NVC0_BIND_FB);
  871.  
  872.     for (i = 0; i < fb->nr_cbufs; ++i)
  873.        pipe_surface_reference(&nvc0->framebuffer.cbufs[i], fb->cbufs[i]);
  874.     for (; i < nvc0->framebuffer.nr_cbufs; ++i)
  875.        pipe_surface_reference(&nvc0->framebuffer.cbufs[i], NULL);
  876.  
  877.     nvc0->framebuffer.nr_cbufs = fb->nr_cbufs;
  878.  
  879.     nvc0->framebuffer.width = fb->width;
  880.     nvc0->framebuffer.height = fb->height;
  881.  
  882.     pipe_surface_reference(&nvc0->framebuffer.zsbuf, fb->zsbuf);
  883.  
  884.     nvc0->dirty |= NVC0_NEW_FRAMEBUFFER;
  885. }
  886.  
  887. static void
  888. nvc0_set_polygon_stipple(struct pipe_context *pipe,
  889.                          const struct pipe_poly_stipple *stipple)
  890. {
  891.     struct nvc0_context *nvc0 = nvc0_context(pipe);
  892.  
  893.     nvc0->stipple = *stipple;
  894.     nvc0->dirty |= NVC0_NEW_STIPPLE;
  895. }
  896.  
  897. static void
  898. nvc0_set_scissor_states(struct pipe_context *pipe,
  899.                         unsigned start_slot,
  900.                         unsigned num_scissors,
  901.                         const struct pipe_scissor_state *scissor)
  902. {
  903.    struct nvc0_context *nvc0 = nvc0_context(pipe);
  904.    int i;
  905.  
  906.    assert(start_slot + num_scissors <= NVC0_MAX_VIEWPORTS);
  907.    for (i = 0; i < num_scissors; i++) {
  908.       if (!memcmp(&nvc0->scissors[start_slot + i], &scissor[i], sizeof(*scissor)))
  909.          continue;
  910.       nvc0->scissors[start_slot + i] = scissor[i];
  911.       nvc0->scissors_dirty |= 1 << (start_slot + i);
  912.       nvc0->dirty |= NVC0_NEW_SCISSOR;
  913.    }
  914. }
  915.  
  916. static void
  917. nvc0_set_viewport_states(struct pipe_context *pipe,
  918.                          unsigned start_slot,
  919.                          unsigned num_viewports,
  920.                          const struct pipe_viewport_state *vpt)
  921. {
  922.    struct nvc0_context *nvc0 = nvc0_context(pipe);
  923.    int i;
  924.  
  925.    assert(start_slot + num_viewports <= NVC0_MAX_VIEWPORTS);
  926.    for (i = 0; i < num_viewports; i++) {
  927.       if (!memcmp(&nvc0->viewports[start_slot + i], &vpt[i], sizeof(*vpt)))
  928.          continue;
  929.       nvc0->viewports[start_slot + i] = vpt[i];
  930.       nvc0->viewports_dirty |= 1 << (start_slot + i);
  931.       nvc0->dirty |= NVC0_NEW_VIEWPORT;
  932.    }
  933.  
  934. }
  935.  
  936. static void
  937. nvc0_set_vertex_buffers(struct pipe_context *pipe,
  938.                         unsigned start_slot, unsigned count,
  939.                         const struct pipe_vertex_buffer *vb)
  940. {
  941.     struct nvc0_context *nvc0 = nvc0_context(pipe);
  942.     unsigned i;
  943.  
  944.     util_set_vertex_buffers_count(nvc0->vtxbuf, &nvc0->num_vtxbufs, vb,
  945.                                   start_slot, count);
  946.  
  947.     if (!vb) {
  948.        nvc0->vbo_user &= ~(((1ull << count) - 1) << start_slot);
  949.        nvc0->constant_vbos &= ~(((1ull << count) - 1) << start_slot);
  950.        return;
  951.     }
  952.  
  953.     for (i = 0; i < count; ++i) {
  954.        unsigned dst_index = start_slot + i;
  955.  
  956.        if (vb[i].user_buffer) {
  957.           nvc0->vbo_user |= 1 << dst_index;
  958.           if (!vb[i].stride && nvc0->screen->eng3d->oclass < GM107_3D_CLASS)
  959.              nvc0->constant_vbos |= 1 << dst_index;
  960.           else
  961.              nvc0->constant_vbos &= ~(1 << dst_index);
  962.        } else {
  963.           nvc0->vbo_user &= ~(1 << dst_index);
  964.           nvc0->constant_vbos &= ~(1 << dst_index);
  965.        }
  966.     }
  967.  
  968.     nvc0->dirty |= NVC0_NEW_ARRAYS;
  969.     nouveau_bufctx_reset(nvc0->bufctx_3d, NVC0_BIND_VTX);
  970. }
  971.  
  972. static void
  973. nvc0_set_index_buffer(struct pipe_context *pipe,
  974.                       const struct pipe_index_buffer *ib)
  975. {
  976.     struct nvc0_context *nvc0 = nvc0_context(pipe);
  977.  
  978.     if (nvc0->idxbuf.buffer)
  979.        nouveau_bufctx_reset(nvc0->bufctx_3d, NVC0_BIND_IDX);
  980.  
  981.     if (ib) {
  982.        pipe_resource_reference(&nvc0->idxbuf.buffer, ib->buffer);
  983.        nvc0->idxbuf.index_size = ib->index_size;
  984.        if (ib->buffer) {
  985.           nvc0->idxbuf.offset = ib->offset;
  986.           nvc0->dirty |= NVC0_NEW_IDXBUF;
  987.        } else {
  988.           nvc0->idxbuf.user_buffer = ib->user_buffer;
  989.           nvc0->dirty &= ~NVC0_NEW_IDXBUF;
  990.        }
  991.     } else {
  992.        nvc0->dirty &= ~NVC0_NEW_IDXBUF;
  993.        pipe_resource_reference(&nvc0->idxbuf.buffer, NULL);
  994.     }
  995. }
  996.  
  997. static void
  998. nvc0_vertex_state_bind(struct pipe_context *pipe, void *hwcso)
  999. {
  1000.     struct nvc0_context *nvc0 = nvc0_context(pipe);
  1001.  
  1002.     nvc0->vertex = hwcso;
  1003.     nvc0->dirty |= NVC0_NEW_VERTEX;
  1004. }
  1005.  
  1006. static struct pipe_stream_output_target *
  1007. nvc0_so_target_create(struct pipe_context *pipe,
  1008.                       struct pipe_resource *res,
  1009.                       unsigned offset, unsigned size)
  1010. {
  1011.    struct nv04_resource *buf = (struct nv04_resource *)res;
  1012.    struct nvc0_so_target *targ = MALLOC_STRUCT(nvc0_so_target);
  1013.    if (!targ)
  1014.       return NULL;
  1015.  
  1016.    targ->pq = pipe->create_query(pipe, NVC0_QUERY_TFB_BUFFER_OFFSET, 0);
  1017.    if (!targ->pq) {
  1018.       FREE(targ);
  1019.       return NULL;
  1020.    }
  1021.    targ->clean = TRUE;
  1022.  
  1023.    targ->pipe.buffer_size = size;
  1024.    targ->pipe.buffer_offset = offset;
  1025.    targ->pipe.context = pipe;
  1026.    targ->pipe.buffer = NULL;
  1027.    pipe_resource_reference(&targ->pipe.buffer, res);
  1028.    pipe_reference_init(&targ->pipe.reference, 1);
  1029.  
  1030.    assert(buf->base.target == PIPE_BUFFER);
  1031.    util_range_add(&buf->valid_buffer_range, offset, offset + size);
  1032.  
  1033.    return &targ->pipe;
  1034. }
  1035.  
  1036. static void
  1037. nvc0_so_target_destroy(struct pipe_context *pipe,
  1038.                        struct pipe_stream_output_target *ptarg)
  1039. {
  1040.    struct nvc0_so_target *targ = nvc0_so_target(ptarg);
  1041.    pipe->destroy_query(pipe, targ->pq);
  1042.    pipe_resource_reference(&targ->pipe.buffer, NULL);
  1043.    FREE(targ);
  1044. }
  1045.  
/* Bind a new set of transform feedback targets.
 * An offset of ~0 means "append": continue writing at the target's current
 * offset instead of restarting at an explicit one.
 */
static void
nvc0_set_transform_feedback_targets(struct pipe_context *pipe,
                                    unsigned num_targets,
                                    struct pipe_stream_output_target **targets,
                                    const unsigned *offsets)
{
   struct nvc0_context *nvc0 = nvc0_context(pipe);
   unsigned i;
   /* passed by reference so the save_offset helper serializes at most once
    * across all targets */
   boolean serialize = TRUE;

   assert(num_targets <= 4);

   for (i = 0; i < num_targets; ++i) {
      const boolean changed = nvc0->tfbbuf[i] != targets[i];
      const boolean append = (offsets[i] == ((unsigned)-1));
      /* same target, appending: nothing to do for this slot */
      if (!changed && append)
         continue;
      nvc0->tfbbuf_dirty |= 1 << i;

      /* save the outgoing target's current offset before replacing it */
      if (nvc0->tfbbuf[i] && changed)
         nvc0_so_target_save_offset(pipe, nvc0->tfbbuf[i], i, &serialize);

      /* restarting at an explicit offset: mark the target clean again */
      if (targets[i] && !append)
         nvc0_so_target(targets[i])->clean = TRUE;

      pipe_so_target_reference(&nvc0->tfbbuf[i], targets[i]);
   }
   /* unbind any previously bound targets beyond the new count */
   for (; i < nvc0->num_tfbbufs; ++i) {
      if (nvc0->tfbbuf[i]) {
         nvc0->tfbbuf_dirty |= 1 << i;
         nvc0_so_target_save_offset(pipe, nvc0->tfbbuf[i], i, &serialize);
         pipe_so_target_reference(&nvc0->tfbbuf[i], NULL);
      }
   }
   nvc0->num_tfbbufs = num_targets;

   if (nvc0->tfbbuf_dirty)
      nvc0->dirty |= NVC0_NEW_TFB_TARGETS;
}
  1085.  
  1086. static void
  1087. nvc0_bind_surfaces_range(struct nvc0_context *nvc0, const unsigned t,
  1088.                          unsigned start, unsigned nr,
  1089.                          struct pipe_surface **psurfaces)
  1090. {
  1091.    const unsigned end = start + nr;
  1092.    const unsigned mask = ((1 << nr) - 1) << start;
  1093.    unsigned i;
  1094.  
  1095.    if (psurfaces) {
  1096.       for (i = start; i < end; ++i) {
  1097.          const unsigned p = i - start;
  1098.          if (psurfaces[p])
  1099.             nvc0->surfaces_valid[t] |= (1 << i);
  1100.          else
  1101.             nvc0->surfaces_valid[t] &= ~(1 << i);
  1102.          pipe_surface_reference(&nvc0->surfaces[t][i], psurfaces[p]);
  1103.       }
  1104.    } else {
  1105.       for (i = start; i < end; ++i)
  1106.          pipe_surface_reference(&nvc0->surfaces[t][i], NULL);
  1107.       nvc0->surfaces_valid[t] &= ~mask;
  1108.    }
  1109.    nvc0->surfaces_dirty[t] |= mask;
  1110.  
  1111.    if (t == 0)
  1112.       nouveau_bufctx_reset(nvc0->bufctx_3d, NVC0_BIND_SUF);
  1113.    else
  1114.       nouveau_bufctx_reset(nvc0->bufctx_cp, NVC0_BIND_CP_SUF);
  1115. }
  1116.  
  1117. static void
  1118. nvc0_set_compute_resources(struct pipe_context *pipe,
  1119.                            unsigned start, unsigned nr,
  1120.                            struct pipe_surface **resources)
  1121. {
  1122.    nvc0_bind_surfaces_range(nvc0_context(pipe), 1, start, nr, resources);
  1123.  
  1124.    nvc0_context(pipe)->dirty_cp |= NVC0_NEW_CP_SURFACES;
  1125. }
  1126.  
  1127. static void
  1128. nvc0_set_shader_resources(struct pipe_context *pipe,
  1129.                           unsigned start, unsigned nr,
  1130.                           struct pipe_surface **resources)
  1131. {
  1132.    nvc0_bind_surfaces_range(nvc0_context(pipe), 0, start, nr, resources);
  1133.  
  1134.    nvc0_context(pipe)->dirty |= NVC0_NEW_SURFACES;
  1135. }
  1136.  
  1137. static INLINE void
  1138. nvc0_set_global_handle(uint32_t *phandle, struct pipe_resource *res)
  1139. {
  1140.    struct nv04_resource *buf = nv04_resource(res);
  1141.    if (buf) {
  1142.       uint64_t limit = (buf->address + buf->base.width0) - 1;
  1143.       if (limit < (1ULL << 32)) {
  1144.          *phandle = (uint32_t)buf->address;
  1145.       } else {
  1146.          NOUVEAU_ERR("Cannot map into TGSI_RESOURCE_GLOBAL: "
  1147.                      "resource not contained within 32-bit address space !\n");
  1148.          *phandle = 0;
  1149.       }
  1150.    } else {
  1151.       *phandle = 0;
  1152.    }
  1153. }
  1154.  
  1155. static void
  1156. nvc0_set_global_bindings(struct pipe_context *pipe,
  1157.                          unsigned start, unsigned nr,
  1158.                          struct pipe_resource **resources,
  1159.                          uint32_t **handles)
  1160. {
  1161.    struct nvc0_context *nvc0 = nvc0_context(pipe);
  1162.    struct pipe_resource **ptr;
  1163.    unsigned i;
  1164.    const unsigned end = start + nr;
  1165.  
  1166.    if (nvc0->global_residents.size <= (end * sizeof(struct pipe_resource *))) {
  1167.       const unsigned old_size = nvc0->global_residents.size;
  1168.       const unsigned req_size = end * sizeof(struct pipe_resource *);
  1169.       util_dynarray_resize(&nvc0->global_residents, req_size);
  1170.       memset((uint8_t *)nvc0->global_residents.data + old_size, 0,
  1171.              req_size - old_size);
  1172.    }
  1173.  
  1174.    if (resources) {
  1175.       ptr = util_dynarray_element(
  1176.          &nvc0->global_residents, struct pipe_resource *, start);
  1177.       for (i = 0; i < nr; ++i) {
  1178.          pipe_resource_reference(&ptr[i], resources[i]);
  1179.          nvc0_set_global_handle(handles[i], resources[i]);
  1180.       }
  1181.    } else {
  1182.       ptr = util_dynarray_element(
  1183.          &nvc0->global_residents, struct pipe_resource *, start);
  1184.       for (i = 0; i < nr; ++i)
  1185.          pipe_resource_reference(&ptr[i], NULL);
  1186.    }
  1187.  
  1188.    nouveau_bufctx_reset(nvc0->bufctx_cp, NVC0_BIND_CP_GLOBAL);
  1189.  
  1190.    nvc0->dirty_cp = NVC0_NEW_CP_GLOBALS;
  1191. }
  1192.  
/* Fill in all state-setting entry points of the gallium pipe_context and
 * initialize the default sample mask / minimum sample count.
 */
void
nvc0_init_state_functions(struct nvc0_context *nvc0)
{
   struct pipe_context *pipe = &nvc0->base.pipe;

   /* CSO state objects: blend, rasterizer, depth/stencil/alpha */
   pipe->create_blend_state = nvc0_blend_state_create;
   pipe->bind_blend_state = nvc0_blend_state_bind;
   pipe->delete_blend_state = nvc0_blend_state_delete;

   pipe->create_rasterizer_state = nvc0_rasterizer_state_create;
   pipe->bind_rasterizer_state = nvc0_rasterizer_state_bind;
   pipe->delete_rasterizer_state = nvc0_rasterizer_state_delete;

   pipe->create_depth_stencil_alpha_state = nvc0_zsa_state_create;
   pipe->bind_depth_stencil_alpha_state = nvc0_zsa_state_bind;
   pipe->delete_depth_stencil_alpha_state = nvc0_zsa_state_delete;

   /* samplers and sampler views (sampler create is shared with nv50) */
   pipe->create_sampler_state = nv50_sampler_state_create;
   pipe->delete_sampler_state = nvc0_sampler_state_delete;
   pipe->bind_sampler_states = nvc0_bind_sampler_states;

   pipe->create_sampler_view = nvc0_create_sampler_view;
   pipe->sampler_view_destroy = nvc0_sampler_view_destroy;
   pipe->set_sampler_views = nvc0_set_sampler_views;

   /* shader stages; all stages share the same delete hook */
   pipe->create_vs_state = nvc0_vp_state_create;
   pipe->create_fs_state = nvc0_fp_state_create;
   pipe->create_gs_state = nvc0_gp_state_create;
   pipe->bind_vs_state = nvc0_vp_state_bind;
   pipe->bind_fs_state = nvc0_fp_state_bind;
   pipe->bind_gs_state = nvc0_gp_state_bind;
   pipe->delete_vs_state = nvc0_sp_state_delete;
   pipe->delete_fs_state = nvc0_sp_state_delete;
   pipe->delete_gs_state = nvc0_sp_state_delete;

   pipe->create_compute_state = nvc0_cp_state_create;
   pipe->bind_compute_state = nvc0_cp_state_bind;
   pipe->delete_compute_state = nvc0_sp_state_delete;

   /* parameter-style (non-CSO) state setters */
   pipe->set_blend_color = nvc0_set_blend_color;
   pipe->set_stencil_ref = nvc0_set_stencil_ref;
   pipe->set_clip_state = nvc0_set_clip_state;
   pipe->set_sample_mask = nvc0_set_sample_mask;
   pipe->set_min_samples = nvc0_set_min_samples;
   pipe->set_constant_buffer = nvc0_set_constant_buffer;
   pipe->set_framebuffer_state = nvc0_set_framebuffer_state;
   pipe->set_polygon_stipple = nvc0_set_polygon_stipple;
   pipe->set_scissor_states = nvc0_set_scissor_states;
   pipe->set_viewport_states = nvc0_set_viewport_states;

   /* vertex fetch state */
   pipe->create_vertex_elements_state = nvc0_vertex_state_create;
   pipe->delete_vertex_elements_state = nvc0_vertex_state_delete;
   pipe->bind_vertex_elements_state = nvc0_vertex_state_bind;

   pipe->set_vertex_buffers = nvc0_set_vertex_buffers;
   pipe->set_index_buffer = nvc0_set_index_buffer;

   /* stream output (transform feedback) */
   pipe->create_stream_output_target = nvc0_so_target_create;
   pipe->stream_output_target_destroy = nvc0_so_target_destroy;
   pipe->set_stream_output_targets = nvc0_set_transform_feedback_targets;

   /* compute / shader resource bindings */
   pipe->set_global_binding = nvc0_set_global_bindings;
   pipe->set_compute_resources = nvc0_set_compute_resources;
   pipe->set_shader_resources = nvc0_set_shader_resources;

   /* defaults: all samples enabled, at least one sample shaded */
   nvc0->sample_mask = ~0;
   nvc0->min_samples = 1;
}
  1261.