/*
 * Copyright 2010 Christoph Bumiller
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "pipe/p_defines.h"
#include "util/u_helpers.h"
#include "util/u_inlines.h"
#include "util/u_transfer.h"

#include "tgsi/tgsi_parse.h"

#include "nvc0_stateobj.h"
#include "nvc0_context.h"

#include "nvc0_3d.xml.h"
#include "nv50/nv50_texture.xml.h"

#include "nouveau/nouveau_gldefs.h"

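/* Convert a gallium PIPE_MASK_* colormask (one bit per component) into the
 * nibble-per-component layout written to the COLOR_MASK methods:
 * R -> bit 0, G -> bit 4, B -> bit 8, A -> bit 12.
 * E.g. nvc0_colormask(PIPE_MASK_RGBA) == 0x1111.
 */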
static INLINE uint32_t
nvc0_colormask(unsigned mask)
{
    uint32_t ret = 0;

    if (mask & PIPE_MASK_R)
        ret |= 0x0001;
    if (mask & PIPE_MASK_G)
        ret |= 0x0010;
    if (mask & PIPE_MASK_B)
        ret |= 0x0100;
    if (mask & PIPE_MASK_A)
        ret |= 0x1000;

    return ret;
}

#define NVC0_BLEND_FACTOR_CASE(a, b) \
   case PIPE_BLENDFACTOR_##a: return NV50_3D_BLEND_FACTOR_##b

static INLINE uint32_t
nvc0_blend_fac(unsigned factor)
{
   switch (factor) {
   NVC0_BLEND_FACTOR_CASE(ONE, ONE);
   NVC0_BLEND_FACTOR_CASE(SRC_COLOR, SRC_COLOR);
   NVC0_BLEND_FACTOR_CASE(SRC_ALPHA, SRC_ALPHA);
   NVC0_BLEND_FACTOR_CASE(DST_ALPHA, DST_ALPHA);
   NVC0_BLEND_FACTOR_CASE(DST_COLOR, DST_COLOR);
   NVC0_BLEND_FACTOR_CASE(SRC_ALPHA_SATURATE, SRC_ALPHA_SATURATE);
   NVC0_BLEND_FACTOR_CASE(CONST_COLOR, CONSTANT_COLOR);
   NVC0_BLEND_FACTOR_CASE(CONST_ALPHA, CONSTANT_ALPHA);
   NVC0_BLEND_FACTOR_CASE(SRC1_COLOR, SRC1_COLOR);
   NVC0_BLEND_FACTOR_CASE(SRC1_ALPHA, SRC1_ALPHA);
   NVC0_BLEND_FACTOR_CASE(ZERO, ZERO);
   NVC0_BLEND_FACTOR_CASE(INV_SRC_COLOR, ONE_MINUS_SRC_COLOR);
   NVC0_BLEND_FACTOR_CASE(INV_SRC_ALPHA, ONE_MINUS_SRC_ALPHA);
   NVC0_BLEND_FACTOR_CASE(INV_DST_ALPHA, ONE_MINUS_DST_ALPHA);
   NVC0_BLEND_FACTOR_CASE(INV_DST_COLOR, ONE_MINUS_DST_COLOR);
   NVC0_BLEND_FACTOR_CASE(INV_CONST_COLOR, ONE_MINUS_CONSTANT_COLOR);
   NVC0_BLEND_FACTOR_CASE(INV_CONST_ALPHA, ONE_MINUS_CONSTANT_ALPHA);
   NVC0_BLEND_FACTOR_CASE(INV_SRC1_COLOR, ONE_MINUS_SRC1_COLOR);
   NVC0_BLEND_FACTOR_CASE(INV_SRC1_ALPHA, ONE_MINUS_SRC1_ALPHA);
   default:
      return NV50_3D_BLEND_FACTOR_ZERO;
   }
}

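/* Blend CSO: the hardware methods are pre-encoded into so->state with the
 * SB_* macros. The loops below check whether any render target differs in
 * blend function or colormask, so the cheaper "common" methods can be used
 * whenever per-RT state is not actually needed.
 */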
static void *
nvc0_blend_state_create(struct pipe_context *pipe,
                        const struct pipe_blend_state *cso)
{
   struct nvc0_blend_stateobj *so = CALLOC_STRUCT(nvc0_blend_stateobj);
   int i;
   int r; /* reference */
   uint32_t ms;
   uint8_t blend_en = 0;
   boolean indep_masks = FALSE;
   boolean indep_funcs = FALSE;

   so->pipe = *cso;

   /* check which states actually have differing values */
   if (cso->independent_blend_enable) {
      for (r = 0; r < 8 && !cso->rt[r].blend_enable; ++r);
      blend_en |= 1 << r;
      for (i = r + 1; i < 8; ++i) {
         if (!cso->rt[i].blend_enable)
            continue;
         blend_en |= 1 << i;
         if (cso->rt[i].rgb_func != cso->rt[r].rgb_func ||
             cso->rt[i].rgb_src_factor != cso->rt[r].rgb_src_factor ||
             cso->rt[i].rgb_dst_factor != cso->rt[r].rgb_dst_factor ||
             cso->rt[i].alpha_func != cso->rt[r].alpha_func ||
             cso->rt[i].alpha_src_factor != cso->rt[r].alpha_src_factor ||
             cso->rt[i].alpha_dst_factor != cso->rt[r].alpha_dst_factor) {
            indep_funcs = TRUE;
            break;
         }
      }
      for (; i < 8; ++i)
         blend_en |= (cso->rt[i].blend_enable ? 1 : 0) << i;

      for (i = 1; i < 8; ++i) {
         if (cso->rt[i].colormask != cso->rt[0].colormask) {
            indep_masks = TRUE;
            break;
         }
      }
   } else {
      r = 0;
      if (cso->rt[0].blend_enable)
         blend_en = 0xff;
   }

   if (cso->logicop_enable) {
      SB_BEGIN_3D(so, LOGIC_OP_ENABLE, 2);
      SB_DATA    (so, 1);
      SB_DATA    (so, nvgl_logicop_func(cso->logicop_func));

      SB_IMMED_3D(so, MACRO_BLEND_ENABLES, 0);
   } else {
      SB_IMMED_3D(so, LOGIC_OP_ENABLE, 0);

      SB_IMMED_3D(so, BLEND_INDEPENDENT, indep_funcs);
      SB_IMMED_3D(so, MACRO_BLEND_ENABLES, blend_en);
      if (indep_funcs) {
         for (i = 0; i < 8; ++i) {
            if (cso->rt[i].blend_enable) {
               SB_BEGIN_3D(so, IBLEND_EQUATION_RGB(i), 6);
               SB_DATA    (so, nvgl_blend_eqn(cso->rt[i].rgb_func));
               SB_DATA    (so, nvc0_blend_fac(cso->rt[i].rgb_src_factor));
               SB_DATA    (so, nvc0_blend_fac(cso->rt[i].rgb_dst_factor));
               SB_DATA    (so, nvgl_blend_eqn(cso->rt[i].alpha_func));
               SB_DATA    (so, nvc0_blend_fac(cso->rt[i].alpha_src_factor));
               SB_DATA    (so, nvc0_blend_fac(cso->rt[i].alpha_dst_factor));
            }
         }
      } else
      if (blend_en) {
         SB_BEGIN_3D(so, BLEND_EQUATION_RGB, 5);
         SB_DATA    (so, nvgl_blend_eqn(cso->rt[r].rgb_func));
         SB_DATA    (so, nvc0_blend_fac(cso->rt[r].rgb_src_factor));
         SB_DATA    (so, nvc0_blend_fac(cso->rt[r].rgb_dst_factor));
         SB_DATA    (so, nvgl_blend_eqn(cso->rt[r].alpha_func));
         SB_DATA    (so, nvc0_blend_fac(cso->rt[r].alpha_src_factor));
         SB_BEGIN_3D(so, BLEND_FUNC_DST_ALPHA, 1);
         SB_DATA    (so, nvc0_blend_fac(cso->rt[r].alpha_dst_factor));
      }

      SB_IMMED_3D(so, COLOR_MASK_COMMON, !indep_masks);
      if (indep_masks) {
         SB_BEGIN_3D(so, COLOR_MASK(0), 8);
         for (i = 0; i < 8; ++i)
            SB_DATA(so, nvc0_colormask(cso->rt[i].colormask));
      } else {
         SB_BEGIN_3D(so, COLOR_MASK(0), 1);
         SB_DATA    (so, nvc0_colormask(cso->rt[0].colormask));
      }
   }

   ms = 0;
   if (cso->alpha_to_coverage)
      ms |= NVC0_3D_MULTISAMPLE_CTRL_ALPHA_TO_COVERAGE;
   if (cso->alpha_to_one)
      ms |= NVC0_3D_MULTISAMPLE_CTRL_ALPHA_TO_ONE;

   SB_BEGIN_3D(so, MULTISAMPLE_CTRL, 1);
   SB_DATA    (so, ms);

   assert(so->size <= (sizeof(so->state) / sizeof(so->state[0])));
   return so;
}

static void
nvc0_blend_state_bind(struct pipe_context *pipe, void *hwcso)
{
    struct nvc0_context *nvc0 = nvc0_context(pipe);

    nvc0->blend = hwcso;
    nvc0->dirty |= NVC0_NEW_BLEND;
}

static void
nvc0_blend_state_delete(struct pipe_context *pipe, void *hwcso)
{
    FREE(hwcso);
}

/* NOTE: ignoring line_last_pixel, using FALSE (set on screen init) */
static void *
nvc0_rasterizer_state_create(struct pipe_context *pipe,
                             const struct pipe_rasterizer_state *cso)
{
    struct nvc0_rasterizer_stateobj *so;
    uint32_t reg;

    so = CALLOC_STRUCT(nvc0_rasterizer_stateobj);
    if (!so)
        return NULL;
    so->pipe = *cso;

    /* Scissor enables are handled in scissor state, as we do not want to
     * always emit 16 commands, one for each scissor rectangle, here.
     */

    SB_BEGIN_3D(so, SHADE_MODEL, 1);
    SB_DATA    (so, cso->flatshade ? NVC0_3D_SHADE_MODEL_FLAT :
                                     NVC0_3D_SHADE_MODEL_SMOOTH);
    SB_IMMED_3D(so, PROVOKING_VERTEX_LAST, !cso->flatshade_first);
    SB_IMMED_3D(so, VERTEX_TWO_SIDE_ENABLE, cso->light_twoside);

    SB_IMMED_3D(so, VERT_COLOR_CLAMP_EN, cso->clamp_vertex_color);
    SB_BEGIN_3D(so, FRAG_COLOR_CLAMP_EN, 1);
    SB_DATA    (so, cso->clamp_fragment_color ? 0x11111111 : 0x00000000);

    SB_IMMED_3D(so, MULTISAMPLE_ENABLE, cso->multisample);

    SB_IMMED_3D(so, LINE_SMOOTH_ENABLE, cso->line_smooth);
    if (cso->line_smooth)
       SB_BEGIN_3D(so, LINE_WIDTH_SMOOTH, 1);
    else
       SB_BEGIN_3D(so, LINE_WIDTH_ALIASED, 1);
    SB_DATA    (so, fui(cso->line_width));

    SB_IMMED_3D(so, LINE_STIPPLE_ENABLE, cso->line_stipple_enable);
    if (cso->line_stipple_enable) {
        SB_BEGIN_3D(so, LINE_STIPPLE_PATTERN, 1);
        SB_DATA    (so, (cso->line_stipple_pattern << 8) |
                         cso->line_stipple_factor);

    }

    SB_IMMED_3D(so, VP_POINT_SIZE_EN, cso->point_size_per_vertex);
    if (!cso->point_size_per_vertex) {
       SB_BEGIN_3D(so, POINT_SIZE, 1);
       SB_DATA    (so, fui(cso->point_size));
    }

    reg = (cso->sprite_coord_mode == PIPE_SPRITE_COORD_UPPER_LEFT) ?
       NVC0_3D_POINT_COORD_REPLACE_COORD_ORIGIN_UPPER_LEFT :
       NVC0_3D_POINT_COORD_REPLACE_COORD_ORIGIN_LOWER_LEFT;

    SB_BEGIN_3D(so, POINT_COORD_REPLACE, 1);
    SB_DATA    (so, ((cso->sprite_coord_enable & 0xff) << 3) | reg);
    SB_IMMED_3D(so, POINT_SPRITE_ENABLE, cso->point_quad_rasterization);
    SB_IMMED_3D(so, POINT_SMOOTH_ENABLE, cso->point_smooth);

    SB_BEGIN_3D(so, MACRO_POLYGON_MODE_FRONT, 1);
    SB_DATA    (so, nvgl_polygon_mode(cso->fill_front));
    SB_BEGIN_3D(so, MACRO_POLYGON_MODE_BACK, 1);
    SB_DATA    (so, nvgl_polygon_mode(cso->fill_back));
    SB_IMMED_3D(so, POLYGON_SMOOTH_ENABLE, cso->poly_smooth);

    SB_BEGIN_3D(so, CULL_FACE_ENABLE, 3);
    SB_DATA    (so, cso->cull_face != PIPE_FACE_NONE);
    SB_DATA    (so, cso->front_ccw ? NVC0_3D_FRONT_FACE_CCW :
                                     NVC0_3D_FRONT_FACE_CW);
    switch (cso->cull_face) {
    case PIPE_FACE_FRONT_AND_BACK:
       SB_DATA(so, NVC0_3D_CULL_FACE_FRONT_AND_BACK);
       break;
    case PIPE_FACE_FRONT:
       SB_DATA(so, NVC0_3D_CULL_FACE_FRONT);
       break;
    case PIPE_FACE_BACK:
    default:
       SB_DATA(so, NVC0_3D_CULL_FACE_BACK);
       break;
    }

    SB_IMMED_3D(so, POLYGON_STIPPLE_ENABLE, cso->poly_stipple_enable);
    SB_BEGIN_3D(so, POLYGON_OFFSET_POINT_ENABLE, 3);
    SB_DATA    (so, cso->offset_point);
    SB_DATA    (so, cso->offset_line);
    SB_DATA    (so, cso->offset_tri);

    if (cso->offset_point || cso->offset_line || cso->offset_tri) {
        SB_BEGIN_3D(so, POLYGON_OFFSET_FACTOR, 1);
        SB_DATA    (so, fui(cso->offset_scale));
        SB_BEGIN_3D(so, POLYGON_OFFSET_UNITS, 1);
        SB_DATA    (so, fui(cso->offset_units * 2.0f));
        SB_BEGIN_3D(so, POLYGON_OFFSET_CLAMP, 1);
        SB_DATA    (so, fui(cso->offset_clamp));
    }

    if (cso->depth_clip)
       reg = NVC0_3D_VIEW_VOLUME_CLIP_CTRL_UNK1_UNK1;
    else
       reg =
          NVC0_3D_VIEW_VOLUME_CLIP_CTRL_UNK1_UNK1 |
          NVC0_3D_VIEW_VOLUME_CLIP_CTRL_DEPTH_CLAMP_NEAR |
          NVC0_3D_VIEW_VOLUME_CLIP_CTRL_DEPTH_CLAMP_FAR |
          NVC0_3D_VIEW_VOLUME_CLIP_CTRL_UNK12_UNK2;

    SB_BEGIN_3D(so, VIEW_VOLUME_CLIP_CTRL, 1);
    SB_DATA    (so, reg);

    assert(so->size <= (sizeof(so->state) / sizeof(so->state[0])));
    return (void *)so;
}

static void
nvc0_rasterizer_state_bind(struct pipe_context *pipe, void *hwcso)
{
   struct nvc0_context *nvc0 = nvc0_context(pipe);

   nvc0->rast = hwcso;
   nvc0->dirty |= NVC0_NEW_RASTERIZER;
}

static void
nvc0_rasterizer_state_delete(struct pipe_context *pipe, void *hwcso)
{
   FREE(hwcso);
}

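/* Depth/stencil/alpha CSO: again pre-encoded into so->state; the back-face
 * stencil methods are only emitted when stencil[1] is enabled.
 */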
static void *
nvc0_zsa_state_create(struct pipe_context *pipe,
                      const struct pipe_depth_stencil_alpha_state *cso)
{
   struct nvc0_zsa_stateobj *so = CALLOC_STRUCT(nvc0_zsa_stateobj);

   so->pipe = *cso;

   SB_IMMED_3D(so, DEPTH_TEST_ENABLE, cso->depth.enabled);
   if (cso->depth.enabled) {
      SB_IMMED_3D(so, DEPTH_WRITE_ENABLE, cso->depth.writemask);
      SB_BEGIN_3D(so, DEPTH_TEST_FUNC, 1);
      SB_DATA    (so, nvgl_comparison_op(cso->depth.func));
   }

   if (cso->stencil[0].enabled) {
      SB_BEGIN_3D(so, STENCIL_ENABLE, 5);
      SB_DATA    (so, 1);
      SB_DATA    (so, nvgl_stencil_op(cso->stencil[0].fail_op));
      SB_DATA    (so, nvgl_stencil_op(cso->stencil[0].zfail_op));
      SB_DATA    (so, nvgl_stencil_op(cso->stencil[0].zpass_op));
      SB_DATA    (so, nvgl_comparison_op(cso->stencil[0].func));
      SB_BEGIN_3D(so, STENCIL_FRONT_FUNC_MASK, 2);
      SB_DATA    (so, cso->stencil[0].valuemask);
      SB_DATA    (so, cso->stencil[0].writemask);
   } else {
      SB_IMMED_3D(so, STENCIL_ENABLE, 0);
   }

   if (cso->stencil[1].enabled) {
      assert(cso->stencil[0].enabled);
      SB_BEGIN_3D(so, STENCIL_TWO_SIDE_ENABLE, 5);
      SB_DATA    (so, 1);
      SB_DATA    (so, nvgl_stencil_op(cso->stencil[1].fail_op));
      SB_DATA    (so, nvgl_stencil_op(cso->stencil[1].zfail_op));
      SB_DATA    (so, nvgl_stencil_op(cso->stencil[1].zpass_op));
      SB_DATA    (so, nvgl_comparison_op(cso->stencil[1].func));
      SB_BEGIN_3D(so, STENCIL_BACK_MASK, 2);
      SB_DATA    (so, cso->stencil[1].writemask);
      SB_DATA    (so, cso->stencil[1].valuemask);
   } else
   if (cso->stencil[0].enabled) {
      SB_IMMED_3D(so, STENCIL_TWO_SIDE_ENABLE, 0);
   }

   SB_IMMED_3D(so, ALPHA_TEST_ENABLE, cso->alpha.enabled);
   if (cso->alpha.enabled) {
      SB_BEGIN_3D(so, ALPHA_TEST_REF, 2);
      SB_DATA    (so, fui(cso->alpha.ref_value));
      SB_DATA    (so, nvgl_comparison_op(cso->alpha.func));
   }

   assert(so->size <= (sizeof(so->state) / sizeof(so->state[0])));
   return (void *)so;
}

static void
nvc0_zsa_state_bind(struct pipe_context *pipe, void *hwcso)
{
   struct nvc0_context *nvc0 = nvc0_context(pipe);

   nvc0->zsa = hwcso;
   nvc0->dirty |= NVC0_NEW_ZSA;
}

static void
nvc0_zsa_state_delete(struct pipe_context *pipe, void *hwcso)
{
   FREE(hwcso);
}

/* ====================== SAMPLERS AND TEXTURES ================================
 */

#define NV50_TSC_WRAP_CASE(n) \
    case PIPE_TEX_WRAP_##n: return NV50_TSC_WRAP_##n

static INLINE unsigned
nv50_tsc_wrap_mode(unsigned wrap)
{
   switch (wrap) {
   NV50_TSC_WRAP_CASE(REPEAT);
   NV50_TSC_WRAP_CASE(MIRROR_REPEAT);
   NV50_TSC_WRAP_CASE(CLAMP_TO_EDGE);
   NV50_TSC_WRAP_CASE(CLAMP_TO_BORDER);
   NV50_TSC_WRAP_CASE(CLAMP);
   NV50_TSC_WRAP_CASE(MIRROR_CLAMP_TO_EDGE);
   NV50_TSC_WRAP_CASE(MIRROR_CLAMP_TO_BORDER);
   NV50_TSC_WRAP_CASE(MIRROR_CLAMP);
   default:
       NOUVEAU_ERR("unknown wrap mode: %d\n", wrap);
       return NV50_TSC_WRAP_REPEAT;
   }
}

static void
nvc0_sampler_state_delete(struct pipe_context *pipe, void *hwcso)
{
   unsigned s, i;

   for (s = 0; s < 5; ++s)
      for (i = 0; i < nvc0_context(pipe)->num_samplers[s]; ++i)
         if (nvc0_context(pipe)->samplers[s][i] == hwcso)
            nvc0_context(pipe)->samplers[s][i] = NULL;

   nvc0_screen_tsc_free(nvc0_context(pipe)->screen, nv50_tsc_entry(hwcso));

   FREE(hwcso);
}

static INLINE void
nvc0_stage_sampler_states_bind(struct nvc0_context *nvc0, int s,
                               unsigned nr, void **hwcso)
{
   unsigned i;

   for (i = 0; i < nr; ++i) {
      struct nv50_tsc_entry *old = nvc0->samplers[s][i];

      if (hwcso[i] == old)
         continue;
      nvc0->samplers_dirty[s] |= 1 << i;

      nvc0->samplers[s][i] = nv50_tsc_entry(hwcso[i]);
      if (old)
         nvc0_screen_tsc_unlock(nvc0->screen, old);
   }
   for (; i < nvc0->num_samplers[s]; ++i) {
      if (nvc0->samplers[s][i]) {
         nvc0_screen_tsc_unlock(nvc0->screen, nvc0->samplers[s][i]);
         nvc0->samplers[s][i] = NULL;
      }
   }

   nvc0->num_samplers[s] = nr;

   nvc0->dirty |= NVC0_NEW_SAMPLERS;
}

static void
nvc0_vp_sampler_states_bind(struct pipe_context *pipe, unsigned nr, void **s)
{
   nvc0_stage_sampler_states_bind(nvc0_context(pipe), 0, nr, s);
}

static void
nvc0_fp_sampler_states_bind(struct pipe_context *pipe, unsigned nr, void **s)
{
   nvc0_stage_sampler_states_bind(nvc0_context(pipe), 4, nr, s);
}

static void
nvc0_gp_sampler_states_bind(struct pipe_context *pipe, unsigned nr, void **s)
{
   nvc0_stage_sampler_states_bind(nvc0_context(pipe), 3, nr, s);
}

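/* Range variant, used for the compute stage (stage index 5): only touches
 * sampler slots [start, start + nr), unlocking any TSC entries it replaces,
 * and recomputes num_samplers, scanning downwards when the tail of the
 * array becomes empty.
 */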
static void
nvc0_stage_sampler_states_bind_range(struct nvc0_context *nvc0,
                                     const unsigned s,
                                     unsigned start, unsigned nr, void **cso)
{
   const unsigned end = start + nr;
   int last_valid = -1;
   unsigned i;

   if (cso) {
      for (i = start; i < end; ++i) {
         const unsigned p = i - start;
         if (cso[p])
            last_valid = i;
         if (cso[p] == nvc0->samplers[s][i])
            continue;
         nvc0->samplers_dirty[s] |= 1 << i;

         if (nvc0->samplers[s][i])
            nvc0_screen_tsc_unlock(nvc0->screen, nvc0->samplers[s][i]);
         nvc0->samplers[s][i] = cso[p];
      }
   } else {
      for (i = start; i < end; ++i) {
         if (nvc0->samplers[s][i]) {
            nvc0_screen_tsc_unlock(nvc0->screen, nvc0->samplers[s][i]);
            nvc0->samplers[s][i] = NULL;
            nvc0->samplers_dirty[s] |= 1 << i;
         }
      }
   }

   if (nvc0->num_samplers[s] <= end) {
      if (last_valid < 0) {
         for (i = start; i && !nvc0->samplers[s][i - 1]; --i);
         nvc0->num_samplers[s] = i;
      } else {
         nvc0->num_samplers[s] = last_valid + 1;
      }
   }
}

static void
nvc0_cp_sampler_states_bind(struct pipe_context *pipe,
                            unsigned start, unsigned nr, void **cso)
{
   nvc0_stage_sampler_states_bind_range(nvc0_context(pipe), 5, start, nr, cso);

   nvc0_context(pipe)->dirty_cp |= NVC0_NEW_CP_SAMPLERS;
}

/* NOTE: only called when not referenced anywhere, won't be bound */
static void
nvc0_sampler_view_destroy(struct pipe_context *pipe,
                          struct pipe_sampler_view *view)
{
   pipe_resource_reference(&view->texture, NULL);

   nvc0_screen_tic_free(nvc0_context(pipe)->screen, nv50_tic_entry(view));

   FREE(nv50_tic_entry(view));
}

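/* Per-stage texture binding: releases the TIC entries (and bufctx references)
 * of slots that change or fall beyond the new count, references the new
 * views, and flags NVC0_NEW_TEXTURES.
 */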
static INLINE void
nvc0_stage_set_sampler_views(struct nvc0_context *nvc0, int s,
                             unsigned nr,
                             struct pipe_sampler_view **views)
{
   unsigned i;

   for (i = 0; i < nr; ++i) {
      struct nv50_tic_entry *old = nv50_tic_entry(nvc0->textures[s][i]);

      if (views[i] == nvc0->textures[s][i])
         continue;
      nvc0->textures_dirty[s] |= 1 << i;

      if (old) {
         nouveau_bufctx_reset(nvc0->bufctx_3d, NVC0_BIND_TEX(s, i));
         nvc0_screen_tic_unlock(nvc0->screen, old);
      }

      pipe_sampler_view_reference(&nvc0->textures[s][i], views[i]);
   }

   for (i = nr; i < nvc0->num_textures[s]; ++i) {
      struct nv50_tic_entry *old = nv50_tic_entry(nvc0->textures[s][i]);
      if (old) {
         nouveau_bufctx_reset(nvc0->bufctx_3d, NVC0_BIND_TEX(s, i));
         nvc0_screen_tic_unlock(nvc0->screen, old);
         pipe_sampler_view_reference(&nvc0->textures[s][i], NULL);
      }
   }

   nvc0->num_textures[s] = nr;

   nvc0->dirty |= NVC0_NEW_TEXTURES;
}

static void
nvc0_vp_set_sampler_views(struct pipe_context *pipe,
                          unsigned nr,
                          struct pipe_sampler_view **views)
{
   nvc0_stage_set_sampler_views(nvc0_context(pipe), 0, nr, views);
}

static void
nvc0_fp_set_sampler_views(struct pipe_context *pipe,
                          unsigned nr,
                          struct pipe_sampler_view **views)
{
   nvc0_stage_set_sampler_views(nvc0_context(pipe), 4, nr, views);
}

static void
nvc0_gp_set_sampler_views(struct pipe_context *pipe,
                          unsigned nr,
                          struct pipe_sampler_view **views)
{
   nvc0_stage_set_sampler_views(nvc0_context(pipe), 3, nr, views);
}

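/* Range variant of the above, used for the compute stage (s == 5): operates
 * only on slots [start, start + nr), picks the compute or 3D bufctx
 * accordingly, and recomputes num_textures the same way the sampler range
 * bind does.
 */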
static void
nvc0_stage_set_sampler_views_range(struct nvc0_context *nvc0, const unsigned s,
                                   unsigned start, unsigned nr,
                                   struct pipe_sampler_view **views)
{
   struct nouveau_bufctx *bctx = (s == 5) ? nvc0->bufctx_cp : nvc0->bufctx_3d;
   const unsigned end = start + nr;
   const unsigned bin = (s == 5) ? NVC0_BIND_CP_TEX(0) : NVC0_BIND_TEX(s, 0);
   int last_valid = -1;
   unsigned i;

   if (views) {
      for (i = start; i < end; ++i) {
         const unsigned p = i - start;
         if (views[p])
            last_valid = i;
         if (views[p] == nvc0->textures[s][i])
            continue;
         nvc0->textures_dirty[s] |= 1 << i;

         if (nvc0->textures[s][i]) {
            struct nv50_tic_entry *old = nv50_tic_entry(nvc0->textures[s][i]);
            nouveau_bufctx_reset(bctx, bin + i);
            nvc0_screen_tic_unlock(nvc0->screen, old);
         }
         pipe_sampler_view_reference(&nvc0->textures[s][i], views[p]);
      }
   } else {
      for (i = start; i < end; ++i) {
         struct nv50_tic_entry *old = nv50_tic_entry(nvc0->textures[s][i]);
         if (!old)
            continue;
         nvc0->textures_dirty[s] |= 1 << i;

         nvc0_screen_tic_unlock(nvc0->screen, old);
         pipe_sampler_view_reference(&nvc0->textures[s][i], NULL);
         nouveau_bufctx_reset(bctx, bin + i);
      }
   }

   if (nvc0->num_textures[s] <= end) {
      if (last_valid < 0) {
         for (i = start; i && !nvc0->textures[s][i - 1]; --i);
         nvc0->num_textures[s] = i;
      } else {
         nvc0->num_textures[s] = last_valid + 1;
      }
   }
}

static void
nvc0_cp_set_sampler_views(struct pipe_context *pipe,
                          unsigned start, unsigned nr,
                          struct pipe_sampler_view **views)
{
   nvc0_stage_set_sampler_views_range(nvc0_context(pipe), 5, start, nr, views);

   nvc0_context(pipe)->dirty_cp |= NVC0_NEW_CP_TEXTURES;
}


/* ============================= SHADERS =======================================
 */

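/* Common constructor for vertex/fragment/geometry shader CSOs: records the
 * stage in prog->type and keeps a copy of the TGSI tokens and of the
 * stream-output declaration.
 */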
static void *
nvc0_sp_state_create(struct pipe_context *pipe,
                     const struct pipe_shader_state *cso, unsigned type)
{
   struct nvc0_program *prog;

   prog = CALLOC_STRUCT(nvc0_program);
   if (!prog)
      return NULL;

   prog->type = type;

   if (cso->tokens)
      prog->pipe.tokens = tgsi_dup_tokens(cso->tokens);

   if (cso->stream_output.num_outputs)
      prog->pipe.stream_output = cso->stream_output;

   return (void *)prog;
}

static void
nvc0_sp_state_delete(struct pipe_context *pipe, void *hwcso)
{
   struct nvc0_program *prog = (struct nvc0_program *)hwcso;

   nvc0_program_destroy(nvc0_context(pipe), prog);

   FREE((void *)prog->pipe.tokens);
   FREE(prog);
}

static void *
nvc0_vp_state_create(struct pipe_context *pipe,
                     const struct pipe_shader_state *cso)
{
   return nvc0_sp_state_create(pipe, cso, PIPE_SHADER_VERTEX);
}

static void
nvc0_vp_state_bind(struct pipe_context *pipe, void *hwcso)
{
    struct nvc0_context *nvc0 = nvc0_context(pipe);

    nvc0->vertprog = hwcso;
    nvc0->dirty |= NVC0_NEW_VERTPROG;
}

static void *
nvc0_fp_state_create(struct pipe_context *pipe,
                     const struct pipe_shader_state *cso)
{
   return nvc0_sp_state_create(pipe, cso, PIPE_SHADER_FRAGMENT);
}

static void
nvc0_fp_state_bind(struct pipe_context *pipe, void *hwcso)
{
    struct nvc0_context *nvc0 = nvc0_context(pipe);

    nvc0->fragprog = hwcso;
    nvc0->dirty |= NVC0_NEW_FRAGPROG;
}

static void *
nvc0_gp_state_create(struct pipe_context *pipe,
                     const struct pipe_shader_state *cso)
{
   return nvc0_sp_state_create(pipe, cso, PIPE_SHADER_GEOMETRY);
}

static void
nvc0_gp_state_bind(struct pipe_context *pipe, void *hwcso)
{
    struct nvc0_context *nvc0 = nvc0_context(pipe);

    nvc0->gmtyprog = hwcso;
    nvc0->dirty |= NVC0_NEW_GMTYPROG;
}

static void *
nvc0_cp_state_create(struct pipe_context *pipe,
                     const struct pipe_compute_state *cso)
{
   struct nvc0_program *prog;

   prog = CALLOC_STRUCT(nvc0_program);
   if (!prog)
      return NULL;
   prog->type = PIPE_SHADER_COMPUTE;

   prog->cp.smem_size = cso->req_local_mem;
   prog->cp.lmem_size = cso->req_private_mem;
   prog->parm_size = cso->req_input_mem;

   prog->pipe.tokens = tgsi_dup_tokens((const struct tgsi_token *)cso->prog);

   return (void *)prog;
}

static void
nvc0_cp_state_bind(struct pipe_context *pipe, void *hwcso)
{
    struct nvc0_context *nvc0 = nvc0_context(pipe);

    nvc0->compprog = hwcso;
    nvc0->dirty_cp |= NVC0_NEW_CP_PROGRAM;
}

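/* Constant buffer binding: user (inline) buffers keep their pointer and the
 * exact size, resource-backed buffers record offset and a size aligned up to
 * 0x100 bytes. The compute stage uses its own dirty flag and bufctx.
 */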
static void
nvc0_set_constant_buffer(struct pipe_context *pipe, uint shader, uint index,
                         struct pipe_constant_buffer *cb)
{
   struct nvc0_context *nvc0 = nvc0_context(pipe);
   struct pipe_resource *res = cb ? cb->buffer : NULL;
   const unsigned s = nvc0_shader_stage(shader);
   const unsigned i = index;

   if (unlikely(shader == PIPE_SHADER_COMPUTE)) {
      assert(!cb || !cb->user_buffer);
      if (nvc0->constbuf[s][i].u.buf)
         nouveau_bufctx_reset(nvc0->bufctx_cp, NVC0_BIND_CP_CB(i));

      nvc0->dirty_cp |= NVC0_NEW_CP_CONSTBUF;
   } else {
      if (nvc0->constbuf[s][i].user)
         nvc0->constbuf[s][i].u.buf = NULL;
      else
      if (nvc0->constbuf[s][i].u.buf)
         nouveau_bufctx_reset(nvc0->bufctx_3d, NVC0_BIND_CB(s, i));

      nvc0->dirty |= NVC0_NEW_CONSTBUF;
   }
   nvc0->constbuf_dirty[s] |= 1 << i;

   pipe_resource_reference(&nvc0->constbuf[s][i].u.buf, res);

   nvc0->constbuf[s][i].user = (cb && cb->user_buffer) ? TRUE : FALSE;
   if (nvc0->constbuf[s][i].user) {
      nvc0->constbuf[s][i].u.data = cb->user_buffer;
      nvc0->constbuf[s][i].size = cb->buffer_size;
   } else
   if (cb) {
      nvc0->constbuf[s][i].offset = cb->buffer_offset;
      nvc0->constbuf[s][i].size = align(cb->buffer_size, 0x100);
   }
}

/* =============================================================================
 */

static void
nvc0_set_blend_color(struct pipe_context *pipe,
                     const struct pipe_blend_color *bcol)
{
    struct nvc0_context *nvc0 = nvc0_context(pipe);

    nvc0->blend_colour = *bcol;
    nvc0->dirty |= NVC0_NEW_BLEND_COLOUR;
}

static void
nvc0_set_stencil_ref(struct pipe_context *pipe,
                     const struct pipe_stencil_ref *sr)
{
    struct nvc0_context *nvc0 = nvc0_context(pipe);

    nvc0->stencil_ref = *sr;
    nvc0->dirty |= NVC0_NEW_STENCIL_REF;
}

static void
nvc0_set_clip_state(struct pipe_context *pipe,
                    const struct pipe_clip_state *clip)
{
    struct nvc0_context *nvc0 = nvc0_context(pipe);

    memcpy(nvc0->clip.ucp, clip->ucp, sizeof(clip->ucp));

    nvc0->dirty |= NVC0_NEW_CLIP;
}

static void
nvc0_set_sample_mask(struct pipe_context *pipe, unsigned sample_mask)
{
    struct nvc0_context *nvc0 = nvc0_context(pipe);

    nvc0->sample_mask = sample_mask;
    nvc0->dirty |= NVC0_NEW_SAMPLE_MASK;
}


static void
nvc0_set_framebuffer_state(struct pipe_context *pipe,
                           const struct pipe_framebuffer_state *fb)
{
    struct nvc0_context *nvc0 = nvc0_context(pipe);
    unsigned i;

    nouveau_bufctx_reset(nvc0->bufctx_3d, NVC0_BIND_FB);

    for (i = 0; i < fb->nr_cbufs; ++i)
       pipe_surface_reference(&nvc0->framebuffer.cbufs[i], fb->cbufs[i]);
    for (; i < nvc0->framebuffer.nr_cbufs; ++i)
       pipe_surface_reference(&nvc0->framebuffer.cbufs[i], NULL);

    nvc0->framebuffer.nr_cbufs = fb->nr_cbufs;

    nvc0->framebuffer.width = fb->width;
    nvc0->framebuffer.height = fb->height;

    pipe_surface_reference(&nvc0->framebuffer.zsbuf, fb->zsbuf);

    nvc0->dirty |= NVC0_NEW_FRAMEBUFFER;
}

static void
nvc0_set_polygon_stipple(struct pipe_context *pipe,
                         const struct pipe_poly_stipple *stipple)
{
    struct nvc0_context *nvc0 = nvc0_context(pipe);

    nvc0->stipple = *stipple;
    nvc0->dirty |= NVC0_NEW_STIPPLE;
}

static void
nvc0_set_scissor_states(struct pipe_context *pipe,
                        unsigned start_slot,
                        unsigned num_scissors,
                        const struct pipe_scissor_state *scissor)
{
    struct nvc0_context *nvc0 = nvc0_context(pipe);

    nvc0->scissor = *scissor;
    nvc0->dirty |= NVC0_NEW_SCISSOR;
}

static void
nvc0_set_viewport_states(struct pipe_context *pipe,
                         unsigned start_slot,
                         unsigned num_viewports,
                         const struct pipe_viewport_state *vpt)
{
    struct nvc0_context *nvc0 = nvc0_context(pipe);

    nvc0->viewport = *vpt;
    nvc0->dirty |= NVC0_NEW_VIEWPORT;
}

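/* Vertex buffer binding: vbo_user tracks slots backed by user-space pointers,
 * constant_vbos tracks user buffers with stride 0 (a single constant
 * element); both are per-slot bitmasks.
 */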
static void
nvc0_set_vertex_buffers(struct pipe_context *pipe,
                        unsigned start_slot, unsigned count,
                        const struct pipe_vertex_buffer *vb)
{
    struct nvc0_context *nvc0 = nvc0_context(pipe);
    unsigned i;

    util_set_vertex_buffers_count(nvc0->vtxbuf, &nvc0->num_vtxbufs, vb,
                                  start_slot, count);

    if (!vb) {
       nvc0->vbo_user &= ~(((1ull << count) - 1) << start_slot);
       nvc0->constant_vbos &= ~(((1ull << count) - 1) << start_slot);
       return;
    }

    for (i = 0; i < count; ++i) {
       unsigned dst_index = start_slot + i;

       if (vb[i].user_buffer) {
          nvc0->vbo_user |= 1 << dst_index;
          if (!vb[i].stride)
             nvc0->constant_vbos |= 1 << dst_index;
          else
             nvc0->constant_vbos &= ~(1 << dst_index);
       } else {
          nvc0->vbo_user &= ~(1 << dst_index);
          nvc0->constant_vbos &= ~(1 << dst_index);
       }
    }

    nvc0->dirty |= NVC0_NEW_ARRAYS;
    nouveau_bufctx_reset(nvc0->bufctx_3d, NVC0_BIND_VTX);
}

static void
nvc0_set_index_buffer(struct pipe_context *pipe,
                      const struct pipe_index_buffer *ib)
{
    struct nvc0_context *nvc0 = nvc0_context(pipe);

    if (nvc0->idxbuf.buffer)
       nouveau_bufctx_reset(nvc0->bufctx_3d, NVC0_BIND_IDX);

    if (ib) {
       pipe_resource_reference(&nvc0->idxbuf.buffer, ib->buffer);
       nvc0->idxbuf.index_size = ib->index_size;
       if (ib->buffer) {
          nvc0->idxbuf.offset = ib->offset;
          nvc0->dirty |= NVC0_NEW_IDXBUF;
       } else {
          nvc0->idxbuf.user_buffer = ib->user_buffer;
          nvc0->dirty &= ~NVC0_NEW_IDXBUF;
       }
    } else {
       nvc0->dirty &= ~NVC0_NEW_IDXBUF;
       pipe_resource_reference(&nvc0->idxbuf.buffer, NULL);
    }
}

static void
nvc0_vertex_state_bind(struct pipe_context *pipe, void *hwcso)
{
    struct nvc0_context *nvc0 = nvc0_context(pipe);

    nvc0->vertex = hwcso;
    nvc0->dirty |= NVC0_NEW_VERTEX;
}

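/* Stream-output target: besides the usual pipe_stream_output_target setup, a
 * driver query (NVC0_QUERY_TFB_BUFFER_OFFSET) is created so the current
 * buffer offset can be saved when the target is switched out (see the calls
 * to nvc0_so_target_save_offset below).
 */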
static struct pipe_stream_output_target *
nvc0_so_target_create(struct pipe_context *pipe,
                      struct pipe_resource *res,
                      unsigned offset, unsigned size)
{
   struct nvc0_so_target *targ = MALLOC_STRUCT(nvc0_so_target);
   if (!targ)
      return NULL;

   targ->pq = pipe->create_query(pipe, NVC0_QUERY_TFB_BUFFER_OFFSET);
   if (!targ->pq) {
      FREE(targ);
      return NULL;
   }
   targ->clean = TRUE;

   targ->pipe.buffer_size = size;
   targ->pipe.buffer_offset = offset;
   targ->pipe.context = pipe;
   targ->pipe.buffer = NULL;
   pipe_resource_reference(&targ->pipe.buffer, res);
   pipe_reference_init(&targ->pipe.reference, 1);

   return &targ->pipe;
}

static void
nvc0_so_target_destroy(struct pipe_context *pipe,
                       struct pipe_stream_output_target *ptarg)
{
   struct nvc0_so_target *targ = nvc0_so_target(ptarg);
   pipe->destroy_query(pipe, targ->pq);
   pipe_resource_reference(&targ->pipe.buffer, NULL);
   FREE(targ);
}

static void
nvc0_set_transform_feedback_targets(struct pipe_context *pipe,
                                    unsigned num_targets,
                                    struct pipe_stream_output_target **targets,
                                    unsigned append_mask)
{
   struct nvc0_context *nvc0 = nvc0_context(pipe);
   unsigned i;
   boolean serialize = TRUE;

   assert(num_targets <= 4);

   for (i = 0; i < num_targets; ++i) {
      if (nvc0->tfbbuf[i] == targets[i] && (append_mask & (1 << i)))
         continue;
      nvc0->tfbbuf_dirty |= 1 << i;

      if (nvc0->tfbbuf[i] && nvc0->tfbbuf[i] != targets[i])
         nvc0_so_target_save_offset(pipe, nvc0->tfbbuf[i], i, &serialize);

      if (targets[i] && !(append_mask & (1 << i)))
         nvc0_so_target(targets[i])->clean = TRUE;

      pipe_so_target_reference(&nvc0->tfbbuf[i], targets[i]);
   }
   for (; i < nvc0->num_tfbbufs; ++i) {
      nvc0->tfbbuf_dirty |= 1 << i;
      nvc0_so_target_save_offset(pipe, nvc0->tfbbuf[i], i, &serialize);
      pipe_so_target_reference(&nvc0->tfbbuf[i], NULL);
   }
   nvc0->num_tfbbufs = num_targets;

   if (nvc0->tfbbuf_dirty)
      nvc0->dirty |= NVC0_NEW_TFB_TARGETS;
}

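/* Common helper for shader (t == 0) and compute (t == 1) surface binding:
 * updates the per-slot valid/dirty bitmasks and resets the corresponding
 * bufctx bin.
 */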
static void
nvc0_bind_surfaces_range(struct nvc0_context *nvc0, const unsigned t,
                         unsigned start, unsigned nr,
                         struct pipe_surface **psurfaces)
{
   const unsigned end = start + nr;
   const unsigned mask = ((1 << nr) - 1) << start;
   unsigned i;

   if (psurfaces) {
      for (i = start; i < end; ++i) {
         const unsigned p = i - start;
         if (psurfaces[p])
            nvc0->surfaces_valid[t] |= (1 << i);
         else
            nvc0->surfaces_valid[t] &= ~(1 << i);
         pipe_surface_reference(&nvc0->surfaces[t][i], psurfaces[p]);
      }
   } else {
      for (i = start; i < end; ++i)
         pipe_surface_reference(&nvc0->surfaces[t][i], NULL);
      nvc0->surfaces_valid[t] &= ~mask;
   }
   nvc0->surfaces_dirty[t] |= mask;

   if (t == 0)
      nouveau_bufctx_reset(nvc0->bufctx_3d, NVC0_BIND_SUF);
   else
      nouveau_bufctx_reset(nvc0->bufctx_cp, NVC0_BIND_CP_SUF);
}

static void
nvc0_set_compute_resources(struct pipe_context *pipe,
                           unsigned start, unsigned nr,
                           struct pipe_surface **resources)
{
   nvc0_bind_surfaces_range(nvc0_context(pipe), 1, start, nr, resources);

   nvc0_context(pipe)->dirty_cp |= NVC0_NEW_CP_SURFACES;
}

static void
nvc0_set_shader_resources(struct pipe_context *pipe,
                          unsigned start, unsigned nr,
                          struct pipe_surface **resources)
{
   nvc0_bind_surfaces_range(nvc0_context(pipe), 0, start, nr, resources);

   nvc0_context(pipe)->dirty |= NVC0_NEW_SURFACES;
}

static INLINE void
nvc0_set_global_handle(uint32_t *phandle, struct pipe_resource *res)
{
   struct nv04_resource *buf = nv04_resource(res);
   if (buf) {
      uint64_t limit = (buf->address + buf->base.width0) - 1;
      if (limit < (1ULL << 32)) {
         *phandle = (uint32_t)buf->address;
      } else {
         NOUVEAU_ERR("Cannot map into TGSI_RESOURCE_GLOBAL: "
                     "resource not contained within 32-bit address space !\n");
         *phandle = 0;
      }
   } else {
      *phandle = 0;
   }
}

static void
nvc0_set_global_bindings(struct pipe_context *pipe,
                         unsigned start, unsigned nr,
                         struct pipe_resource **resources,
                         uint32_t **handles)
{
   struct nvc0_context *nvc0 = nvc0_context(pipe);
   struct pipe_resource **ptr;
   unsigned i;
   const unsigned end = start + nr;

   if (nvc0->global_residents.size <= (end * sizeof(struct pipe_resource *))) {
      const unsigned old_size = nvc0->global_residents.size;
      const unsigned req_size = end * sizeof(struct pipe_resource *);
      util_dynarray_resize(&nvc0->global_residents, req_size);
      memset((uint8_t *)nvc0->global_residents.data + old_size, 0,
             req_size - old_size);
   }

   if (resources) {
      ptr = util_dynarray_element(
         &nvc0->global_residents, struct pipe_resource *, start);
      for (i = 0; i < nr; ++i) {
         pipe_resource_reference(&ptr[i], resources[i]);
         nvc0_set_global_handle(handles[i], resources[i]);
      }
   } else {
      ptr = util_dynarray_element(
         &nvc0->global_residents, struct pipe_resource *, start);
      for (i = 0; i < nr; ++i)
         pipe_resource_reference(&ptr[i], NULL);
   }

   nouveau_bufctx_reset(nvc0->bufctx_cp, NVC0_BIND_CP_GLOBAL);

   nvc0->dirty_cp = NVC0_NEW_CP_GLOBALS;
}

void
nvc0_init_state_functions(struct nvc0_context *nvc0)
{
   struct pipe_context *pipe = &nvc0->base.pipe;

   pipe->create_blend_state = nvc0_blend_state_create;
   pipe->bind_blend_state = nvc0_blend_state_bind;
   pipe->delete_blend_state = nvc0_blend_state_delete;

   pipe->create_rasterizer_state = nvc0_rasterizer_state_create;
   pipe->bind_rasterizer_state = nvc0_rasterizer_state_bind;
   pipe->delete_rasterizer_state = nvc0_rasterizer_state_delete;

   pipe->create_depth_stencil_alpha_state = nvc0_zsa_state_create;
   pipe->bind_depth_stencil_alpha_state = nvc0_zsa_state_bind;
   pipe->delete_depth_stencil_alpha_state = nvc0_zsa_state_delete;

   pipe->create_sampler_state = nv50_sampler_state_create;
   pipe->delete_sampler_state = nvc0_sampler_state_delete;
   pipe->bind_vertex_sampler_states   = nvc0_vp_sampler_states_bind;
   pipe->bind_fragment_sampler_states = nvc0_fp_sampler_states_bind;
   pipe->bind_geometry_sampler_states = nvc0_gp_sampler_states_bind;
   pipe->bind_compute_sampler_states = nvc0_cp_sampler_states_bind;

   pipe->create_sampler_view = nvc0_create_sampler_view;
   pipe->sampler_view_destroy = nvc0_sampler_view_destroy;
   pipe->set_vertex_sampler_views   = nvc0_vp_set_sampler_views;
   pipe->set_fragment_sampler_views = nvc0_fp_set_sampler_views;
   pipe->set_geometry_sampler_views = nvc0_gp_set_sampler_views;
   pipe->set_compute_sampler_views = nvc0_cp_set_sampler_views;

   pipe->create_vs_state = nvc0_vp_state_create;
   pipe->create_fs_state = nvc0_fp_state_create;
   pipe->create_gs_state = nvc0_gp_state_create;
   pipe->bind_vs_state = nvc0_vp_state_bind;
   pipe->bind_fs_state = nvc0_fp_state_bind;
   pipe->bind_gs_state = nvc0_gp_state_bind;
   pipe->delete_vs_state = nvc0_sp_state_delete;
   pipe->delete_fs_state = nvc0_sp_state_delete;
   pipe->delete_gs_state = nvc0_sp_state_delete;

   pipe->create_compute_state = nvc0_cp_state_create;
   pipe->bind_compute_state = nvc0_cp_state_bind;
   pipe->delete_compute_state = nvc0_sp_state_delete;

   pipe->set_blend_color = nvc0_set_blend_color;
   pipe->set_stencil_ref = nvc0_set_stencil_ref;
   pipe->set_clip_state = nvc0_set_clip_state;
   pipe->set_sample_mask = nvc0_set_sample_mask;
   pipe->set_constant_buffer = nvc0_set_constant_buffer;
   pipe->set_framebuffer_state = nvc0_set_framebuffer_state;
   pipe->set_polygon_stipple = nvc0_set_polygon_stipple;
   pipe->set_scissor_states = nvc0_set_scissor_states;
   pipe->set_viewport_states = nvc0_set_viewport_states;

   pipe->create_vertex_elements_state = nvc0_vertex_state_create;
   pipe->delete_vertex_elements_state = nvc0_vertex_state_delete;
   pipe->bind_vertex_elements_state = nvc0_vertex_state_bind;

   pipe->set_vertex_buffers = nvc0_set_vertex_buffers;
   pipe->set_index_buffer = nvc0_set_index_buffer;

   pipe->create_stream_output_target = nvc0_so_target_create;
   pipe->stream_output_target_destroy = nvc0_so_target_destroy;
   pipe->set_stream_output_targets = nvc0_set_transform_feedback_targets;

   pipe->set_global_binding = nvc0_set_global_bindings;
   pipe->set_compute_resources = nvc0_set_compute_resources;
   pipe->set_shader_resources = nvc0_set_shader_resources;
}