/*
 * Mesa 3-D graphics library
 *
 * Copyright (C) 2012-2013 LunarG, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Chia-I Wu
 */

#include "intel_winsys.h"

#include "ilo_3d_pipeline.h"
#include "ilo_context.h"
#include "ilo_cp.h"
#include "ilo_query.h"
#include "ilo_shader.h"
#include "ilo_state.h"
#include "ilo_3d.h"

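/**
 * Accumulate the pairs of depth counts written to the query bo into the
 * query result, then reset the bo for reuse.
 */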
static void
process_query_for_occlusion_counter(struct ilo_3d *hw3d,
                                    struct ilo_query *q)
{
   uint64_t *vals, depth_count = 0;
   int i;

   /* in pairs */
   assert(q->reg_read % 2 == 0);

   intel_bo_map(q->bo, false);
   vals = intel_bo_get_virtual(q->bo);
   for (i = 1; i < q->reg_read; i += 2)
      depth_count += vals[i] - vals[i - 1];
   intel_bo_unmap(q->bo);

   /* accumulate so that the query can be resumed if wanted */
   q->data.u64 += depth_count;
   q->reg_read = 0;
}

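/**
 * Convert a raw timestamp read from the hardware to nanoseconds, assuming
 * an 80ns timestamp tick.
 */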
static uint64_t
timestamp_to_ns(uint64_t timestamp)
{
   /* see ilo_get_timestamp() */
   return (timestamp & 0xffffffff) * 80;
}

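/**
 * Convert the timestamp written to the query bo to nanoseconds and store it
 * in the query result.
 */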
static void
process_query_for_timestamp(struct ilo_3d *hw3d, struct ilo_query *q)
{
   uint64_t *vals, timestamp;

   assert(q->reg_read == 1);

   intel_bo_map(q->bo, false);
   vals = intel_bo_get_virtual(q->bo);
   timestamp = vals[0];
   intel_bo_unmap(q->bo);

   q->data.u64 = timestamp_to_ns(timestamp);
   q->reg_read = 0;
}

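/**
 * Accumulate the pairs of timestamps written to the query bo into the query
 * result, converting the total from timestamp ticks to nanoseconds.
 */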
static void
process_query_for_time_elapsed(struct ilo_3d *hw3d, struct ilo_query *q)
{
   uint64_t *vals, elapsed = 0;
   int i;

   /* in pairs */
   assert(q->reg_read % 2 == 0);

   intel_bo_map(q->bo, false);
   vals = intel_bo_get_virtual(q->bo);

   for (i = 1; i < q->reg_read; i += 2)
      elapsed += vals[i] - vals[i - 1];

   intel_bo_unmap(q->bo);

   /* accumulate so that the query can be resumed if wanted */
   q->data.u64 += timestamp_to_ns(elapsed);
   q->reg_read = 0;
}

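/**
 * Resume all tracked queries by emitting a new register write for each of
 * them.
 */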
static void
ilo_3d_resume_queries(struct ilo_3d *hw3d)
{
   struct ilo_query *q;

   /* resume occlusion queries */
   LIST_FOR_EACH_ENTRY(q, &hw3d->occlusion_queries, list) {
      /* accumulate the result if the bo is already full */
      if (q->reg_read >= q->reg_total)
         process_query_for_occlusion_counter(hw3d, q);

      ilo_3d_pipeline_emit_write_depth_count(hw3d->pipeline,
            q->bo, q->reg_read++);
   }

   /* resume timer queries */
   LIST_FOR_EACH_ENTRY(q, &hw3d->time_elapsed_queries, list) {
      /* accumulate the result if the bo is already full */
      if (q->reg_read >= q->reg_total)
         process_query_for_time_elapsed(hw3d, q);

      ilo_3d_pipeline_emit_write_timestamp(hw3d->pipeline,
            q->bo, q->reg_read++);
   }
}

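/**
 * Pause all tracked queries by emitting the closing register write for each
 * of them.
 */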
static void
ilo_3d_pause_queries(struct ilo_3d *hw3d)
{
   struct ilo_query *q;

   /* pause occlusion queries */
   LIST_FOR_EACH_ENTRY(q, &hw3d->occlusion_queries, list) {
      assert(q->reg_read < q->reg_total);
      ilo_3d_pipeline_emit_write_depth_count(hw3d->pipeline,
            q->bo, q->reg_read++);
   }

   /* pause timer queries */
   LIST_FOR_EACH_ENTRY(q, &hw3d->time_elapsed_queries, list) {
      assert(q->reg_read < q->reg_total);
      ilo_3d_pipeline_emit_write_timestamp(hw3d->pipeline,
            q->bo, q->reg_read++);
   }
}

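/**
 * CP owner-release callback: pause the outstanding queries so that they can
 * be resumed when the render ring is owned again.
 */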
static void
ilo_3d_release_render_ring(struct ilo_cp *cp, void *data)
{
   struct ilo_3d *hw3d = data;

   ilo_3d_pause_queries(hw3d);
}

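/**
 * Select the render ring and become the owner of the command parser,
 * resuming the paused queries when ownership is newly acquired.
 */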
static void
ilo_3d_own_render_ring(struct ilo_3d *hw3d)
{
   ilo_cp_set_ring(hw3d->cp, ILO_CP_RING_RENDER);

   if (ilo_cp_set_owner(hw3d->cp, &hw3d->owner, hw3d->owner_reserve))
      ilo_3d_resume_queries(hw3d);
}

/**
 * Begin a query.
 */
void
ilo_3d_begin_query(struct ilo_context *ilo, struct ilo_query *q)
{
   struct ilo_3d *hw3d = ilo->hw3d;

   ilo_3d_own_render_ring(hw3d);

   switch (q->type) {
   case PIPE_QUERY_OCCLUSION_COUNTER:
      /* reserve some space for pausing the query */
      q->reg_cmd_size = ilo_3d_pipeline_estimate_size(hw3d->pipeline,
            ILO_3D_PIPELINE_WRITE_DEPTH_COUNT, NULL);
      hw3d->owner_reserve += q->reg_cmd_size;
      ilo_cp_set_owner(hw3d->cp, &hw3d->owner, hw3d->owner_reserve);

      q->data.u64 = 0;

      if (ilo_query_alloc_bo(q, 2, -1, hw3d->cp->winsys)) {
         /* XXX we should check the aperture size */
         ilo_3d_pipeline_emit_write_depth_count(hw3d->pipeline,
               q->bo, q->reg_read++);

         list_add(&q->list, &hw3d->occlusion_queries);
      }
      break;
   case PIPE_QUERY_TIMESTAMP:
      /* nop */
      break;
   case PIPE_QUERY_TIME_ELAPSED:
      /* reserve some space for pausing the query */
      q->reg_cmd_size = ilo_3d_pipeline_estimate_size(hw3d->pipeline,
            ILO_3D_PIPELINE_WRITE_TIMESTAMP, NULL);
      hw3d->owner_reserve += q->reg_cmd_size;
      ilo_cp_set_owner(hw3d->cp, &hw3d->owner, hw3d->owner_reserve);

      q->data.u64 = 0;

      if (ilo_query_alloc_bo(q, 2, -1, hw3d->cp->winsys)) {
         /* XXX we should check the aperture size */
         ilo_3d_pipeline_emit_write_timestamp(hw3d->pipeline,
               q->bo, q->reg_read++);

         list_add(&q->list, &hw3d->time_elapsed_queries);
      }
      break;
   case PIPE_QUERY_PRIMITIVES_GENERATED:
      q->data.u64 = 0;
      list_add(&q->list, &hw3d->prim_generated_queries);
      break;
   case PIPE_QUERY_PRIMITIVES_EMITTED:
      q->data.u64 = 0;
      list_add(&q->list, &hw3d->prim_emitted_queries);
      break;
   default:
      assert(!"unknown query type");
      break;
   }
}

/**
 * End a query.
 */
void
ilo_3d_end_query(struct ilo_context *ilo, struct ilo_query *q)
{
   struct ilo_3d *hw3d = ilo->hw3d;

   ilo_3d_own_render_ring(hw3d);

   switch (q->type) {
   case PIPE_QUERY_OCCLUSION_COUNTER:
      list_del(&q->list);

      assert(q->reg_read < q->reg_total);
      hw3d->owner_reserve -= q->reg_cmd_size;
      ilo_cp_set_owner(hw3d->cp, &hw3d->owner, hw3d->owner_reserve);
      ilo_3d_pipeline_emit_write_depth_count(hw3d->pipeline,
            q->bo, q->reg_read++);
      break;
   case PIPE_QUERY_TIMESTAMP:
      q->data.u64 = 0;

      if (ilo_query_alloc_bo(q, 1, 1, hw3d->cp->winsys)) {
         ilo_3d_pipeline_emit_write_timestamp(hw3d->pipeline,
               q->bo, q->reg_read++);
      }
      break;
   case PIPE_QUERY_TIME_ELAPSED:
      list_del(&q->list);

      assert(q->reg_read < q->reg_total);
      hw3d->owner_reserve -= q->reg_cmd_size;
      ilo_cp_set_owner(hw3d->cp, &hw3d->owner, hw3d->owner_reserve);
      ilo_3d_pipeline_emit_write_timestamp(hw3d->pipeline,
            q->bo, q->reg_read++);
      break;
   case PIPE_QUERY_PRIMITIVES_GENERATED:
   case PIPE_QUERY_PRIMITIVES_EMITTED:
      list_del(&q->list);
      break;
   default:
      assert(!"unknown query type");
      break;
   }
}

/**
 * Process the raw query data.
 */
void
ilo_3d_process_query(struct ilo_context *ilo, struct ilo_query *q)
{
   struct ilo_3d *hw3d = ilo->hw3d;

   switch (q->type) {
   case PIPE_QUERY_OCCLUSION_COUNTER:
      if (q->bo)
         process_query_for_occlusion_counter(hw3d, q);
      break;
   case PIPE_QUERY_TIMESTAMP:
      if (q->bo)
         process_query_for_timestamp(hw3d, q);
      break;
   case PIPE_QUERY_TIME_ELAPSED:
      if (q->bo)
         process_query_for_time_elapsed(hw3d, q);
      break;
   case PIPE_QUERY_PRIMITIVES_GENERATED:
   case PIPE_QUERY_PRIMITIVES_EMITTED:
      break;
   default:
      assert(!"unknown query type");
      break;
   }
}

/**
 * Hook for CP new-batch.
 */
void
ilo_3d_cp_flushed(struct ilo_3d *hw3d)
{
   if (ilo_debug & ILO_DEBUG_3D)
      ilo_3d_pipeline_dump(hw3d->pipeline);

   /* invalidate the pipeline */
   ilo_3d_pipeline_invalidate(hw3d->pipeline,
         ILO_3D_PIPELINE_INVALIDATE_BATCH_BO |
         ILO_3D_PIPELINE_INVALIDATE_STATE_BO);
   if (!hw3d->cp->render_ctx) {
      ilo_3d_pipeline_invalidate(hw3d->pipeline,
            ILO_3D_PIPELINE_INVALIDATE_HW);
   }

   hw3d->new_batch = true;
}

/**
 * Create a 3D context.
 */
struct ilo_3d *
ilo_3d_create(struct ilo_cp *cp, const struct ilo_dev_info *dev)
{
   struct ilo_3d *hw3d;

   hw3d = CALLOC_STRUCT(ilo_3d);
   if (!hw3d)
      return NULL;

   hw3d->cp = cp;
   hw3d->owner.release_callback = ilo_3d_release_render_ring;
   hw3d->owner.release_data = hw3d;

   hw3d->new_batch = true;

   list_inithead(&hw3d->occlusion_queries);
   list_inithead(&hw3d->time_elapsed_queries);
   list_inithead(&hw3d->prim_generated_queries);
   list_inithead(&hw3d->prim_emitted_queries);

   hw3d->pipeline = ilo_3d_pipeline_create(cp, dev);
   if (!hw3d->pipeline) {
      FREE(hw3d);
      return NULL;
   }

   return hw3d;
}

/**
 * Destroy a 3D context.
 */
void
ilo_3d_destroy(struct ilo_3d *hw3d)
{
   ilo_3d_pipeline_destroy(hw3d->pipeline);

   if (hw3d->kernel.bo)
      intel_bo_unreference(hw3d->kernel.bo);

   FREE(hw3d);
}

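/**
 * Emit the draw commands for the current state, inserting a flush first
 * when the old framebuffer or SO targets may be sampled from, and starting
 * a new batch when there is not enough space left in the current one.
 */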
static bool
draw_vbo(struct ilo_3d *hw3d, const struct ilo_context *ilo,
         int *prim_generated, int *prim_emitted)
{
   bool need_flush = false;
   int max_len;

   ilo_3d_own_render_ring(hw3d);

   if (!hw3d->new_batch) {
      /*
       * Without a better tracking mechanism, when the framebuffer changes, we
       * have to assume that the old framebuffer may be sampled from.  If that
       * happens in the middle of a batch buffer, we need to insert manual
       * flushes.
       */
      need_flush = (ilo->dirty & ILO_DIRTY_FB);

      /* the same applies to SO target changes */
      need_flush |= (ilo->dirty & ILO_DIRTY_SO);
   }

   /* make sure there is enough room first */
   max_len = ilo_3d_pipeline_estimate_size(hw3d->pipeline,
         ILO_3D_PIPELINE_DRAW, ilo);
   if (need_flush) {
      max_len += ilo_3d_pipeline_estimate_size(hw3d->pipeline,
            ILO_3D_PIPELINE_FLUSH, NULL);
   }

   if (max_len > ilo_cp_space(hw3d->cp)) {
      ilo_cp_flush(hw3d->cp);
      need_flush = false;
      assert(max_len <= ilo_cp_space(hw3d->cp));
   }

   if (need_flush)
      ilo_3d_pipeline_emit_flush(hw3d->pipeline);

   return ilo_3d_pipeline_emit_draw(hw3d->pipeline, ilo,
         prim_generated, prim_emitted);
}

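/**
 * Add the generated/emitted primitive counts of the last draw to the
 * active primitive queries.
 */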
static void
update_prim_count(struct ilo_3d *hw3d, int generated, int emitted)
{
   struct ilo_query *q;

   LIST_FOR_EACH_ENTRY(q, &hw3d->prim_generated_queries, list)
      q->data.u64 += generated;

   LIST_FOR_EACH_ENTRY(q, &hw3d->prim_emitted_queries, list)
      q->data.u64 += emitted;
}

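/**
 * Return true when rendering should proceed under the current render
 * condition, or when there is no render condition.
 */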
bool
ilo_3d_pass_render_condition(struct ilo_context *ilo)
{
   struct ilo_3d *hw3d = ilo->hw3d;
   uint64_t result;
   bool wait;

   if (!hw3d->render_condition.query)
      return true;

   switch (hw3d->render_condition.mode) {
   case PIPE_RENDER_COND_WAIT:
   case PIPE_RENDER_COND_BY_REGION_WAIT:
      wait = true;
      break;
   case PIPE_RENDER_COND_NO_WAIT:
   case PIPE_RENDER_COND_BY_REGION_NO_WAIT:
   default:
      wait = false;
      break;
   }

   if (ilo->base.get_query_result(&ilo->base, hw3d->render_condition.query,
            wait, (union pipe_query_result *) &result))
      return (!result == hw3d->render_condition.cond);
   else
      return true;
}

#define UPDATE_MIN2(a, b) (a) = MIN2((a), (b))
#define UPDATE_MAX2(a, b) (a) = MAX2((a), (b))

/**
 * \see find_sub_primitives() from core mesa
 */
static int
ilo_find_sub_primitives(const void *elements, unsigned element_size,
                        const struct pipe_draw_info *orig_info,
                        struct pipe_draw_info *info)
{
   const unsigned max_prims = orig_info->count - orig_info->start;
   unsigned i, cur_start, cur_count;
   int scan_index;
   unsigned scan_num;

   cur_start = orig_info->start;
   cur_count = 0;
   scan_num = 0;

#define IB_INDEX_READ(TYPE, INDEX) (((const TYPE *) elements)[INDEX])

#define SCAN_ELEMENTS(TYPE) \
   info[scan_num] = *orig_info; \
   info[scan_num].primitive_restart = false; \
   for (i = orig_info->start; i < orig_info->count; i++) { \
      scan_index = IB_INDEX_READ(TYPE, i); \
      if (scan_index == orig_info->restart_index) { \
         if (cur_count > 0) { \
            assert(scan_num < max_prims); \
            info[scan_num].start = cur_start; \
            info[scan_num].count = cur_count; \
            scan_num++; \
            info[scan_num] = *orig_info; \
            info[scan_num].primitive_restart = false; \
         } \
         cur_start = i + 1; \
         cur_count = 0; \
      } \
      else { \
         UPDATE_MIN2(info[scan_num].min_index, scan_index); \
         UPDATE_MAX2(info[scan_num].max_index, scan_index); \
         cur_count++; \
      } \
   } \
   if (cur_count > 0) { \
      assert(scan_num < max_prims); \
      info[scan_num].start = cur_start; \
      info[scan_num].count = cur_count; \
      scan_num++; \
   }

   switch (element_size) {
   case 1:
      SCAN_ELEMENTS(uint8_t);
      break;
   case 2:
      SCAN_ELEMENTS(uint16_t);
      break;
   case 4:
      SCAN_ELEMENTS(uint32_t);
      break;
   default:
      assert(0 && "bad index_size in find_sub_primitives()");
   }

#undef SCAN_ELEMENTS

   return scan_num;
}

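/**
 * Return true if the HW can handle the given primitive restart index with
 * the current index size.
 */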
static inline bool
ilo_check_restart_index(const struct ilo_context *ilo, unsigned restart_index)
{
   /*
    * Haswell (GEN 7.5) supports an arbitrary cut index; everything older
    * must be checked.
    */
   if (ilo->dev->gen >= ILO_GEN(7.5))
      return true;

   /* Note: indices must be unsigned byte, unsigned short or unsigned int */
   switch (ilo->ib.index_size) {
   case 1:
      return ((restart_index & 0xff) == 0xff);
   case 2:
      return ((restart_index & 0xffff) == 0xffff);
   case 4:
      return (restart_index == 0xffffffff);
   }
   return false;
}

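/**
 * Return true if the HW can handle primitive restart for the given
 * primitive type.
 */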
static inline bool
ilo_check_restart_prim_type(const struct ilo_context *ilo, unsigned prim)
{
   switch (prim) {
   case PIPE_PRIM_POINTS:
   case PIPE_PRIM_LINES:
   case PIPE_PRIM_LINE_STRIP:
   case PIPE_PRIM_TRIANGLES:
   case PIPE_PRIM_TRIANGLE_STRIP:
      /* All 965 GEN graphics support a cut index for these primitive types */
      return true;

   case PIPE_PRIM_LINE_LOOP:
   case PIPE_PRIM_POLYGON:
   case PIPE_PRIM_QUAD_STRIP:
   case PIPE_PRIM_QUADS:
   case PIPE_PRIM_TRIANGLE_FAN:
      if (ilo->dev->gen >= ILO_GEN(7.5)) {
         /* Haswell and newer parts can handle these prim types. */
         return true;
      }
      break;
   }

   return false;
}

/*
 * Handle VBOs using primitive restart in software.
 *
 * Called when the restart index or the primitive type cannot be handled by
 * the HW; splits the draw into sub-primitives at each restart index and
 * draws them one by one.
 */
static void
ilo_draw_vbo_with_sw_restart(struct pipe_context *pipe,
                             const struct pipe_draw_info *info)
{
   struct ilo_context *ilo = ilo_context(pipe);
   struct pipe_draw_info *restart_info = NULL;
   int sub_prim_count = 1;

   /*
    * We have to break up the primitive into chunks manually.  In the worst
    * case, every other index could be a restart index, so we need to have
    * space for that many primitives.
    */
   restart_info = MALLOC(((info->count + 1) / 2) * sizeof(*info));
   if (NULL == restart_info) {
      /* If we can't get memory for this, bail out */
      ilo_err("%s:%d - Out of memory", __FILE__, __LINE__);
      return;
   }

   if (ilo->ib.buffer) {
      struct pipe_transfer *transfer;
      const void *map;

      map = pipe_buffer_map(pipe, ilo->ib.buffer,
            PIPE_TRANSFER_READ, &transfer);

      sub_prim_count = ilo_find_sub_primitives(map + ilo->ib.offset,
            ilo->ib.index_size, info, restart_info);

      pipe_buffer_unmap(pipe, transfer);
   }
   else {
      sub_prim_count = ilo_find_sub_primitives(ilo->ib.user_buffer,
            ilo->ib.index_size, info, restart_info);
   }

   info = restart_info;

   while (sub_prim_count > 0) {
      pipe->draw_vbo(pipe, info);

      sub_prim_count--;
      info++;
   }

   FREE(restart_info);
}

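/**
 * Upload the shader cache to the kernel bo, growing and reallocating the bo
 * when it is full or may still be in use by the previous batch.
 */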
static bool
upload_shaders(struct ilo_3d *hw3d, struct ilo_shader_cache *shc)
{
   bool incremental = true;
   int upload;

   upload = ilo_shader_cache_upload(shc,
         NULL, hw3d->kernel.used, incremental);
   if (!upload)
      return true;

   /*
    * Allocate a new bo.  When this is a new batch, assume the bo is still in
    * use by the previous batch and force allocation.
    *
    * Would it help to upload the shader cache with an unsynchronized
    * mapping, and remove the check for a new batch here?
    */
   if (hw3d->kernel.used + upload > hw3d->kernel.size || hw3d->new_batch) {
      unsigned new_size = (hw3d->kernel.size) ?
         hw3d->kernel.size : (8 * 1024);

      while (hw3d->kernel.used + upload > new_size)
         new_size *= 2;

      if (hw3d->kernel.bo)
         intel_bo_unreference(hw3d->kernel.bo);

      hw3d->kernel.bo = intel_winsys_alloc_buffer(hw3d->cp->winsys,
            "kernel bo", new_size, 0);
      if (!hw3d->kernel.bo) {
         ilo_err("failed to allocate kernel bo\n");
         return false;
      }

      hw3d->kernel.used = 0;
      hw3d->kernel.size = new_size;
      incremental = false;

      assert(new_size >= ilo_shader_cache_upload(shc,
            NULL, hw3d->kernel.used, incremental));

      ilo_3d_pipeline_invalidate(hw3d->pipeline,
            ILO_3D_PIPELINE_INVALIDATE_KERNEL_BO);
   }

   upload = ilo_shader_cache_upload(shc,
         hw3d->kernel.bo, hw3d->kernel.used, incremental);
   if (upload < 0) {
      ilo_err("failed to upload shaders\n");
      return false;
   }

   hw3d->kernel.used += upload;

   assert(hw3d->kernel.used <= hw3d->kernel.size);

   return true;
}

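/**
 * The draw_vbo hook: check the render condition and primitive restart
 * support, finalize the states, upload the shaders, and emit the draw.
 */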
static void
ilo_draw_vbo(struct pipe_context *pipe, const struct pipe_draw_info *info)
{
   struct ilo_context *ilo = ilo_context(pipe);
   struct ilo_3d *hw3d = ilo->hw3d;
   int prim_generated, prim_emitted;

   if (!ilo_3d_pass_render_condition(ilo))
      return;

   if (info->primitive_restart && info->indexed) {
      /*
       * We want to draw an indexed primitive using primitive restart.
       * Check that the HW can handle the request and fall back to SW if
       * not.
       */
      if (!ilo_check_restart_index(ilo, info->restart_index) ||
          !ilo_check_restart_prim_type(ilo, info->mode)) {
         ilo_draw_vbo_with_sw_restart(pipe, info);
         return;
      }
   }

   ilo_finalize_3d_states(ilo, info);

   if (!upload_shaders(hw3d, ilo->shader_cache))
      return;

   /* If draw_vbo ever fails, return immediately. */
   if (!draw_vbo(hw3d, ilo, &prim_generated, &prim_emitted))
      return;

   /* clear dirty status */
   ilo->dirty = 0x0;
   hw3d->new_batch = false;

   /* avoid dangling pointer reference */
   ilo->draw = NULL;

   update_prim_count(hw3d, prim_generated, prim_emitted);

   if (ilo_debug & ILO_DEBUG_NOCACHE)
      ilo_3d_pipeline_emit_flush(hw3d->pipeline);
}

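/**
 * The render_condition hook: remember the query, mode, and condition for
 * later checks in ilo_3d_pass_render_condition().
 */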
static void
ilo_render_condition(struct pipe_context *pipe,
                     struct pipe_query *query,
                     boolean condition,
                     uint mode)
{
   struct ilo_context *ilo = ilo_context(pipe);
   struct ilo_3d *hw3d = ilo->hw3d;

   /* reference count? */
   hw3d->render_condition.query = query;
   hw3d->render_condition.mode = mode;
   hw3d->render_condition.cond = condition;
}

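/**
 * The texture_barrier hook: flush the 3D pipeline so that previous
 * rendering becomes visible to subsequent sampling.
 */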
static void
ilo_texture_barrier(struct pipe_context *pipe)
{
   struct ilo_context *ilo = ilo_context(pipe);
   struct ilo_3d *hw3d = ilo->hw3d;

   if (ilo->cp->ring != ILO_CP_RING_RENDER)
      return;

   ilo_3d_pipeline_emit_flush(hw3d->pipeline);

   /* don't know why */
   if (ilo->dev->gen >= ILO_GEN(7))
      ilo_cp_flush(hw3d->cp);
}

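/**
 * The get_sample_position hook: return the (x, y) position of the given
 * sample.
 */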
static void
ilo_get_sample_position(struct pipe_context *pipe,
                        unsigned sample_count,
                        unsigned sample_index,
                        float *out_value)
{
   struct ilo_context *ilo = ilo_context(pipe);
   struct ilo_3d *hw3d = ilo->hw3d;

   ilo_3d_pipeline_get_sample_position(hw3d->pipeline,
         sample_count, sample_index,
         &out_value[0], &out_value[1]);
}

/**
 * Initialize 3D-related functions.
 */
void
ilo_init_3d_functions(struct ilo_context *ilo)
{
   ilo->base.draw_vbo = ilo_draw_vbo;
   ilo->base.render_condition = ilo_render_condition;
   ilo->base.texture_barrier = ilo_texture_barrier;
   ilo->base.get_sample_position = ilo_get_sample_position;
}