/**************************************************************************
 *
 * Copyright 2007-2010 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

/**
 * \file
 * Implementation of fenced buffers.
 *
 * \author Jose Fonseca
 * \author Thomas Hellström
 */


#include "pipe/p_config.h"

#if defined(PIPE_OS_LINUX) || defined(PIPE_OS_BSD) || defined(PIPE_OS_SOLARIS)
#include <unistd.h>
#include <sched.h>
#endif

#include "pipe/p_compiler.h"
#include "pipe/p_defines.h"
#include "util/u_debug.h"
#include "os/os_thread.h"
#include "util/u_memory.h"
#include "util/list.h"

#include "pipebuffer/pb_buffer.h"
#include "pipebuffer/pb_bufmgr.h"
#include "pipebuffer/pb_buffer_fenced.h"
#include "vmw_screen.h"


/**
 * Convenience macro (type safe).
 */
#define SUPER(__derived) (&(__derived)->base)


struct fenced_manager
{
   struct pb_manager base;
   struct pb_manager *provider;
   struct pb_fence_ops *ops;

   /**
    * Following members are mutable and protected by this mutex.
    */
   pipe_mutex mutex;

   /**
    * Fenced buffer list.
    *
    * All fenced buffers are placed in this list, ordered from the oldest
    * fence to the newest fence.
    */
   struct list_head fenced;
   pb_size num_fenced;

   struct list_head unfenced;
   pb_size num_unfenced;

};
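

/*
 * A note on the lifecycle implemented below: a newly created buffer sits on
 * the unfenced list; when a fence is attached (fenced_buffer_fence) it moves
 * to the tail of the fenced list; once that fence expires it moves back to
 * the unfenced list; and it is finally destroyed when its reference count
 * drops to zero.
 */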


/**
 * Fenced buffer.
 *
 * Wrapper around a pipe buffer which adds fencing and reference counting.
 */
struct fenced_buffer
{
   /*
    * Immutable members.
    */

   struct pb_buffer base;
   struct fenced_manager *mgr;

   /*
    * Following members are mutable and protected by fenced_manager::mutex.
    */

   struct list_head head;

   /**
    * Buffer with storage.
    */
   struct pb_buffer *buffer;
   pb_size size;

   /**
    * A bitmask of PB_USAGE_CPU/GPU_READ/WRITE describing the current
    * buffer usage.
    */
   unsigned flags;

   unsigned mapcount;

   struct pb_validate *vl;
   unsigned validation_flags;

   struct pipe_fence_handle *fence;
};
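

/*
 * Casts from the public pb_manager/pb_buffer handles back to the
 * implementation structs. These rely on the base members being embedded
 * first in struct fenced_manager and struct fenced_buffer.
 */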


static INLINE struct fenced_manager *
fenced_manager(struct pb_manager *mgr)
{
   assert(mgr);
   return (struct fenced_manager *)mgr;
}


static INLINE struct fenced_buffer *
fenced_buffer(struct pb_buffer *buf)
{
   assert(buf);
   return (struct fenced_buffer *)buf;
}


static void
fenced_buffer_destroy_gpu_storage_locked(struct fenced_buffer *fenced_buf);

static enum pipe_error
fenced_buffer_create_gpu_storage_locked(struct fenced_manager *fenced_mgr,
                                        struct fenced_buffer *fenced_buf,
                                        const struct pb_desc *desc,
                                        boolean wait);
/**
 * Dump the fenced buffer list.
 *
 * Useful to understand failures to allocate buffers.
 */
static void
fenced_manager_dump_locked(struct fenced_manager *fenced_mgr)
{
#ifdef DEBUG
   struct pb_fence_ops *ops = fenced_mgr->ops;
   struct list_head *curr, *next;
   struct fenced_buffer *fenced_buf;

   debug_printf("%10s %7s %8s %7s %10s %s\n",
                "buffer", "size", "refcount", "storage", "fence", "signalled");

   curr = fenced_mgr->unfenced.next;
   next = curr->next;
   while(curr != &fenced_mgr->unfenced) {
      fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);
      assert(!fenced_buf->fence);
      debug_printf("%10p %7u %8u %7s\n",
                   (void *) fenced_buf,
                   fenced_buf->base.size,
                   p_atomic_read(&fenced_buf->base.reference.count),
                   fenced_buf->buffer ? "gpu" : "none");
      curr = next;
      next = curr->next;
   }

   curr = fenced_mgr->fenced.next;
   next = curr->next;
   while(curr != &fenced_mgr->fenced) {
      int signaled;
      fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);
      assert(fenced_buf->buffer);
      signaled = ops->fence_signalled(ops, fenced_buf->fence, 0);
      debug_printf("%10p %7u %8u %7s %10p %s\n",
                   (void *) fenced_buf,
                   fenced_buf->base.size,
                   p_atomic_read(&fenced_buf->base.reference.count),
                   "gpu",
                   (void *) fenced_buf->fence,
                   signaled == 0 ? "y" : "n");
      curr = next;
      next = curr->next;
   }
#else
   (void)fenced_mgr;
#endif
}
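

/**
 * Actually destroy the buffer: unlink it from the unfenced list, release its
 * GPU storage and free the wrapper itself.
 *
 * The caller must hold fenced_manager::mutex, and the buffer must be
 * unreferenced and must not have a pending fence.
 */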
static INLINE void
fenced_buffer_destroy_locked(struct fenced_manager *fenced_mgr,
                             struct fenced_buffer *fenced_buf)
{
   assert(!pipe_is_referenced(&fenced_buf->base.reference));

   assert(!fenced_buf->fence);
   assert(fenced_buf->head.prev);
   assert(fenced_buf->head.next);
   LIST_DEL(&fenced_buf->head);
   assert(fenced_mgr->num_unfenced);
   --fenced_mgr->num_unfenced;

   fenced_buffer_destroy_gpu_storage_locked(fenced_buf);

   FREE(fenced_buf);
}


/**
 * Add the buffer to the fenced list.
 *
 * The fenced list holds an extra reference on the buffer, which is taken
 * here and released again in fenced_buffer_remove_locked().
 */
static INLINE void
fenced_buffer_add_locked(struct fenced_manager *fenced_mgr,
                         struct fenced_buffer *fenced_buf)
{
   assert(pipe_is_referenced(&fenced_buf->base.reference));
   assert(fenced_buf->flags & PB_USAGE_GPU_READ_WRITE);
   assert(fenced_buf->fence);

   p_atomic_inc(&fenced_buf->base.reference.count);

   LIST_DEL(&fenced_buf->head);
   assert(fenced_mgr->num_unfenced);
   --fenced_mgr->num_unfenced;
   LIST_ADDTAIL(&fenced_buf->head, &fenced_mgr->fenced);
   ++fenced_mgr->num_fenced;
}


/**
 * Remove the buffer from the fenced list, and potentially destroy the buffer
 * if the reference count reaches zero.
 *
 * Returns TRUE if the buffer was destroyed.
 */
static INLINE boolean
fenced_buffer_remove_locked(struct fenced_manager *fenced_mgr,
                            struct fenced_buffer *fenced_buf)
{
   struct pb_fence_ops *ops = fenced_mgr->ops;

   assert(fenced_buf->fence);
   assert(fenced_buf->mgr == fenced_mgr);

   ops->fence_reference(ops, &fenced_buf->fence, NULL);
   fenced_buf->flags &= ~PB_USAGE_GPU_READ_WRITE;

   assert(fenced_buf->head.prev);
   assert(fenced_buf->head.next);

   LIST_DEL(&fenced_buf->head);
   assert(fenced_mgr->num_fenced);
   --fenced_mgr->num_fenced;

   LIST_ADDTAIL(&fenced_buf->head, &fenced_mgr->unfenced);
   ++fenced_mgr->num_unfenced;

   if (p_atomic_dec_zero(&fenced_buf->base.reference.count)) {
      fenced_buffer_destroy_locked(fenced_mgr, fenced_buf);
      return TRUE;
   }

   return FALSE;
}


/**
 * Wait for the fence to expire, and remove it from the fenced list.
 *
 * This function will release and re-acquire the mutex, so any copy of mutable
 * state must be discarded after calling it.
 */
static INLINE enum pipe_error
fenced_buffer_finish_locked(struct fenced_manager *fenced_mgr,
                            struct fenced_buffer *fenced_buf)
{
   struct pb_fence_ops *ops = fenced_mgr->ops;
   enum pipe_error ret = PIPE_ERROR;

#if 0
   debug_warning("waiting for GPU");
#endif

   assert(pipe_is_referenced(&fenced_buf->base.reference));
   assert(fenced_buf->fence);

   if(fenced_buf->fence) {
      struct pipe_fence_handle *fence = NULL;
      int finished;
      boolean proceed;

      ops->fence_reference(ops, &fence, fenced_buf->fence);

      pipe_mutex_unlock(fenced_mgr->mutex);

      finished = ops->fence_finish(ops, fenced_buf->fence, 0);

      pipe_mutex_lock(fenced_mgr->mutex);

      assert(pipe_is_referenced(&fenced_buf->base.reference));

      /*
       * Only proceed if the fence object didn't change in the meantime.
       * Otherwise assume the work has already been carried out by another
       * thread that re-acquired the lock before us.
       */
      proceed = fence == fenced_buf->fence ? TRUE : FALSE;

      ops->fence_reference(ops, &fence, NULL);

      if(proceed && finished == 0) {
         /*
          * Remove from the fenced list.
          */

         boolean destroyed;

         destroyed = fenced_buffer_remove_locked(fenced_mgr, fenced_buf);

         /* TODO: also remove subsequent buffers with the same fence? */

         assert(!destroyed);

         fenced_buf->flags &= ~PB_USAGE_GPU_READ_WRITE;

         ret = PIPE_OK;
      }
   }

   return ret;
}


/**
 * Remove as many fenced buffers from the fenced list as possible.
 *
 * Returns TRUE if at least one buffer was removed.
 */
static boolean
fenced_manager_check_signalled_locked(struct fenced_manager *fenced_mgr,
                                      boolean wait)
{
   struct pb_fence_ops *ops = fenced_mgr->ops;
   struct list_head *curr, *next;
   struct fenced_buffer *fenced_buf;
   struct pipe_fence_handle *prev_fence = NULL;
   boolean ret = FALSE;

   curr = fenced_mgr->fenced.next;
   next = curr->next;
   while(curr != &fenced_mgr->fenced) {
      fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);

      if(fenced_buf->fence != prev_fence) {
         int signaled;

         if (wait) {
            signaled = ops->fence_finish(ops, fenced_buf->fence, 0);

            /*
             * Don't return just now. Instead preemptively check if the
             * following buffers' fences already expired,
             * without further waits.
             */
            wait = FALSE;
         }
         else {
            signaled = ops->fence_signalled(ops, fenced_buf->fence, 0);
         }

         if (signaled != 0) {
            return ret;
         }

         prev_fence = fenced_buf->fence;
      }
      else {
         /* This buffer's fence object is identical to the previous buffer's
          * fence object, so no need to check the fence again.
          */
         assert(ops->fence_signalled(ops, fenced_buf->fence, 0) == 0);
      }

      fenced_buffer_remove_locked(fenced_mgr, fenced_buf);

      ret = TRUE;

      curr = next;
      next = curr->next;
   }

   return ret;
}


/**
 * Destroy the GPU storage.
 */
static void
fenced_buffer_destroy_gpu_storage_locked(struct fenced_buffer *fenced_buf)
{
   if(fenced_buf->buffer) {
      pb_reference(&fenced_buf->buffer, NULL);
   }
}


/**
 * Try to create GPU storage for this buffer.
 *
 * This function is a shorthand around pb_manager::create_buffer for
 * fenced_buffer_create_gpu_storage_locked()'s benefit.
 */
static INLINE boolean
fenced_buffer_try_create_gpu_storage_locked(struct fenced_manager *fenced_mgr,
                                            struct fenced_buffer *fenced_buf,
                                            const struct pb_desc *desc)
{
   struct pb_manager *provider = fenced_mgr->provider;

   assert(!fenced_buf->buffer);

   fenced_buf->buffer = provider->create_buffer(fenced_mgr->provider,
                                                fenced_buf->size, desc);
   return fenced_buf->buffer ? TRUE : FALSE;
}


/**
 * Create GPU storage for this buffer.
 */
static enum pipe_error
fenced_buffer_create_gpu_storage_locked(struct fenced_manager *fenced_mgr,
                                        struct fenced_buffer *fenced_buf,
                                        const struct pb_desc *desc,
                                        boolean wait)
{
   assert(!fenced_buf->buffer);

   /*
    * Check for signaled buffers before trying to allocate.
    */
   fenced_manager_check_signalled_locked(fenced_mgr, FALSE);

   fenced_buffer_try_create_gpu_storage_locked(fenced_mgr, fenced_buf, desc);

   /*
    * Keep trying while there is some sort of progress:
    * - fences are expiring,
    * - or buffers are being swapped out from GPU memory into CPU memory.
    */
   while(!fenced_buf->buffer &&
         (fenced_manager_check_signalled_locked(fenced_mgr, FALSE))) {
     fenced_buffer_try_create_gpu_storage_locked(fenced_mgr, fenced_buf,
                                                 desc);
   }

   if(!fenced_buf->buffer && wait) {
      /*
       * Same as before, but this time around, wait to free buffers if
       * necessary.
       */
      while(!fenced_buf->buffer &&
            (fenced_manager_check_signalled_locked(fenced_mgr, TRUE))) {
        fenced_buffer_try_create_gpu_storage_locked(fenced_mgr, fenced_buf,
                                                    desc);
      }
   }

   if(!fenced_buf->buffer) {
      if(0)
         fenced_manager_dump_locked(fenced_mgr);

      /* give up */
      return PIPE_ERROR_OUT_OF_MEMORY;
   }

   return PIPE_OK;
}
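

/**
 * pb_vtbl::destroy callback: called once the wrapper's reference count has
 * dropped to zero; destroys the buffer under the manager mutex.
 */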
static void
fenced_buffer_destroy(struct pb_buffer *buf)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   struct fenced_manager *fenced_mgr = fenced_buf->mgr;

   assert(!pipe_is_referenced(&fenced_buf->base.reference));

   pipe_mutex_lock(fenced_mgr->mutex);

   fenced_buffer_destroy_locked(fenced_mgr, fenced_buf);

   pipe_mutex_unlock(fenced_mgr->mutex);
}
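

/**
 * pb_vtbl::map callback: wait for any conflicting GPU access to finish
 * (subject to PB_USAGE_DONTBLOCK and PB_USAGE_UNSYNCHRONIZED), then map the
 * underlying storage.
 */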
static void *
fenced_buffer_map(struct pb_buffer *buf,
                  unsigned flags, void *flush_ctx)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   struct fenced_manager *fenced_mgr = fenced_buf->mgr;
   struct pb_fence_ops *ops = fenced_mgr->ops;
   void *map = NULL;

   pipe_mutex_lock(fenced_mgr->mutex);

   assert(!(flags & PB_USAGE_GPU_READ_WRITE));

   /*
    * Serialize writes.
    */
   while((fenced_buf->flags & PB_USAGE_GPU_WRITE) ||
         ((fenced_buf->flags & PB_USAGE_GPU_READ) &&
          (flags & PB_USAGE_CPU_WRITE))) {

      /*
       * Don't wait for the GPU to finish accessing it,
       * if blocking is forbidden.
       */
      if((flags & PB_USAGE_DONTBLOCK) &&
          ops->fence_signalled(ops, fenced_buf->fence, 0) != 0) {
         goto done;
      }

      if (flags & PB_USAGE_UNSYNCHRONIZED) {
         break;
      }

      /*
       * Wait for the GPU to finish accessing. This will release and re-acquire
       * the mutex, so all copies of mutable state must be discarded.
       */
      fenced_buffer_finish_locked(fenced_mgr, fenced_buf);
   }

   map = pb_map(fenced_buf->buffer, flags, flush_ctx);

   if(map) {
      ++fenced_buf->mapcount;
      fenced_buf->flags |= flags & PB_USAGE_CPU_READ_WRITE;
   }

done:
   pipe_mutex_unlock(fenced_mgr->mutex);

   return map;
}
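

/**
 * pb_vtbl::unmap callback: drop one map reference and clear the CPU usage
 * flags once the map count reaches zero.
 */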
static void
fenced_buffer_unmap(struct pb_buffer *buf)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   struct fenced_manager *fenced_mgr = fenced_buf->mgr;

   pipe_mutex_lock(fenced_mgr->mutex);

   assert(fenced_buf->mapcount);
   if(fenced_buf->mapcount) {
      if (fenced_buf->buffer)
         pb_unmap(fenced_buf->buffer);
      --fenced_buf->mapcount;
      if(!fenced_buf->mapcount)
         fenced_buf->flags &= ~PB_USAGE_CPU_READ_WRITE;
   }

   pipe_mutex_unlock(fenced_mgr->mutex);
}
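

/**
 * pb_vtbl::validate callback: add the underlying buffer to the validation
 * list vl with the given GPU usage flags, or clear the pending validation
 * state when vl is NULL.
 */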
static enum pipe_error
fenced_buffer_validate(struct pb_buffer *buf,
                       struct pb_validate *vl,
                       unsigned flags)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   struct fenced_manager *fenced_mgr = fenced_buf->mgr;
   enum pipe_error ret;

   pipe_mutex_lock(fenced_mgr->mutex);

   if(!vl) {
      /* invalidate */
      fenced_buf->vl = NULL;
      fenced_buf->validation_flags = 0;
      ret = PIPE_OK;
      goto done;
   }

   assert(flags & PB_USAGE_GPU_READ_WRITE);
   assert(!(flags & ~PB_USAGE_GPU_READ_WRITE));
   flags &= PB_USAGE_GPU_READ_WRITE;

   /* Buffer cannot be validated in two different lists */
   if(fenced_buf->vl && fenced_buf->vl != vl) {
      ret = PIPE_ERROR_RETRY;
      goto done;
   }

   if(fenced_buf->vl == vl &&
      (fenced_buf->validation_flags & flags) == flags) {
      /* Nothing to do -- buffer already validated */
      ret = PIPE_OK;
      goto done;
   }

   ret = pb_validate(fenced_buf->buffer, vl, flags);
   if (ret != PIPE_OK)
      goto done;

   fenced_buf->vl = vl;
   fenced_buf->validation_flags |= flags;

done:
   pipe_mutex_unlock(fenced_mgr->mutex);

   return ret;
}
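

/**
 * pb_vtbl::fence callback: associate a new fence with the buffer after
 * command submission. Any previous fence is dropped first. When a non-NULL
 * fence is given, the validated usage flags are recorded and the buffer
 * moves to the fenced list; a NULL fence leaves the buffer unfenced.
 */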
static void
fenced_buffer_fence(struct pb_buffer *buf,
                    struct pipe_fence_handle *fence)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   struct fenced_manager *fenced_mgr = fenced_buf->mgr;
   struct pb_fence_ops *ops = fenced_mgr->ops;

   pipe_mutex_lock(fenced_mgr->mutex);

   assert(pipe_is_referenced(&fenced_buf->base.reference));
   assert(fenced_buf->buffer);

   if(fence != fenced_buf->fence) {
      assert(fenced_buf->vl);
      assert(fenced_buf->validation_flags);

      if (fenced_buf->fence) {
         boolean destroyed;
         destroyed = fenced_buffer_remove_locked(fenced_mgr, fenced_buf);
         assert(!destroyed);
      }
      if (fence) {
         ops->fence_reference(ops, &fenced_buf->fence, fence);
         fenced_buf->flags |= fenced_buf->validation_flags;
         fenced_buffer_add_locked(fenced_mgr, fenced_buf);
      }

      pb_fence(fenced_buf->buffer, fence);

      fenced_buf->vl = NULL;
      fenced_buf->validation_flags = 0;
   }

   pipe_mutex_unlock(fenced_mgr->mutex);
}
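

/**
 * pb_vtbl::get_base_buffer callback: forward to the underlying storage. The
 * else branch is only a defensive fallback; the storage is expected to exist.
 */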
static void
fenced_buffer_get_base_buffer(struct pb_buffer *buf,
                              struct pb_buffer **base_buf,
                              pb_size *offset)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   struct fenced_manager *fenced_mgr = fenced_buf->mgr;

   pipe_mutex_lock(fenced_mgr->mutex);

   assert(fenced_buf->buffer);

   if(fenced_buf->buffer)
      pb_get_base_buffer(fenced_buf->buffer, base_buf, offset);
   else {
      *base_buf = buf;
      *offset = 0;
   }

   pipe_mutex_unlock(fenced_mgr->mutex);
}


static const struct pb_vtbl
fenced_buffer_vtbl = {
      fenced_buffer_destroy,
      fenced_buffer_map,
      fenced_buffer_unmap,
      fenced_buffer_validate,
      fenced_buffer_fence,
      fenced_buffer_get_base_buffer
};


/**
 * Wrap a buffer in a fenced buffer.
 */
static struct pb_buffer *
fenced_bufmgr_create_buffer(struct pb_manager *mgr,
                            pb_size size,
                            const struct pb_desc *desc)
{
   struct fenced_manager *fenced_mgr = fenced_manager(mgr);
   struct fenced_buffer *fenced_buf;
   enum pipe_error ret;

   fenced_buf = CALLOC_STRUCT(fenced_buffer);
   if(!fenced_buf)
      goto no_buffer;

   pipe_reference_init(&fenced_buf->base.reference, 1);
   fenced_buf->base.alignment = desc->alignment;
   fenced_buf->base.usage = desc->usage;
   fenced_buf->base.size = size;
   fenced_buf->size = size;

   fenced_buf->base.vtbl = &fenced_buffer_vtbl;
   fenced_buf->mgr = fenced_mgr;

   pipe_mutex_lock(fenced_mgr->mutex);

   /*
    * Try to create GPU storage, waiting for fences to expire if there is no
    * room immediately available.
    */
   ret = fenced_buffer_create_gpu_storage_locked(fenced_mgr, fenced_buf,
                                                 desc, TRUE);

   /*
    * Give up.
    */
   if(ret != PIPE_OK) {
      goto no_storage;
   }

   assert(fenced_buf->buffer);

   LIST_ADDTAIL(&fenced_buf->head, &fenced_mgr->unfenced);
   ++fenced_mgr->num_unfenced;
   pipe_mutex_unlock(fenced_mgr->mutex);

   return &fenced_buf->base;

no_storage:
   pipe_mutex_unlock(fenced_mgr->mutex);
   FREE(fenced_buf);
no_buffer:
   return NULL;
}
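

/**
 * pb_manager::flush callback: wait for all outstanding fences to expire and
 * then flush the provider.
 */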
static void
fenced_bufmgr_flush(struct pb_manager *mgr)
{
   struct fenced_manager *fenced_mgr = fenced_manager(mgr);

   pipe_mutex_lock(fenced_mgr->mutex);
   while(fenced_manager_check_signalled_locked(fenced_mgr, TRUE))
      ;
   pipe_mutex_unlock(fenced_mgr->mutex);

   assert(fenced_mgr->provider->flush);
   if(fenced_mgr->provider->flush)
      fenced_mgr->provider->flush(fenced_mgr->provider);
}
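

/**
 * pb_manager::destroy callback: wait for every fenced buffer to expire,
 * yielding the CPU between checks, then tear down the mutex and free the
 * manager. The provider itself is not destroyed here.
 */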
static void
fenced_bufmgr_destroy(struct pb_manager *mgr)
{
   struct fenced_manager *fenced_mgr = fenced_manager(mgr);

   pipe_mutex_lock(fenced_mgr->mutex);

   /* Wait on outstanding fences */
   while (fenced_mgr->num_fenced) {
      pipe_mutex_unlock(fenced_mgr->mutex);
#if defined(PIPE_OS_LINUX) || defined(PIPE_OS_BSD) || defined(PIPE_OS_SOLARIS)
      sched_yield();
#endif
      pipe_mutex_lock(fenced_mgr->mutex);
      while(fenced_manager_check_signalled_locked(fenced_mgr, TRUE))
         ;
   }

#ifdef DEBUG
   /*assert(!fenced_mgr->num_unfenced);*/
#endif

   pipe_mutex_unlock(fenced_mgr->mutex);
   pipe_mutex_destroy(fenced_mgr->mutex);

   FREE(fenced_mgr);
}
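

/**
 * Create a fenced buffer manager on top of the given provider, using ops to
 * reference, test and wait on fences. Returns NULL on failure.
 */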
struct pb_manager *
simple_fenced_bufmgr_create(struct pb_manager *provider,
                            struct pb_fence_ops *ops)
{
   struct fenced_manager *fenced_mgr;

   if(!provider)
      return NULL;

   fenced_mgr = CALLOC_STRUCT(fenced_manager);
   if (!fenced_mgr)
      return NULL;

   fenced_mgr->base.destroy = fenced_bufmgr_destroy;
   fenced_mgr->base.create_buffer = fenced_bufmgr_create_buffer;
   fenced_mgr->base.flush = fenced_bufmgr_flush;

   fenced_mgr->provider = provider;
   fenced_mgr->ops = ops;

   LIST_INITHEAD(&fenced_mgr->fenced);
   fenced_mgr->num_fenced = 0;

   LIST_INITHEAD(&fenced_mgr->unfenced);
   fenced_mgr->num_unfenced = 0;

   pipe_mutex_init(fenced_mgr->mutex);

   return &fenced_mgr->base;
}
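

/*
 * Usage sketch (illustrative only): "provider" and "ops" below stand for an
 * existing sub-allocating pb_manager and the winsys' pb_fence_ops, both
 * created elsewhere.
 *
 *    struct pb_manager *mgr = simple_fenced_bufmgr_create(provider, ops);
 *    if (mgr) {
 *       struct pb_desc desc;
 *       struct pb_buffer *buf;
 *
 *       memset(&desc, 0, sizeof desc);
 *       desc.alignment = 4096;
 *       desc.usage = PB_USAGE_GPU_READ_WRITE;
 *
 *       buf = mgr->create_buffer(mgr, 64 * 1024, &desc);
 *       if (buf) {
 *          void *ptr = pb_map(buf, PB_USAGE_CPU_WRITE, NULL);
 *          if (ptr)
 *             pb_unmap(buf);
 *          pb_reference(&buf, NULL);
 *       }
 *
 *       mgr->flush(mgr);
 *       mgr->destroy(mgr);
 *    }
 */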