Subversion Repositories: KolibriOS
Rev 4358 (author: Serge)

#include "util/u_inlines.h"
#include "util/u_memory.h"
#include "util/u_math.h"
#include "util/u_surface.h"

#include "nouveau_screen.h"
#include "nouveau_context.h"
#include "nouveau_winsys.h"
#include "nouveau_fence.h"
#include "nouveau_buffer.h"
#include "nouveau_mm.h"

#define NOUVEAU_TRANSFER_PUSHBUF_THRESHOLD 192

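/* Per-map bookkeeping: the pipe_transfer base plus the staging area used for
 * the copy, either a malloc'd bounce buffer or a GART sub-allocation. */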
struct nouveau_transfer {
   struct pipe_transfer base;

   uint8_t *map;
   struct nouveau_bo *bo;
   struct nouveau_mm_allocation *mm;
   uint32_t offset;
};

static INLINE struct nouveau_transfer *
nouveau_transfer(struct pipe_transfer *transfer)
{
   return (struct nouveau_transfer *)transfer;
}

static INLINE boolean
nouveau_buffer_malloc(struct nv04_resource *buf)
{
   if (!buf->data)
      buf->data = align_malloc(buf->base.width0, NOUVEAU_MIN_BUFFER_MAP_ALIGN);
   return !!buf->data;
}

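/* Back the resource with VRAM, GART or a plain system-memory allocation
 * (domain == 0). Buffers bindable as constant/shader/compute resources are
 * padded to a 256-byte multiple; a failed VRAM sub-allocation falls back to
 * GART. */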
static INLINE boolean
nouveau_buffer_allocate(struct nouveau_screen *screen,
                        struct nv04_resource *buf, unsigned domain)
{
   uint32_t size = buf->base.width0;

   if (buf->base.bind & (PIPE_BIND_CONSTANT_BUFFER |
                         PIPE_BIND_COMPUTE_RESOURCE |
                         PIPE_BIND_SHADER_RESOURCE))
      size = align(size, 0x100);

   if (domain == NOUVEAU_BO_VRAM) {
      buf->mm = nouveau_mm_allocate(screen->mm_VRAM, size,
                                    &buf->bo, &buf->offset);
      if (!buf->bo)
         return nouveau_buffer_allocate(screen, buf, NOUVEAU_BO_GART);
      NOUVEAU_DRV_STAT(screen, buf_obj_current_bytes_vid, buf->base.width0);
   } else
   if (domain == NOUVEAU_BO_GART) {
      buf->mm = nouveau_mm_allocate(screen->mm_GART, size,
                                    &buf->bo, &buf->offset);
      if (!buf->bo)
         return FALSE;
      NOUVEAU_DRV_STAT(screen, buf_obj_current_bytes_sys, buf->base.width0);
   } else {
      assert(domain == 0);
      if (!nouveau_buffer_malloc(buf))
         return FALSE;
   }
   buf->domain = domain;
   if (buf->bo)
      buf->address = buf->bo->offset + buf->offset;

   return TRUE;
}

static INLINE void
release_allocation(struct nouveau_mm_allocation **mm,
                   struct nouveau_fence *fence)
{
   nouveau_fence_work(fence, nouveau_mm_free_work, *mm);
   (*mm) = NULL;
}

void
nouveau_buffer_release_gpu_storage(struct nv04_resource *buf)
{
   nouveau_bo_ref(NULL, &buf->bo);

   if (buf->mm)
      release_allocation(&buf->mm, buf->fence);

   if (buf->domain == NOUVEAU_BO_VRAM)
      NOUVEAU_DRV_STAT_RES(buf, buf_obj_current_bytes_vid, -(uint64_t)buf->base.width0);
   if (buf->domain == NOUVEAU_BO_GART)
      NOUVEAU_DRV_STAT_RES(buf, buf_obj_current_bytes_sys, -(uint64_t)buf->base.width0);

   buf->domain = 0;
}

static INLINE boolean
nouveau_buffer_reallocate(struct nouveau_screen *screen,
                          struct nv04_resource *buf, unsigned domain)
{
   nouveau_buffer_release_gpu_storage(buf);

   nouveau_fence_ref(NULL, &buf->fence);
   nouveau_fence_ref(NULL, &buf->fence_wr);

   buf->status &= NOUVEAU_BUFFER_STATUS_REALLOC_MASK;

   return nouveau_buffer_allocate(screen, buf, domain);
}

static void
nouveau_buffer_destroy(struct pipe_screen *pscreen,
                       struct pipe_resource *presource)
{
   struct nv04_resource *res = nv04_resource(presource);

   nouveau_buffer_release_gpu_storage(res);

   if (res->data && !(res->status & NOUVEAU_BUFFER_STATUS_USER_MEMORY))
      align_free(res->data);

   nouveau_fence_ref(NULL, &res->fence);
   nouveau_fence_ref(NULL, &res->fence_wr);

   FREE(res);

   NOUVEAU_DRV_STAT(nouveau_screen(pscreen), buf_obj_current_count, -1);
}

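/* Prepare a staging area for the transfer: either a small malloc'd bounce
 * buffer (when the data can go through the pushbuf via nv->push_data) or a
 * GART sub-allocation that is mapped immediately. */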
static uint8_t *
nouveau_transfer_staging(struct nouveau_context *nv,
                         struct nouveau_transfer *tx, boolean permit_pb)
{
   const unsigned adj = tx->base.box.x & NOUVEAU_MIN_BUFFER_MAP_ALIGN_MASK;
   const unsigned size = align(tx->base.box.width, 4) + adj;

   if (!nv->push_data)
      permit_pb = FALSE;

   if ((size <= NOUVEAU_TRANSFER_PUSHBUF_THRESHOLD) && permit_pb) {
      tx->map = align_malloc(size, NOUVEAU_MIN_BUFFER_MAP_ALIGN);
      if (tx->map)
         tx->map += adj;
   } else {
      tx->mm =
         nouveau_mm_allocate(nv->screen->mm_GART, size, &tx->bo, &tx->offset);
      if (tx->bo) {
         tx->offset += adj;
         if (!nouveau_bo_map(tx->bo, 0, NULL))
            tx->map = (uint8_t *)tx->bo->map + tx->offset;
      }
   }
   return tx->map;
}

/* Maybe just migrate to GART right away if we actually need to do this. */
static boolean
nouveau_transfer_read(struct nouveau_context *nv, struct nouveau_transfer *tx)
{
   struct nv04_resource *buf = nv04_resource(tx->base.resource);
   const unsigned base = tx->base.box.x;
   const unsigned size = tx->base.box.width;

   NOUVEAU_DRV_STAT(nv->screen, buf_read_bytes_staging_vid, size);

   nv->copy_data(nv, tx->bo, tx->offset, NOUVEAU_BO_GART,
                 buf->bo, buf->offset + base, buf->domain, size);

   if (nouveau_bo_wait(tx->bo, NOUVEAU_BO_RD, nv->client))
      return FALSE;

   if (buf->data)
      memcpy(buf->data + base, tx->map, size);

   return TRUE;
}

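/* Push the staged data into the resource: via nv->copy_data when a GART
 * bounce bo exists, via nv->push_cb for dword-aligned constant buffer
 * updates, otherwise via nv->push_data. Also updates the buffer's fences. */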
static void
nouveau_transfer_write(struct nouveau_context *nv, struct nouveau_transfer *tx,
                       unsigned offset, unsigned size)
{
   struct nv04_resource *buf = nv04_resource(tx->base.resource);
   uint8_t *data = tx->map + offset;
   const unsigned base = tx->base.box.x + offset;
   const boolean can_cb = !((base | size) & 3);

   if (buf->data)
      memcpy(data, buf->data + base, size);
   else
      buf->status |= NOUVEAU_BUFFER_STATUS_DIRTY;

   if (buf->domain == NOUVEAU_BO_VRAM)
      NOUVEAU_DRV_STAT(nv->screen, buf_write_bytes_staging_vid, size);
   if (buf->domain == NOUVEAU_BO_GART)
      NOUVEAU_DRV_STAT(nv->screen, buf_write_bytes_staging_sys, size);

   if (tx->bo)
      nv->copy_data(nv, buf->bo, buf->offset + base, buf->domain,
                    tx->bo, tx->offset + offset, NOUVEAU_BO_GART, size);
   else
   if ((buf->base.bind & PIPE_BIND_CONSTANT_BUFFER) && nv->push_cb && can_cb)
      nv->push_cb(nv, buf->bo, buf->domain, buf->offset, buf->base.width0,
                  base, size / 4, (const uint32_t *)data);
   else
      nv->push_data(nv, buf->bo, buf->offset + base, buf->domain, size, data);

   nouveau_fence_ref(nv->screen->fence.current, &buf->fence);
   nouveau_fence_ref(nv->screen->fence.current, &buf->fence_wr);
}

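/* CPU-wait until the buffer is safe to access for the given transfer
 * direction: reads only need the last write fence, writes wait on any
 * pending access. nouveau_buffer_busy() is the non-blocking query. */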
static INLINE boolean
nouveau_buffer_sync(struct nv04_resource *buf, unsigned rw)
{
   if (rw == PIPE_TRANSFER_READ) {
      if (!buf->fence_wr)
         return TRUE;
      NOUVEAU_DRV_STAT_RES(buf, buf_non_kernel_fence_sync_count,
                           !nouveau_fence_signalled(buf->fence_wr));
      if (!nouveau_fence_wait(buf->fence_wr))
         return FALSE;
   } else {
      if (!buf->fence)
         return TRUE;
      NOUVEAU_DRV_STAT_RES(buf, buf_non_kernel_fence_sync_count,
                           !nouveau_fence_signalled(buf->fence));
      if (!nouveau_fence_wait(buf->fence))
         return FALSE;

      nouveau_fence_ref(NULL, &buf->fence);
   }
   nouveau_fence_ref(NULL, &buf->fence_wr);

   return TRUE;
}

static INLINE boolean
nouveau_buffer_busy(struct nv04_resource *buf, unsigned rw)
{
   if (rw == PIPE_TRANSFER_READ)
      return (buf->fence_wr && !nouveau_fence_signalled(buf->fence_wr));
   else
      return (buf->fence && !nouveau_fence_signalled(buf->fence));
}

static INLINE void
nouveau_buffer_transfer_init(struct nouveau_transfer *tx,
                             struct pipe_resource *resource,
                             const struct pipe_box *box,
                             unsigned usage)
{
   tx->base.resource = resource;
   tx->base.level = 0;
   tx->base.usage = usage;
   tx->base.box.x = box->x;
   tx->base.box.y = 0;
   tx->base.box.z = 0;
   tx->base.box.width = box->width;
   tx->base.box.height = 1;
   tx->base.box.depth = 1;
   tx->base.stride = 0;
   tx->base.layer_stride = 0;

   tx->bo = NULL;
   tx->map = NULL;
}

static INLINE void
nouveau_buffer_transfer_del(struct nouveau_context *nv,
                            struct nouveau_transfer *tx)
{
   if (tx->map) {
      if (likely(tx->bo)) {
         nouveau_bo_ref(NULL, &tx->bo);
         if (tx->mm)
            release_allocation(&tx->mm, nv->screen->fence.current);
      } else {
         align_free(tx->map -
                    (tx->base.box.x & NOUVEAU_MIN_BUFFER_MAP_ALIGN_MASK));
      }
   }
}

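/* Refresh the complete system-memory shadow copy of the buffer (allocating
 * it if necessary) by reading back through a staging area; clears the DIRTY
 * status bit on success. */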
static boolean
nouveau_buffer_cache(struct nouveau_context *nv, struct nv04_resource *buf)
{
   struct nouveau_transfer tx;
   boolean ret;
   tx.base.resource = &buf->base;
   tx.base.box.x = 0;
   tx.base.box.width = buf->base.width0;
   tx.bo = NULL;
   tx.map = NULL;

   if (!buf->data)
      if (!nouveau_buffer_malloc(buf))
         return FALSE;
   if (!(buf->status & NOUVEAU_BUFFER_STATUS_DIRTY))
      return TRUE;
   nv->stats.buf_cache_count++;

   if (!nouveau_transfer_staging(nv, &tx, FALSE))
      return FALSE;

   ret = nouveau_transfer_read(nv, &tx);
   if (ret) {
      buf->status &= ~NOUVEAU_BUFFER_STATUS_DIRTY;
      memcpy(buf->data, tx.map, buf->base.width0);
   }
   nouveau_buffer_transfer_del(nv, &tx);
   return ret;
}

#define NOUVEAU_TRANSFER_DISCARD \
   (PIPE_TRANSFER_DISCARD_RANGE | PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE)

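/* Whole-resource discard is only worthwhile for sub-allocated (buf->mm)
 * buffers that are currently busy, and never for shared resources. */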
static INLINE boolean
nouveau_buffer_should_discard(struct nv04_resource *buf, unsigned usage)
{
   if (!(usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE))
      return FALSE;
   if (unlikely(buf->base.bind & PIPE_BIND_SHARED))
      return FALSE;
   return buf->mm && nouveau_buffer_busy(buf, PIPE_TRANSFER_WRITE);
}

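/* pipe_context::transfer_map for buffers. VRAM resources are accessed
 * through the system-memory shadow or a staging area, user/system buffers
 * are returned directly, and GART resources are mapped, with staging copies
 * or fence waits used to avoid stalling on busy sub-allocations. */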
static void *
nouveau_buffer_transfer_map(struct pipe_context *pipe,
                            struct pipe_resource *resource,
                            unsigned level, unsigned usage,
                            const struct pipe_box *box,
                            struct pipe_transfer **ptransfer)
{
   struct nouveau_context *nv = nouveau_context(pipe);
   struct nv04_resource *buf = nv04_resource(resource);
   struct nouveau_transfer *tx = MALLOC_STRUCT(nouveau_transfer);
   uint8_t *map;
   int ret;

   if (!tx)
      return NULL;
   nouveau_buffer_transfer_init(tx, resource, box, usage);
   *ptransfer = &tx->base;

   if (usage & PIPE_TRANSFER_READ)
      NOUVEAU_DRV_STAT(nv->screen, buf_transfers_rd, 1);
   if (usage & PIPE_TRANSFER_WRITE)
      NOUVEAU_DRV_STAT(nv->screen, buf_transfers_wr, 1);

   if (buf->domain == NOUVEAU_BO_VRAM) {
      if (usage & NOUVEAU_TRANSFER_DISCARD) {
         if (usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE)
            buf->status &= NOUVEAU_BUFFER_STATUS_REALLOC_MASK;
         nouveau_transfer_staging(nv, tx, TRUE);
      } else {
         if (buf->status & NOUVEAU_BUFFER_STATUS_GPU_WRITING) {
            if (buf->data) {
               align_free(buf->data);
               buf->data = NULL;
            }
            nouveau_transfer_staging(nv, tx, FALSE);
            nouveau_transfer_read(nv, tx);
         } else {
            if (usage & PIPE_TRANSFER_WRITE)
               nouveau_transfer_staging(nv, tx, TRUE);
            if (!buf->data)
               nouveau_buffer_cache(nv, buf);
         }
      }
      return buf->data ? (buf->data + box->x) : tx->map;
   } else
   if (unlikely(buf->domain == 0)) {
      return buf->data + box->x;
   }

   if (nouveau_buffer_should_discard(buf, usage)) {
      int ref = buf->base.reference.count - 1;
      nouveau_buffer_reallocate(nv->screen, buf, buf->domain);
      if (ref > 0) /* any references inside context possible ? */
         nv->invalidate_resource_storage(nv, &buf->base, ref);
   }

   ret = nouveau_bo_map(buf->bo,
                        buf->mm ? 0 : nouveau_screen_transfer_flags(usage),
                        nv->client);
   if (ret) {
      FREE(tx);
      return NULL;
   }
   map = (uint8_t *)buf->bo->map + buf->offset + box->x;

   /* using kernel fences only if !buf->mm */
   if ((usage & PIPE_TRANSFER_UNSYNCHRONIZED) || !buf->mm)
      return map;

   if (nouveau_buffer_busy(buf, usage & PIPE_TRANSFER_READ_WRITE)) {
      if (unlikely(usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE)) {
         /* Discarding was not possible, must sync because
          * subsequent transfers might use UNSYNCHRONIZED. */
         nouveau_buffer_sync(buf, usage & PIPE_TRANSFER_READ_WRITE);
      } else
      if (usage & PIPE_TRANSFER_DISCARD_RANGE) {
         nouveau_transfer_staging(nv, tx, TRUE);
         map = tx->map;
      } else
      if (nouveau_buffer_busy(buf, PIPE_TRANSFER_READ)) {
         if (usage & PIPE_TRANSFER_DONTBLOCK)
            map = NULL;
         else
            nouveau_buffer_sync(buf, usage & PIPE_TRANSFER_READ_WRITE);
      } else {
         nouveau_transfer_staging(nv, tx, TRUE);
         if (tx->map)
            memcpy(tx->map, map, box->width);
         map = tx->map;
      }
   }
   if (!map)
      FREE(tx);
   return map;
}

static void
nouveau_buffer_transfer_flush_region(struct pipe_context *pipe,
                                     struct pipe_transfer *transfer,
                                     const struct pipe_box *box)
{
   struct nouveau_transfer *tx = nouveau_transfer(transfer);
   if (tx->map)
      nouveau_transfer_write(nouveau_context(pipe), tx, box->x, box->width);
}

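/* pipe_context::transfer_unmap: flush any non-explicit write-back, mark
 * vertex/index/constant buffer state dirty for the affected bindings, then
 * release the staging resources. */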
static void
nouveau_buffer_transfer_unmap(struct pipe_context *pipe,
                              struct pipe_transfer *transfer)
{
   struct nouveau_context *nv = nouveau_context(pipe);
   struct nouveau_transfer *tx = nouveau_transfer(transfer);
   struct nv04_resource *buf = nv04_resource(transfer->resource);

   if (tx->base.usage & PIPE_TRANSFER_WRITE) {
      if (!(tx->base.usage & PIPE_TRANSFER_FLUSH_EXPLICIT) && tx->map)
         nouveau_transfer_write(nv, tx, 0, tx->base.box.width);

      if (likely(buf->domain)) {
         const uint8_t bind = buf->base.bind;
         /* make sure we invalidate dedicated caches */
         if (bind & (PIPE_BIND_VERTEX_BUFFER | PIPE_BIND_INDEX_BUFFER))
            nv->vbo_dirty = TRUE;
         if (bind & (PIPE_BIND_CONSTANT_BUFFER))
            nv->cb_dirty = TRUE;
      }
   }

   if (!tx->bo && (tx->base.usage & PIPE_TRANSFER_WRITE))
      NOUVEAU_DRV_STAT(nv->screen, buf_write_bytes_direct, tx->base.box.width);

   nouveau_buffer_transfer_del(nv, tx);
   FREE(tx);
}

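/* Buffer-to-buffer copy: use the GPU copy hook when both resources have GPU
 * storage (updating fences and status bits), otherwise fall back to
 * util_resource_copy_region(). */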
void
nouveau_copy_buffer(struct nouveau_context *nv,
                    struct nv04_resource *dst, unsigned dstx,
                    struct nv04_resource *src, unsigned srcx, unsigned size)
{
   assert(dst->base.target == PIPE_BUFFER && src->base.target == PIPE_BUFFER);

   if (likely(dst->domain) && likely(src->domain)) {
      nv->copy_data(nv,
                    dst->bo, dst->offset + dstx, dst->domain,
                    src->bo, src->offset + srcx, src->domain, size);

      dst->status |= NOUVEAU_BUFFER_STATUS_GPU_WRITING;
      nouveau_fence_ref(nv->screen->fence.current, &dst->fence);
      nouveau_fence_ref(nv->screen->fence.current, &dst->fence_wr);

      src->status |= NOUVEAU_BUFFER_STATUS_GPU_READING;
      nouveau_fence_ref(nv->screen->fence.current, &src->fence);
   } else {
      struct pipe_box src_box;
      src_box.x = srcx;
      src_box.y = 0;
      src_box.z = 0;
      src_box.width = size;
      src_box.height = 1;
      src_box.depth = 1;
      util_resource_copy_region(&nv->pipe,
                                &dst->base, 0, dstx, 0, 0,
                                &src->base, 0, &src_box);
   }
}

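/* CPU pointer into a resource at a byte offset, for internal driver use:
 * user-memory and VRAM-shadowed buffers return the system copy, GART buffers
 * are mapped (with a fence sync for sub-allocated slabs). */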
void *
nouveau_resource_map_offset(struct nouveau_context *nv,
                            struct nv04_resource *res, uint32_t offset,
                            uint32_t flags)
{
   if (unlikely(res->status & NOUVEAU_BUFFER_STATUS_USER_MEMORY))
      return res->data + offset;

   if (res->domain == NOUVEAU_BO_VRAM) {
      if (!res->data || (res->status & NOUVEAU_BUFFER_STATUS_GPU_WRITING))
         nouveau_buffer_cache(nv, res);
   }
   if (res->domain != NOUVEAU_BO_GART)
      return res->data + offset;

   if (res->mm) {
      unsigned rw;
      rw = (flags & NOUVEAU_BO_WR) ? PIPE_TRANSFER_WRITE : PIPE_TRANSFER_READ;
      nouveau_buffer_sync(res, rw);
      if (nouveau_bo_map(res->bo, 0, NULL))
         return NULL;
   } else {
      if (nouveau_bo_map(res->bo, flags, nv->client))
         return NULL;
   }
   return (uint8_t *)res->bo->map + res->offset + offset;
}

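/* u_resource_vtbl hooks used for PIPE_BUFFER resources. */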
const struct u_resource_vtbl nouveau_buffer_vtbl =
{
   u_default_resource_get_handle,     /* get_handle */
   nouveau_buffer_destroy,               /* resource_destroy */
   nouveau_buffer_transfer_map,          /* transfer_map */
   nouveau_buffer_transfer_flush_region, /* transfer_flush_region */
   nouveau_buffer_transfer_unmap,        /* transfer_unmap */
   u_default_transfer_inline_write    /* transfer_inline_write */
};

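/* pipe_screen::resource_create for buffers: pick VRAM or GART based on the
 * bind flags and usage hint, allocate the storage, and optionally keep a
 * system-memory shadow copy. */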
struct pipe_resource *
nouveau_buffer_create(struct pipe_screen *pscreen,
                      const struct pipe_resource *templ)
{
   struct nouveau_screen *screen = nouveau_screen(pscreen);
   struct nv04_resource *buffer;
   boolean ret;

   buffer = CALLOC_STRUCT(nv04_resource);
   if (!buffer)
      return NULL;

   buffer->base = *templ;
   buffer->vtbl = &nouveau_buffer_vtbl;
   pipe_reference_init(&buffer->base.reference, 1);
   buffer->base.screen = pscreen;

   if (buffer->base.bind &
       (screen->vidmem_bindings & screen->sysmem_bindings)) {
      switch (buffer->base.usage) {
      case PIPE_USAGE_DEFAULT:
      case PIPE_USAGE_IMMUTABLE:
      case PIPE_USAGE_STATIC:
         buffer->domain = NOUVEAU_BO_VRAM;
         break;
      case PIPE_USAGE_DYNAMIC:
         /* For most apps, we'd have to do staging transfers to avoid sync
          * with this usage, and GART -> GART copies would be suboptimal.
          */
         buffer->domain = NOUVEAU_BO_VRAM;
         break;
      case PIPE_USAGE_STAGING:
      case PIPE_USAGE_STREAM:
         buffer->domain = NOUVEAU_BO_GART;
         break;
      default:
         assert(0);
         break;
      }
   } else {
      if (buffer->base.bind & screen->vidmem_bindings)
         buffer->domain = NOUVEAU_BO_VRAM;
      else
      if (buffer->base.bind & screen->sysmem_bindings)
         buffer->domain = NOUVEAU_BO_GART;
   }
   ret = nouveau_buffer_allocate(screen, buffer, buffer->domain);

   if (ret == FALSE)
      goto fail;

   if (buffer->domain == NOUVEAU_BO_VRAM && screen->hint_buf_keep_sysmem_copy)
      nouveau_buffer_cache(NULL, buffer);

   NOUVEAU_DRV_STAT(screen, buf_obj_current_count, 1);

   return &buffer->base;

fail:
   FREE(buffer);
   return NULL;
}

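/* Wrap an application-owned memory block as a PIPE_BUFFER without creating
 * GPU storage; the pointer is adopted as the resource's data. */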
struct pipe_resource *
nouveau_user_buffer_create(struct pipe_screen *pscreen, void *ptr,
                           unsigned bytes, unsigned bind)
{
   struct nv04_resource *buffer;

   buffer = CALLOC_STRUCT(nv04_resource);
   if (!buffer)
      return NULL;

   pipe_reference_init(&buffer->base.reference, 1);
   buffer->vtbl = &nouveau_buffer_vtbl;
   buffer->base.screen = pscreen;
   buffer->base.format = PIPE_FORMAT_R8_UNORM;
   buffer->base.usage = PIPE_USAGE_IMMUTABLE;
   buffer->base.bind = bind;
   buffer->base.width0 = bytes;
   buffer->base.height0 = 1;
   buffer->base.depth0 = 1;

   buffer->data = ptr;
   buffer->status = NOUVEAU_BUFFER_STATUS_USER_MEMORY;

   return &buffer->base;
}

static INLINE boolean
nouveau_buffer_data_fetch(struct nouveau_context *nv, struct nv04_resource *buf,
                          struct nouveau_bo *bo, unsigned offset, unsigned size)
{
   if (!nouveau_buffer_malloc(buf))
      return FALSE;
   if (nouveau_bo_map(bo, NOUVEAU_BO_RD, nv->client))
      return FALSE;
   memcpy(buf->data, (uint8_t *)bo->map + offset, size);
   return TRUE;
}

/* Migrate a linear buffer (vertex, index, constants) USER -> GART -> VRAM. */
boolean
nouveau_buffer_migrate(struct nouveau_context *nv,
                       struct nv04_resource *buf, const unsigned new_domain)
{
   struct nouveau_screen *screen = nv->screen;
   struct nouveau_bo *bo;
   const unsigned old_domain = buf->domain;
   unsigned size = buf->base.width0;
   unsigned offset;
   int ret;

   assert(new_domain != old_domain);

   if (new_domain == NOUVEAU_BO_GART && old_domain == 0) {
      if (!nouveau_buffer_allocate(screen, buf, new_domain))
         return FALSE;
      ret = nouveau_bo_map(buf->bo, 0, nv->client);
      if (ret)
         return ret;
      memcpy((uint8_t *)buf->bo->map + buf->offset, buf->data, size);
      align_free(buf->data);
   } else
   if (old_domain != 0 && new_domain != 0) {
      struct nouveau_mm_allocation *mm = buf->mm;

      if (new_domain == NOUVEAU_BO_VRAM) {
         /* keep a system memory copy of our data in case we hit a fallback */
         if (!nouveau_buffer_data_fetch(nv, buf, buf->bo, buf->offset, size))
            return FALSE;
         if (nouveau_mesa_debug)
            debug_printf("migrating %u KiB to VRAM\n", size / 1024);
      }

      offset = buf->offset;
      bo = buf->bo;
      buf->bo = NULL;
      buf->mm = NULL;
      nouveau_buffer_allocate(screen, buf, new_domain);

      nv->copy_data(nv, buf->bo, buf->offset, new_domain,
                    bo, offset, old_domain, buf->base.width0);

      nouveau_bo_ref(NULL, &bo);
      if (mm)
         release_allocation(&mm, screen->fence.current);
   } else
   if (new_domain == NOUVEAU_BO_VRAM && old_domain == 0) {
      struct nouveau_transfer tx;
      if (!nouveau_buffer_allocate(screen, buf, NOUVEAU_BO_VRAM))
         return FALSE;
      tx.base.resource = &buf->base;
      tx.base.box.x = 0;
      tx.base.box.width = buf->base.width0;
      tx.bo = NULL;
      tx.map = NULL;
      if (!nouveau_transfer_staging(nv, &tx, FALSE))
         return FALSE;
      nouveau_transfer_write(nv, &tx, 0, tx.base.box.width);
      nouveau_buffer_transfer_del(nv, &tx);
   } else
      return FALSE;

   assert(buf->domain == new_domain);
   return TRUE;
}

/* Migrate data from glVertexAttribPointer(non-VBO) user buffers to GART.
 * We'd like to only allocate @size bytes here, but then we'd have to rebase
 * the vertex indices ...
 */
boolean
nouveau_user_buffer_upload(struct nouveau_context *nv,
                           struct nv04_resource *buf,
                           unsigned base, unsigned size)
{
   struct nouveau_screen *screen = nouveau_screen(buf->base.screen);
   int ret;

   assert(buf->status & NOUVEAU_BUFFER_STATUS_USER_MEMORY);

   buf->base.width0 = base + size;
   if (!nouveau_buffer_reallocate(screen, buf, NOUVEAU_BO_GART))
      return FALSE;

   ret = nouveau_bo_map(buf->bo, 0, nv->client);
   if (ret)
      return FALSE;
   memcpy((uint8_t *)buf->bo->map + buf->offset + base, buf->data + base, size);

   return TRUE;
}

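/* The context keeps a small ring of mapped GART scratch buffers for
 * streaming user data; when a request does not fit, an extra "runout" bo is
 * allocated and later freed by nouveau_scratch_runout_release(). */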
/* Scratch data allocation. */

static INLINE int
nouveau_scratch_bo_alloc(struct nouveau_context *nv, struct nouveau_bo **pbo,
                         unsigned size)
{
   return nouveau_bo_new(nv->screen->device, NOUVEAU_BO_GART | NOUVEAU_BO_MAP,
                         4096, size, NULL, pbo);
}

void
nouveau_scratch_runout_release(struct nouveau_context *nv)
{
   if (!nv->scratch.nr_runout)
      return;
   do {
      --nv->scratch.nr_runout;
      nouveau_bo_ref(NULL, &nv->scratch.runout[nv->scratch.nr_runout]);
   } while (nv->scratch.nr_runout);

   FREE(nv->scratch.runout);
   nv->scratch.end = 0;
   nv->scratch.runout = NULL;
}

/* Allocate an extra bo if we can't fit everything we need simultaneously.
 * (Could happen for very large user arrays.)
 */
static INLINE boolean
nouveau_scratch_runout(struct nouveau_context *nv, unsigned size)
{
   int ret;
   const unsigned n = nv->scratch.nr_runout++;

   nv->scratch.runout = REALLOC(nv->scratch.runout,
                                (n + 0) * sizeof(*nv->scratch.runout),
                                (n + 1) * sizeof(*nv->scratch.runout));
   nv->scratch.runout[n] = NULL;

   ret = nouveau_scratch_bo_alloc(nv, &nv->scratch.runout[n], size);
   if (!ret) {
      ret = nouveau_bo_map(nv->scratch.runout[n], 0, NULL);
      if (ret)
         nouveau_bo_ref(NULL, &nv->scratch.runout[--nv->scratch.nr_runout]);
   }
   if (!ret) {
      nv->scratch.current = nv->scratch.runout[n];
      nv->scratch.offset = 0;
      nv->scratch.end = size;
      nv->scratch.map = nv->scratch.current->map;
   }
   return !ret;
}

/* Continue to next scratch buffer, if available (no wrapping, large enough).
 * Allocate it if it has not yet been created.
 */
static INLINE boolean
nouveau_scratch_next(struct nouveau_context *nv, unsigned size)
{
   struct nouveau_bo *bo;
   int ret;
   const unsigned i = (nv->scratch.id + 1) % NOUVEAU_MAX_SCRATCH_BUFS;

   if ((size > nv->scratch.bo_size) || (i == nv->scratch.wrap))
      return FALSE;
   nv->scratch.id = i;

   bo = nv->scratch.bo[i];
   if (!bo) {
      ret = nouveau_scratch_bo_alloc(nv, &bo, nv->scratch.bo_size);
      if (ret)
         return FALSE;
      nv->scratch.bo[i] = bo;
   }
   nv->scratch.current = bo;
   nv->scratch.offset = 0;
   nv->scratch.end = nv->scratch.bo_size;

   ret = nouveau_bo_map(bo, NOUVEAU_BO_WR, nv->client);
   if (!ret)
      nv->scratch.map = bo->map;
   return !ret;
}

static boolean
nouveau_scratch_more(struct nouveau_context *nv, unsigned min_size)
{
   boolean ret;

   ret = nouveau_scratch_next(nv, min_size);
   if (!ret)
      ret = nouveau_scratch_runout(nv, min_size);
   return ret;
}

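/* nouveau_scratch_data() copies user data into the current scratch bo and
 * returns its GPU address; nouveau_scratch_get() reserves space and returns
 * both the CPU map and the GPU address for the caller to fill. */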
/* Copy data to a scratch buffer and return address & bo the data resides in. */
uint64_t
nouveau_scratch_data(struct nouveau_context *nv,
                     const void *data, unsigned base, unsigned size,
                     struct nouveau_bo **bo)
{
   unsigned bgn = MAX2(base, nv->scratch.offset);
   unsigned end = bgn + size;

   if (end >= nv->scratch.end) {
      end = base + size;
      if (!nouveau_scratch_more(nv, end))
         return 0;
      bgn = base;
   }
   nv->scratch.offset = align(end, 4);

   memcpy(nv->scratch.map + bgn, (const uint8_t *)data + base, size);

   *bo = nv->scratch.current;
   return (*bo)->offset + (bgn - base);
}

void *
nouveau_scratch_get(struct nouveau_context *nv,
                    unsigned size, uint64_t *gpu_addr, struct nouveau_bo **pbo)
{
   unsigned bgn = nv->scratch.offset;
   unsigned end = nv->scratch.offset + size;

   if (end >= nv->scratch.end) {
      end = size;
      if (!nouveau_scratch_more(nv, end))
         return NULL;
      bgn = 0;
   }
   nv->scratch.offset = align(end, 4);

   *pbo = nv->scratch.current;
   *gpu_addr = nv->scratch.current->offset + bgn;
   return nv->scratch.map + bgn;
}