Subversion Repositories Kolibri OS

Rev

Blame | Last modification | View Log | RSS feed

  1. #ifndef __NOUVEAU_BUFFER_H__
  2. #define __NOUVEAU_BUFFER_H__
  3.  
  4. #include "util/u_range.h"
  5. #include "util/u_transfer.h"
  6. #include "util/list.h"
  7.  
  8. struct pipe_resource;
  9. struct nouveau_context;
  10. struct nouveau_bo;
  11.  
/* Buffer status flags (stored in nv04_resource::status).
 *
 * GPU_READING / GPU_WRITING: the GPU has pending (or already flushed)
 *  reads/writes against this buffer; used to decide when migration or
 *  synchronization is required.
 *
 * DIRTY: buffer was (or will be after the next flush) written to by GPU and
 *  resource->data has not been updated to reflect modified VRAM contents
 *
 * USER_MEMORY: resource->data is a pointer to client memory and may change
 *  between GL calls
 */
#define NOUVEAU_BUFFER_STATUS_GPU_READING (1 << 0)
#define NOUVEAU_BUFFER_STATUS_GPU_WRITING (1 << 1)
#define NOUVEAU_BUFFER_STATUS_DIRTY       (1 << 2)
#define NOUVEAU_BUFFER_STATUS_USER_MEMORY (1 << 7)

/* Flags that must be preserved when the buffer's storage is reallocated. */
#define NOUVEAU_BUFFER_STATUS_REALLOC_MASK NOUVEAU_BUFFER_STATUS_USER_MEMORY
  24.  
  25. /* Resources, if mapped into the GPU's address space, are guaranteed to
  26.  * have constant virtual addresses (nv50+).
  27.  *
  28.  * The address of a resource will lie within the nouveau_bo referenced,
  29.  * and this bo should be added to the memory manager's validation list.
  30.  */
/* Driver-private resource wrapper around a gallium pipe_resource.
 * Overlays struct pipe_resource so a pipe_resource* can be downcast
 * (see nv04_resource() below).
 */
struct nv04_resource {
   struct pipe_resource base;          /* must be first: enables the downcast */
   const struct u_resource_vtbl *vtbl; /* per-resource function table */

   uint64_t address; /* virtual address (nv50+) */

   uint8_t *data; /* resource's contents, if domain == 0, or cached */
   struct nouveau_bo *bo;   /* backing buffer object when mapped by the GPU */
   uint32_t offset; /* offset into the data/bo */

   uint8_t status;  /* NOUVEAU_BUFFER_STATUS_* flags */
   uint8_t domain;  /* memory domain; 0 means not GPU-mapped (see
                     * nouveau_resource_mapped_by_gpu) */

   struct nouveau_fence *fence;    /* last GPU access (read or write) */
   struct nouveau_fence *fence_wr; /* last GPU write */

   struct nouveau_mm_allocation *mm; /* sub-allocation handle, if any */

   /* buffer range that has been initialized */
   struct util_range valid_buffer_range;
};
  52.  
  53. void
  54. nouveau_buffer_release_gpu_storage(struct nv04_resource *);
  55.  
  56. void
  57. nouveau_copy_buffer(struct nouveau_context *,
  58.                     struct nv04_resource *dst, unsigned dst_pos,
  59.                     struct nv04_resource *src, unsigned src_pos, unsigned size);
  60.  
  61. boolean
  62. nouveau_buffer_migrate(struct nouveau_context *,
  63.                        struct nv04_resource *, unsigned domain);
  64.  
  65. void *
  66. nouveau_resource_map_offset(struct nouveau_context *, struct nv04_resource *,
  67.                             uint32_t offset, uint32_t flags);
  68.  
/* Counterpart to nouveau_resource_map_offset(). Intentionally a no-op:
 * mappings obtained through nouveau_resource_map_offset() do not need to
 * be released per-use. Kept so callers follow a symmetric map/unmap style.
 */
static INLINE void
nouveau_resource_unmap(struct nv04_resource *res)
{
   /* no-op */
}
  74.  
  75. static INLINE struct nv04_resource *
  76. nv04_resource(struct pipe_resource *resource)
  77. {
  78.    return (struct nv04_resource *)resource;
  79. }
  80.  
  81. /* is resource mapped into the GPU's address space (i.e. VRAM or GART) ? */
  82. static INLINE boolean
  83. nouveau_resource_mapped_by_gpu(struct pipe_resource *resource)
  84. {
  85.    return nv04_resource(resource)->domain != 0;
  86. }
  87.  
  88. struct pipe_resource *
  89. nouveau_buffer_create(struct pipe_screen *pscreen,
  90.                       const struct pipe_resource *templ);
  91.  
  92. struct pipe_resource *
  93. nouveau_user_buffer_create(struct pipe_screen *screen, void *ptr,
  94.                            unsigned bytes, unsigned usage);
  95.  
  96. boolean
  97. nouveau_user_buffer_upload(struct nouveau_context *, struct nv04_resource *,
  98.                            unsigned base, unsigned size);
  99.  
  100. /* Copy data to a scratch buffer and return address & bo the data resides in.
  101.  * Returns 0 on failure.
  102.  */
  103. uint64_t
  104. nouveau_scratch_data(struct nouveau_context *,
  105.                      const void *data, unsigned base, unsigned size,
  106.                      struct nouveau_bo **);
  107.  
  108. #endif
  109.