#ifndef __NOUVEAU_BUFFER_H__
#define __NOUVEAU_BUFFER_H__

#include "util/u_transfer.h"
#include "util/u_double_list.h"

struct pipe_resource;
struct nouveau_context;
struct nouveau_bo;

/* GPU_READING / GPU_WRITING: the GPU is (or will be after the next flush)
 *  reading from / writing to the buffer; synchronization goes through the
 *  fence / fence_wr members of struct nv04_resource
 *
 * DIRTY: buffer was (or will be after the next flush) written to by GPU and
 *  resource->data has not been updated to reflect modified VRAM contents
 *
 * USER_MEMORY: resource->data is a pointer to client memory and may change
 *  between GL calls
 */
#define NOUVEAU_BUFFER_STATUS_GPU_READING (1 << 0)
#define NOUVEAU_BUFFER_STATUS_GPU_WRITING (1 << 1)
#define NOUVEAU_BUFFER_STATUS_DIRTY       (1 << 2)
#define NOUVEAU_BUFFER_STATUS_USER_MEMORY (1 << 7)

#define NOUVEAU_BUFFER_STATUS_REALLOC_MASK NOUVEAU_BUFFER_STATUS_USER_MEMORY
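
/* Illustrative sketch (not part of the original header): typical tests on the
 * status bits of a struct nv04_resource (defined below). The usage pattern is
 * inferred from the flag comments above, not taken from the implementation.
 *
 *   if (res->status & NOUVEAU_BUFFER_STATUS_GPU_WRITING) {
 *      // GPU writes are (or will be) pending, synchronize before CPU access
 *   }
 *   if (!(res->status & NOUVEAU_BUFFER_STATUS_USER_MEMORY)) {
 *      // res->data is driver-owned storage, not a client pointer
 *   }
 *   res->status &= ~NOUVEAU_BUFFER_STATUS_DIRTY; // e.g. after re-reading VRAM
 */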

/* Resources, if mapped into the GPU's address space, are guaranteed to
 * have constant virtual addresses (nv50+).
 *
 * The address of a resource will lie within the nouveau_bo referenced,
 * and this bo should be added to the memory manager's validation list.
 */
struct nv04_resource {
   struct pipe_resource base;
   const struct u_resource_vtbl *vtbl;

   uint64_t address; /* virtual address (nv50+) */

   uint8_t *data;
   struct nouveau_bo *bo;
   uint32_t offset;

   uint8_t status;
   uint8_t domain;

   struct nouveau_fence *fence;
   struct nouveau_fence *fence_wr;

   struct nouveau_mm_allocation *mm;
};
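
/* Interpretation sketch (not part of the original header): a resource either
 * lives in client or system memory (domain == 0, accessed through 'data'), or
 * in a GPU domain, where 'bo' plus 'offset' locate the storage and 'address'
 * holds the constant virtual address on nv50+. The helper below is purely
 * hypothetical; it mirrors nouveau_resource_mapped_by_gpu() declared further
 * down, but takes the nv04_resource directly.
 *
 *   static INLINE boolean
 *   nv04_resource_has_gpu_storage(const struct nv04_resource *res)
 *   {
 *      return res->domain != 0;
 *   }
 */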

void
nouveau_buffer_release_gpu_storage(struct nv04_resource *);

void
nouveau_copy_buffer(struct nouveau_context *,
                    struct nv04_resource *dst, unsigned dst_pos,
                    struct nv04_resource *src, unsigned src_pos, unsigned size);

boolean
nouveau_buffer_migrate(struct nouveau_context *,
                       struct nv04_resource *, unsigned domain);

void *
nouveau_resource_map_offset(struct nouveau_context *, struct nv04_resource *,
                            uint32_t offset, uint32_t flags);

static INLINE void
nouveau_resource_unmap(struct nv04_resource *res)
{
   /* no-op */
}
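
/* Usage sketch (illustrative only): mapping a sub-range of a resource for CPU
 * access and releasing it again. The flag value is an assumption; the set of
 * accepted flags is defined by the implementation, not by this header.
 *
 *   uint32_t *words = nouveau_resource_map_offset(nv, res, 0, NOUVEAU_BO_RD);
 *   if (words) {
 *      // ... read the buffer contents through 'words' ...
 *      nouveau_resource_unmap(res); // currently a no-op, kept for symmetry
 *   }
 */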

static INLINE struct nv04_resource *
nv04_resource(struct pipe_resource *resource)
{
   return (struct nv04_resource *)resource;
}

/* Is the resource mapped into the GPU's address space (i.e. VRAM or GART)? */
static INLINE boolean
nouveau_resource_mapped_by_gpu(struct pipe_resource *resource)
{
   return nv04_resource(resource)->domain != 0;
}

struct pipe_resource *
nouveau_buffer_create(struct pipe_screen *pscreen,
                      const struct pipe_resource *templ);

struct pipe_resource *
nouveau_user_buffer_create(struct pipe_screen *screen, void *ptr,
                           unsigned bytes, unsigned usage);
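
/* Usage sketch (illustrative only): wrapping client memory in a pipe_resource.
 * 'pscreen' and 'vertices' are hypothetical, and the bind flag passed as
 * 'usage' is an assumption.
 *
 *   float vertices[12];
 *   struct pipe_resource *buf =
 *      nouveau_user_buffer_create(pscreen, vertices, sizeof(vertices),
 *                                 PIPE_BIND_VERTEX_BUFFER);
 *   struct nv04_resource *res = nv04_resource(buf);
 *   // res->status is expected to carry NOUVEAU_BUFFER_STATUS_USER_MEMORY
 */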

boolean
nouveau_user_buffer_upload(struct nouveau_context *, struct nv04_resource *,
                           unsigned base, unsigned size);

/* Copy data to a scratch buffer and return the address & bo the data resides in.
 * Returns 0 on failure.
 */
uint64_t
nouveau_scratch_data(struct nouveau_context *,
                     const void *data, unsigned base, unsigned size,
                     struct nouveau_bo **);
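
/* Usage sketch (illustrative only): staging data through the scratch buffer
 * and checking the documented failure value. Variable names are hypothetical.
 *
 *   struct nouveau_bo *bo;
 *   uint64_t addr = nouveau_scratch_data(nv, data, base, size, &bo);
 *   if (!addr)
 *      return FALSE; // copy failed
 *   // 'addr' is the GPU virtual address of the copy, located inside 'bo'
 */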

#endif