/**********************************************************
 * Copyright 2009 VMware, Inc.  All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy,
 * modify, merge, publish, distribute, sublicense, and/or sell copies
 * of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 **********************************************************/

/**
 * @file
 * SVGA buffer manager for Guest Memory Regions (GMRs).
 *
 * GMRs are used for pixel and vertex data upload/download to/from the virtual
 * SVGA hardware. There is a limited number of GMRs available, and
 * creating/destroying them is also a slow operation, so we must suballocate
 * them.
 *
 * This file implements a buffer manager for the pipebuffer library, so that
 * we can use pipebuffer's suballocation, fencing, and debugging facilities
 * with GMRs. A brief, illustrative usage sketch follows
 * vmw_gmr_bufmgr_region_ptr() below.
 *
 * @author Jose Fonseca <jfonseca@vmware.com>
 */


#include "svga_cmd.h"

#include "util/u_inlines.h"
#include "util/u_memory.h"
#include "pipebuffer/pb_buffer.h"
#include "pipebuffer/pb_bufmgr.h"

#include "svga_winsys.h"

#include "vmw_screen.h"
#include "vmw_buffer.h"

struct vmw_gmr_bufmgr;


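/**
 * A pb_buffer backed by a single kernel GMR region.
 *
 * The region is mapped lazily on first map and stays mapped until the buffer
 * is destroyed; map_flags remembers the flags of the last map so that unmap
 * can mirror the CPU synchronization performed at map time.
 */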
struct vmw_gmr_buffer
{
   struct pb_buffer base;

   struct vmw_gmr_bufmgr *mgr;

   struct vmw_region *region;
   void *map;
   unsigned map_flags;
};


extern const struct pb_vtbl vmw_gmr_buffer_vtbl;


static INLINE struct vmw_gmr_buffer *
vmw_gmr_buffer(struct pb_buffer *buf)
{
   assert(buf);
   assert(buf->vtbl == &vmw_gmr_buffer_vtbl);
   return (struct vmw_gmr_buffer *)buf;
}


struct vmw_gmr_bufmgr
{
   struct pb_manager base;

   struct vmw_winsys_screen *vws;
};


static INLINE struct vmw_gmr_bufmgr *
vmw_gmr_bufmgr(struct pb_manager *mgr)
{
   assert(mgr);
   return (struct vmw_gmr_bufmgr *)mgr;
}


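/* Tear down the buffer: drop the kernel mapping, destroy the GMR region and
 * free the wrapper structure.
 */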
static void
vmw_gmr_buffer_destroy(struct pb_buffer *_buf)
{
   struct vmw_gmr_buffer *buf = vmw_gmr_buffer(_buf);

   vmw_ioctl_region_unmap(buf->region);

   vmw_ioctl_region_destroy(buf->region);

   FREE(buf);
}


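/* Map the buffer for CPU access.  The kernel mapping is created on first use
 * and kept for the lifetime of the buffer.  Buffers marked
 * VMW_BUFFER_USAGE_SYNC additionally take a sync-for-CPU hold, unless the
 * caller asked for an unsynchronized mapping.
 */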
static void *
vmw_gmr_buffer_map(struct pb_buffer *_buf,
                   unsigned flags,
                   void *flush_ctx)
{
   struct vmw_gmr_buffer *buf = vmw_gmr_buffer(_buf);
   int ret;

   if (!buf->map)
      buf->map = vmw_ioctl_region_map(buf->region);

   if (!buf->map)
      return NULL;

   /* Remember the mapping flags so that unmap can mirror the CPU
    * synchronization performed below.
    */
   buf->map_flags = flags;

   if ((_buf->usage & VMW_BUFFER_USAGE_SYNC) &&
       !(flags & PB_USAGE_UNSYNCHRONIZED)) {
      ret = vmw_ioctl_syncforcpu(buf->region,
                                 !!(flags & PB_USAGE_DONTBLOCK),
                                 !(flags & PB_USAGE_CPU_WRITE),
                                 FALSE);
      if (ret)
         return NULL;
   }

   return buf->map;
}


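/* Unmap only releases the CPU synchronization hold taken at map time; the
 * actual kernel mapping persists until the buffer is destroyed.
 */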
static void
vmw_gmr_buffer_unmap(struct pb_buffer *_buf)
{
   struct vmw_gmr_buffer *buf = vmw_gmr_buffer(_buf);
   unsigned flags = buf->map_flags;

   if ((_buf->usage & VMW_BUFFER_USAGE_SYNC) &&
       !(flags & PB_USAGE_UNSYNCHRONIZED)) {
      vmw_ioctl_releasefromcpu(buf->region,
                               !(flags & PB_USAGE_CPU_WRITE),
                               FALSE);
   }
}


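/* GMR buffers are not suballocated at this level, so a buffer is its own
 * base buffer at offset zero; suballocation happens in the pipebuffer
 * managers layered on top.
 */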
static void
vmw_gmr_buffer_get_base_buffer(struct pb_buffer *buf,
                               struct pb_buffer **base_buf,
                               unsigned *offset)
{
   *base_buf = buf;
   *offset = 0;
}


static enum pipe_error
vmw_gmr_buffer_validate( struct pb_buffer *_buf,
                         struct pb_validate *vl,
                         unsigned flags )
{
   /* Always pinned */
   return PIPE_OK;
}


static void
vmw_gmr_buffer_fence( struct pb_buffer *_buf,
                      struct pipe_fence_handle *fence )
{
   /* We don't need to do anything, as the pipebuffer library
    * will take care of delaying the destruction of fenced buffers. */
}


const struct pb_vtbl vmw_gmr_buffer_vtbl = {
   vmw_gmr_buffer_destroy,
   vmw_gmr_buffer_map,
   vmw_gmr_buffer_unmap,
   vmw_gmr_buffer_validate,
   vmw_gmr_buffer_fence,
   vmw_gmr_buffer_get_base_buffer
};


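/* Allocate a new GMR-backed pb_buffer.  Shared buffers reuse the region
 * supplied through the vmw_buffer_desc; all other buffers get a freshly
 * created kernel region of the requested size.
 */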
static struct pb_buffer *
vmw_gmr_bufmgr_create_buffer(struct pb_manager *_mgr,
                             pb_size size,
                             const struct pb_desc *pb_desc)
{
   struct vmw_gmr_bufmgr *mgr = vmw_gmr_bufmgr(_mgr);
   struct vmw_winsys_screen *vws = mgr->vws;
   struct vmw_gmr_buffer *buf;
   const struct vmw_buffer_desc *desc =
      (const struct vmw_buffer_desc *) pb_desc;

   buf = CALLOC_STRUCT(vmw_gmr_buffer);
   if(!buf)
      goto error1;

   pipe_reference_init(&buf->base.reference, 1);
   buf->base.alignment = pb_desc->alignment;
   buf->base.usage = pb_desc->usage & ~VMW_BUFFER_USAGE_SHARED;
   buf->base.vtbl = &vmw_gmr_buffer_vtbl;
   buf->mgr = mgr;
   buf->base.size = size;
   if ((pb_desc->usage & VMW_BUFFER_USAGE_SHARED) && desc->region) {
      buf->region = desc->region;
   } else {
      buf->region = vmw_ioctl_region_create(vws, size);
      if(!buf->region)
         goto error2;
   }

   return &buf->base;
error2:
   FREE(buf);
error1:
   return NULL;
}


static void
vmw_gmr_bufmgr_flush(struct pb_manager *mgr)
{
   /* No-op */
}


static void
vmw_gmr_bufmgr_destroy(struct pb_manager *_mgr)
{
   struct vmw_gmr_bufmgr *mgr = vmw_gmr_bufmgr(_mgr);
   FREE(mgr);
}


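/* Create the GMR buffer manager for a winsys screen.  The winsys typically
 * layers pipebuffer's suballocating and fenced managers on top of it, as
 * described in the file comment above.
 */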
struct pb_manager *
vmw_gmr_bufmgr_create(struct vmw_winsys_screen *vws)
{
   struct vmw_gmr_bufmgr *mgr;

   mgr = CALLOC_STRUCT(vmw_gmr_bufmgr);
   if(!mgr)
      return NULL;

   mgr->base.destroy = vmw_gmr_bufmgr_destroy;
   mgr->base.create_buffer = vmw_gmr_bufmgr_create_buffer;
   mgr->base.flush = vmw_gmr_bufmgr_flush;

   mgr->vws = vws;

   return &mgr->base;
}


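/* Resolve a (possibly suballocated) pb_buffer to its underlying GMR buffer
 * and return the corresponding guest pointer, adjusted by the suballocation
 * offset.
 */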
boolean
vmw_gmr_bufmgr_region_ptr(struct pb_buffer *buf,
                          struct SVGAGuestPtr *ptr)
{
   struct pb_buffer *base_buf;
   unsigned offset = 0;
   struct vmw_gmr_buffer *gmr_buf;

   pb_get_base_buffer( buf, &base_buf, &offset );

   gmr_buf = vmw_gmr_buffer(base_buf);
   if(!gmr_buf)
      return FALSE;

   *ptr = vmw_ioctl_region_ptr(gmr_buf->region);

   ptr->offset += offset;

   return TRUE;
}

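/*
 * Brief usage sketch (illustrative only; not compiled into the winsys).
 *
 * It shows how a caller could create the manager, allocate a GMR-backed
 * buffer, map it for CPU access and translate it into a guest pointer for
 * SVGA commands.  It assumes that struct vmw_buffer_desc (vmw_buffer.h)
 * embeds its struct pb_desc as a first member named "pb_desc"; check the
 * header and adjust the field name if it differs.
 */
#if 0
static void
example_gmr_buffer_usage(struct vmw_winsys_screen *vws)
{
   struct pb_manager *mgr = vmw_gmr_bufmgr_create(vws);
   struct vmw_buffer_desc desc;
   struct pb_buffer *buf;
   struct SVGAGuestPtr ptr;
   void *map;

   if (!mgr)
      return;

   desc.pb_desc.alignment = 4096;   /* hypothetical page alignment */
   desc.pb_desc.usage = 0;          /* not shared: a new region is created */
   desc.region = NULL;

   /* Ask the manager for a 64 KiB GMR-backed buffer. */
   buf = mgr->create_buffer(mgr, 64 * 1024, &desc.pb_desc);
   if (!buf) {
      mgr->destroy(mgr);
      return;
   }

   /* CPU access goes through the usual pb_buffer map/unmap hooks. */
   map = pb_map(buf, PB_USAGE_CPU_WRITE, NULL);
   if (map) {
      /* ... fill in vertex or pixel data here ... */
      pb_unmap(buf);
   }

   /* Translate the buffer into a guest pointer usable in SVGA commands. */
   if (vmw_gmr_bufmgr_region_ptr(buf, &ptr)) {
      /* ptr now identifies the GMR and the offset within it. */
   }

   pb_reference(&buf, NULL);   /* last reference: the buffer is destroyed */
   mgr->destroy(mgr);
}
#endif
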
#ifdef DEBUG
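/* In debug builds the svga_winsys_buffer handle pairs the pb_buffer with a
 * debug_flush buffer so that map/unmap and flush ordering can be checked.
 * (In release builds the handle is expected to be just the pb_buffer itself;
 * see vmw_buffer.h.)
 */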
struct svga_winsys_buffer {
   struct pb_buffer *pb_buf;
   struct debug_flush_buf *fbuf;
};

struct pb_buffer *
vmw_pb_buffer(struct svga_winsys_buffer *buffer)
{
   assert(buffer);
   return buffer->pb_buf;
}

struct svga_winsys_buffer *
vmw_svga_winsys_buffer_wrap(struct pb_buffer *buffer)
{
   struct svga_winsys_buffer *buf;

   if (!buffer)
      return NULL;

   buf = CALLOC_STRUCT(svga_winsys_buffer);
   if (!buf) {
      pb_reference(&buffer, NULL);
      return NULL;
   }

   buf->pb_buf = buffer;
   buf->fbuf = debug_flush_buf_create(TRUE, VMW_DEBUG_FLUSH_STACK);
   return buf;
}

struct debug_flush_buf *
vmw_debug_flush_buf(struct svga_winsys_buffer *buffer)
{
   return buffer->fbuf;
}

#endif

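/* Drop the winsys reference to the underlying pb_buffer.  In debug builds
 * also release the debug_flush tracking state and free the wrapper.
 */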
void
vmw_svga_winsys_buffer_destroy(struct svga_winsys_screen *sws,
                               struct svga_winsys_buffer *buf)
{
   struct pb_buffer *pbuf = vmw_pb_buffer(buf);
   (void)sws;
   pb_reference(&pbuf, NULL);
#ifdef DEBUG
   debug_flush_buf_reference(&buf->fbuf, NULL);
   FREE(buf);
#endif
}

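/* Map a winsys buffer.  Unsynchronized mappings never block, so the
 * DONTBLOCK flag is dropped before the flags reach pb_map.  Debug builds
 * additionally record the mapping for flush debugging.
 */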
void *
vmw_svga_winsys_buffer_map(struct svga_winsys_screen *sws,
                           struct svga_winsys_buffer *buf,
                           unsigned flags)
{
   void *map;

   (void)sws;
   if (flags & PIPE_TRANSFER_UNSYNCHRONIZED)
      flags &= ~PIPE_TRANSFER_DONTBLOCK;

   map = pb_map(vmw_pb_buffer(buf), flags, NULL);

#ifdef DEBUG
   if (map != NULL)
      debug_flush_map(buf->fbuf, flags);
#endif

   return map;
}


void
vmw_svga_winsys_buffer_unmap(struct svga_winsys_screen *sws,
                             struct svga_winsys_buffer *buf)
{
   (void)sws;

#ifdef DEBUG
   debug_flush_unmap(buf->fbuf);
#endif

   pb_unmap(vmw_pb_buffer(buf));
}
  372.