
/**************************************************************************
 *
 * Copyright 2012 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

/**
 * @file
 * u_debug_flush.c Debug flush and map-related issues:
 * - Flush while synchronously mapped.
 * - Command stream reference while synchronously mapped.
 * - Synchronous map while referenced on command stream.
 * - Recursive maps.
 * - Unmap while not mapped.
 *
 * @author Thomas Hellstrom <thellstrom@vmware.com>
 */
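
/*
 * Usage sketch (illustrative only; my_buf and my_ctx are hypothetical
 * driver-side types, not part of this interface): a winsys shadows each
 * buffer and context with a debug object, then mirrors its map/unmap/
 * reference/flush entry points into the debug_flush_* calls below.
 *
 *    my_buf->fbuf = debug_flush_buf_create(TRUE, 8);
 *    my_ctx->fctx = debug_flush_ctx_create(TRUE, 8);
 *
 *    debug_flush_map(my_buf->fbuf, flags);        / * in the map hook      * /
 *    debug_flush_unmap(my_buf->fbuf);             / * in the unmap hook    * /
 *    debug_flush_cb_reference(my_ctx->fctx, my_buf->fbuf);
 *                              / * when a command batch references it      * /
 *    debug_flush_might_flush(my_ctx->fctx);       / * possible flush point * /
 *    debug_flush_flush(my_ctx->fctx);             / * on an actual flush   * /
 */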

#ifdef DEBUG
#include "pipe/p_compiler.h"
#include "util/u_debug_stack.h"
#include "util/u_debug.h"
#include "util/u_memory.h"
#include "util/u_debug_flush.h"
#include "util/u_hash_table.h"
#include "util/list.h"
#include "util/u_inlines.h"
#include "util/u_string.h"
#include "os/os_thread.h"
#include <stdio.h>

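/* Per-buffer debug state: shadows one driver buffer and records whether it
 * is currently mapped and where the map was taken. It can be referenced by
 * several contexts at once, hence the reference count and mutex. */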
struct debug_flush_buf {
   /* Atomic */
   struct pipe_reference reference; /* Must be the first member. */
   pipe_mutex mutex;
   /* Immutable */
   boolean supports_unsync;
   unsigned bt_depth;
   /* Protected by mutex */
   boolean mapped;
   boolean mapped_sync;
   struct debug_stack_frame *map_frame;
};

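/* One entry per buffer referenced by a context's current command batch,
 * remembering the backtrace of the first reference. */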
struct debug_flush_item {
   struct debug_flush_buf *fbuf;
   unsigned bt_depth;
   struct debug_stack_frame *ref_frame;
};

struct debug_flush_ctx {
   /* Contexts are used by a single thread at a time */
   unsigned bt_depth;
   boolean catch_map_of_referenced;
   struct util_hash_table *ref_hash;
   struct list_head head;
};

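/* Global list of all live debug contexts, so that a synchronous map can be
 * cross-checked against every context's referenced buffers. Protected by
 * list_mutex. */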
pipe_static_mutex(list_mutex);
static struct list_head ctx_list = {&ctx_list, &ctx_list};

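/* Capture 'depth' frames of the current backtrace, skipping the 'start'
 * innermost frames. Returns NULL (and thus no backtrace) on allocation
 * failure. */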
static struct debug_stack_frame *
debug_flush_capture_frame(int start, int depth)
{
   struct debug_stack_frame *frames;

   frames = CALLOC(depth, sizeof(*frames));
   if (!frames)
      return NULL;

   debug_backtrace_capture(frames, start, depth);
   return frames;
}

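/* The ref_hash table is keyed on raw buffer pointers, so identity compare
 * and a pointer-derived hash suffice. */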
static int
debug_flush_pointer_compare(void *key1, void *key2)
{
   return (key1 == key2) ? 0 : 1;
}

static unsigned
debug_flush_pointer_hash(void *key)
{
   return (unsigned) (unsigned long) key;
}

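/* Create the debug shadow for one buffer. supports_unsync tells the checker
 * that PIPE_TRANSFER_UNSYNCHRONIZED maps of this buffer really are
 * unsynchronized; bt_depth is the number of backtrace frames to record. */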
struct debug_flush_buf *
debug_flush_buf_create(boolean supports_unsync, unsigned bt_depth)
{
   struct debug_flush_buf *fbuf = CALLOC_STRUCT(debug_flush_buf);

   if (!fbuf)
      goto out_no_buf;

   fbuf->supports_unsync = supports_unsync;
   fbuf->bt_depth = bt_depth;
   pipe_reference_init(&fbuf->reference, 1);
   pipe_mutex_init(fbuf->mutex);

   return fbuf;
out_no_buf:
   debug_printf("Debug flush buffer creation failed.\n");
   debug_printf("Debug flush checking for this buffer will be incomplete.\n");
   return NULL;
}

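/* Standard reference-count helper: make *dst point at src, destroying the
 * old *dst when its last reference goes away. Pass src == NULL to just
 * unreference. */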
void
debug_flush_buf_reference(struct debug_flush_buf **dst,
                          struct debug_flush_buf *src)
{
   struct debug_flush_buf *fbuf = *dst;

   if (pipe_reference(&(*dst)->reference, &src->reference)) {
      FREE(fbuf->map_frame);
      FREE(fbuf);
   }

   *dst = src;
}

static void
debug_flush_item_destroy(struct debug_flush_item *item)
{
   debug_flush_buf_reference(&item->fbuf, NULL);

   FREE(item->ref_frame);
   FREE(item);
}

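/* Create a per-context debug object and link it onto the global context
 * list. The boolean enables the extra "map of an already referenced buffer"
 * check; bt_depth sets the backtrace depth for this context's alerts. */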
struct debug_flush_ctx *
debug_flush_ctx_create(boolean catch_reference_of_mapped, unsigned bt_depth)
{
   struct debug_flush_ctx *fctx = CALLOC_STRUCT(debug_flush_ctx);

   if (!fctx)
      goto out_no_ctx;

   fctx->ref_hash = util_hash_table_create(debug_flush_pointer_hash,
                                           debug_flush_pointer_compare);

   if (!fctx->ref_hash)
      goto out_no_ref_hash;

   fctx->bt_depth = bt_depth;
   /* Record whether maps of already-referenced buffers should be flagged;
    * the original code dropped this parameter, leaving that check
    * permanently disabled. */
   fctx->catch_map_of_referenced = catch_reference_of_mapped;
   pipe_mutex_lock(list_mutex);
   list_addtail(&fctx->head, &ctx_list);
   pipe_mutex_unlock(list_mutex);

   return fctx;

out_no_ref_hash:
   FREE(fctx);
out_no_ctx:
   debug_printf("Debug flush context creation failed.\n");
   debug_printf("Debug flush checking for this context will be incomplete.\n");
   return NULL;
}

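/* Print one alert: an optional message 's', then a backtrace labeled 'op'.
 * With 'capture' set, the current backtrace is captured (skipping 'start'
 * frames); otherwise the caller-supplied 'frame' is dumped. 'continued'
 * indicates that further related alerts follow in the same message. */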
static void
debug_flush_alert(const char *s, const char *op,
                  unsigned start, unsigned depth,
                  boolean continued,
                  boolean capture,
                  const struct debug_stack_frame *frame)
{
   if (capture)
      frame = debug_flush_capture_frame(start, depth);

   if (s)
      debug_printf("%s ", s);
   if (frame) {
      debug_printf("%s backtrace follows:\n", op);
      debug_backtrace_dump(frame, depth);
   } else
      debug_printf("No %s backtrace was captured.\n", op);

   if (continued)
      debug_printf("**********************************\n");
   else
      debug_printf("*********END OF MESSAGE***********\n\n\n");

   if (capture)
      FREE((void *)frame);
}

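/* Hook to call from the driver's map function. Flags recursive maps, and,
 * for synchronous maps (or any map when the buffer cannot do unsynchronized
 * maps), maps of buffers already referenced by some context's command
 * batch. */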
void
debug_flush_map(struct debug_flush_buf *fbuf, unsigned flags)
{
   boolean mapped_sync = FALSE;

   if (!fbuf)
      return;

   pipe_mutex_lock(fbuf->mutex);
   if (fbuf->mapped) {
      debug_flush_alert("Recursive map detected.", "Map",
                        2, fbuf->bt_depth, TRUE, TRUE, NULL);
      debug_flush_alert(NULL, "Previous map", 0, fbuf->bt_depth, FALSE,
                        FALSE, fbuf->map_frame);
   } else if (!(flags & PIPE_TRANSFER_UNSYNCHRONIZED) ||
              !fbuf->supports_unsync) {
      fbuf->mapped_sync = mapped_sync = TRUE;
   }
   /* Free any previous frame before overwriting it; the original code
    * leaked it on a recursive map. */
   FREE(fbuf->map_frame);
   fbuf->map_frame = debug_flush_capture_frame(1, fbuf->bt_depth);
   fbuf->mapped = TRUE;
   pipe_mutex_unlock(fbuf->mutex);

   if (mapped_sync) {
      struct debug_flush_ctx *fctx;

      pipe_mutex_lock(list_mutex);
      LIST_FOR_EACH_ENTRY(fctx, &ctx_list, head) {
         struct debug_flush_item *item =
            util_hash_table_get(fctx->ref_hash, fbuf);

         if (item && fctx->catch_map_of_referenced) {
            debug_flush_alert("Already referenced map detected.",
                              "Map", 2, fbuf->bt_depth, TRUE, TRUE, NULL);
            debug_flush_alert(NULL, "Reference", 0, item->bt_depth,
                              FALSE, FALSE, item->ref_frame);
         }
      }
      pipe_mutex_unlock(list_mutex);
   }
}

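/* Hook to call from the driver's unmap function. Flags unmaps of buffers
 * that were never mapped, and clears the recorded map state. */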
void
debug_flush_unmap(struct debug_flush_buf *fbuf)
{
   if (!fbuf)
      return;

   pipe_mutex_lock(fbuf->mutex);
   if (!fbuf->mapped)
      debug_flush_alert("Unmap not previously mapped detected.", "Unmap",
                        2, fbuf->bt_depth, FALSE, TRUE, NULL);

   fbuf->mapped_sync = FALSE;
   fbuf->mapped = FALSE;
   FREE(fbuf->map_frame);
   fbuf->map_frame = NULL;
   pipe_mutex_unlock(fbuf->mutex);
}

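/* Hook to call whenever a command batch picks up a reference to a buffer.
 * Flags references taken while the buffer is synchronously mapped, and
 * records the first reference per buffer in the context's ref_hash. */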
void
debug_flush_cb_reference(struct debug_flush_ctx *fctx,
                         struct debug_flush_buf *fbuf)
{
   struct debug_flush_item *item;

   if (!fctx || !fbuf)
      return;

   item = util_hash_table_get(fctx->ref_hash, fbuf);

   pipe_mutex_lock(fbuf->mutex);
   if (fbuf->mapped_sync) {
      debug_flush_alert("Reference of mapped buffer detected.", "Reference",
                        2, fctx->bt_depth, TRUE, TRUE, NULL);
      debug_flush_alert(NULL, "Map", 0, fbuf->bt_depth, FALSE,
                        FALSE, fbuf->map_frame);
   }
   pipe_mutex_unlock(fbuf->mutex);

   if (!item) {
      item = CALLOC_STRUCT(debug_flush_item);
      if (item) {
         debug_flush_buf_reference(&item->fbuf, fbuf);
         item->bt_depth = fctx->bt_depth;
         item->ref_frame = debug_flush_capture_frame(2, item->bt_depth);
         if (util_hash_table_set(fctx->ref_hash, fbuf, item) != PIPE_OK) {
            debug_flush_item_destroy(item);
            goto out_no_item;
         }
         return;
      }
      goto out_no_item;
   }
   return;

out_no_item:
   debug_printf("Debug flush command buffer reference creation failed.\n");
   debug_printf("Debug flush checking will be incomplete "
                "for this command batch.\n");
}

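/* Per-buffer callback for the flush checks below: warn if a buffer that is
 * still synchronously mapped is about to be (or is being) flushed, dumping
 * the map and first-reference backtraces. */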
static enum pipe_error
debug_flush_might_flush_cb(void *key, void *value, void *data)
{
   struct debug_flush_item *item =
      (struct debug_flush_item *) value;
   struct debug_flush_buf *fbuf = item->fbuf;
   const char *reason = (const char *) data;
   char message[80];

   util_snprintf(message, sizeof(message),
                 "%s referenced mapped buffer detected.", reason);

   pipe_mutex_lock(fbuf->mutex);
   if (fbuf->mapped_sync) {
      debug_flush_alert(message, reason, 3, item->bt_depth, TRUE, TRUE, NULL);
      debug_flush_alert(NULL, "Map", 0, fbuf->bt_depth, TRUE, FALSE,
                        fbuf->map_frame);
      debug_flush_alert(NULL, "First reference", 0, item->bt_depth, FALSE,
                        FALSE, item->ref_frame);
   }
   pipe_mutex_unlock(fbuf->mutex);

   return PIPE_OK;
}

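/* Hook to call where a flush might be triggered implicitly. Only reports;
 * the set of referenced buffers is left intact. */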
void
debug_flush_might_flush(struct debug_flush_ctx *fctx)
{
   if (!fctx)
      return;

   util_hash_table_foreach(fctx->ref_hash,
                           debug_flush_might_flush_cb,
                           "Might flush");
}

static enum pipe_error
debug_flush_flush_cb(void *key, void *value, void *data)
{
   struct debug_flush_item *item =
      (struct debug_flush_item *) value;

   debug_flush_item_destroy(item);

   return PIPE_OK;
}

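/* Hook to call on an actual flush: run the same mapped-while-referenced
 * check, then drop all per-batch reference records, since the batch has
 * been submitted. */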
void
debug_flush_flush(struct debug_flush_ctx *fctx)
{
   if (!fctx)
      return;

   util_hash_table_foreach(fctx->ref_hash,
                           debug_flush_might_flush_cb,
                           "Flush");
   util_hash_table_foreach(fctx->ref_hash,
                           debug_flush_flush_cb,
                           NULL);
   util_hash_table_clear(fctx->ref_hash);
}

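/* Destroy a context's debug object, releasing any remaining reference
 * records. */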
void
debug_flush_ctx_destroy(struct debug_flush_ctx *fctx)
{
   if (!fctx)
      return;

   /* ctx_list is walked under list_mutex in debug_flush_map(), so unlinking
    * must take the same lock. */
   pipe_mutex_lock(list_mutex);
   list_del(&fctx->head);
   pipe_mutex_unlock(list_mutex);
   util_hash_table_foreach(fctx->ref_hash,
                           debug_flush_flush_cb,
                           NULL);
   util_hash_table_clear(fctx->ref_hash);
   util_hash_table_destroy(fctx->ref_hash);
   FREE(fctx);
}
#endif