/* -*- mode: C; c-file-style: "k&r"; tab-width 4; indent-tabs-mode: t; -*- */

/*
 * Copyright (C) 2014 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#ifndef FREEDRENO_QUERY_HW_H_
#define FREEDRENO_QUERY_HW_H_

#include "util/list.h"

#include "freedreno_query.h"
#include "freedreno_context.h"


/*
 * HW Queries:
 *
 * See: https://github.com/freedreno/freedreno/wiki/Queries#hardware-queries
 *
 * Hardware queries are specific to the gpu generation, but they need
 * some common infrastructure for triggering start/stop samples at
 * various points (for example, to exclude mem2gmem/gmem2mem or clear),
 * as well as for per-tile tracking.
 *
 * NOTE: in at least some cases the hw writes sample values to a memory
 * address specified in a register, so we don't really have the option
 * to just sample the same counter multiple times for multiple different
 * queries with the same query_type.  Instead, we cache per sample
 * provider the most recent sample since the last draw.  This way
 * multiple sample periods for multiple queries can reference the same
 * sample.
 *
 * fd_hw_sample_provider:
 *   - one per query type, registered/implemented by gpu generation
 *     specific code
 *   - can construct fd_hw_samples on demand
 *   - most recent sample (since last draw) cached so multiple
 *     different queries can ref the same sample
 *
 * fd_hw_sample:
 *   - abstracts one snapshot of counter value(s) across N tiles
 *   - backing object not allocated until submit time, when the number
 *     of samples and the number of tiles are known
 *
 * fd_hw_sample_period:
 *   - consists of a start and a stop sample
 *   - a query accumulates a list of sample periods
 *   - the query result is the sum over the sample periods
 */

struct fd_hw_sample_provider {
        unsigned query_type;

        /* stages applicable to the query type: */
        enum fd_render_stage active;

        /* when a new sample is required, emit the appropriate cmdstream
         * and return a sample object:
         */
        struct fd_hw_sample *(*get_sample)(struct fd_context *ctx,
                        struct fd_ringbuffer *ring);

        /* accumulate the results from the specified sample period: */
        void (*accumulate_result)(struct fd_context *ctx,
                        const void *start, const void *end,
                        union pipe_query_result *result);
};
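
/* For illustration, a gpu-generation backend might implement and fill
 * in a provider roughly like the sketch below.  This is only a sketch:
 * occlusion_get_sample(), occlusion_accumulate_result() and the single
 * u64 counter layout are assumptions for the example, not part of this
 * interface, and the generation-specific cmdstream emission is elided:
 *
 *    static struct fd_hw_sample *
 *    occlusion_get_sample(struct fd_context *ctx, struct fd_ringbuffer *ring)
 *    {
 *            // one 64-bit counter snapshot (per-tile stride is
 *            // handled at submit time):
 *            struct fd_hw_sample *samp = fd_hw_sample_init(ctx, sizeof(uint64_t));
 *            // ... emit cmds that write the counter value at the
 *            // sample's offset within the backing bo ...
 *            return samp;
 *    }
 *
 *    static void
 *    occlusion_accumulate_result(struct fd_context *ctx,
 *                    const void *start, const void *end,
 *                    union pipe_query_result *result)
 *    {
 *            // add one sample period's counter delta to the result:
 *            result->u64 += *(const uint64_t *)end - *(const uint64_t *)start;
 *    }
 *
 *    static const struct fd_hw_sample_provider occlusion_provider = {
 *            .query_type = PIPE_QUERY_OCCLUSION_COUNTER,
 *            .active = FD_STAGE_DRAW,
 *            .get_sample = occlusion_get_sample,
 *            .accumulate_result = occlusion_accumulate_result,
 *    };
 */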

struct fd_hw_sample {
        struct pipe_reference reference;  /* keep this first */

        /* offset and size of the sample are known at the time the
         * sample is constructed.
         */
        uint32_t size;
        uint32_t offset;

        /* backing object, offset/stride/etc are determined not when
         * the sample is constructed, but when the batch is submitted.
         * This way we can defer allocation until the total # of
         * requested samples, and the total # of tiles, are known.
         */
        struct fd_bo *bo;
        uint32_t num_tiles;
        uint32_t tile_stride;
};

struct fd_hw_sample_period;

struct fd_hw_query {
        struct fd_query base;

        const struct fd_hw_sample_provider *provider;

        /* list of fd_hw_sample_periods from previous submits: */
        struct list_head periods;

        /* list of fd_hw_sample_periods in the current submit: */
        struct list_head current_periods;

        /* if active and not paused, the current sample period (not
         * yet added to current_periods):
         */
        struct fd_hw_sample_period *period;

        struct list_head list;  /* list-node in ctx->active_queries */
};
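
/* To make the bookkeeping above concrete: conceptually, reading back a
 * query result walks the accumulated periods and feeds each period's
 * per-tile start/end snapshots to the provider.  This is only a sketch
 * of the idea (the real logic lives in freedreno_query_hw.c); the
 * period start/end field names and the sample_ptr() helper, mapping a
 * sample plus tile index to its snapshot in the backing bo, are
 * assumptions for the example:
 *
 *    list_for_each_entry(struct fd_hw_sample_period, period, &hq->periods, list) {
 *            for (uint32_t i = 0; i < period->start->num_tiles; i++) {
 *                    hq->provider->accumulate_result(ctx,
 *                                    sample_ptr(period->start, i),
 *                                    sample_ptr(period->end, i),
 *                                    result);
 *            }
 *    }
 */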

static inline struct fd_hw_query *
fd_hw_query(struct fd_query *q)
{
        return (struct fd_hw_query *)q;
}

struct fd_query * fd_hw_create_query(struct fd_context *ctx, unsigned query_type);
/* helper for sample providers: */
struct fd_hw_sample * fd_hw_sample_init(struct fd_context *ctx, uint32_t size);
/* don't call directly, use fd_hw_sample_reference() */
void __fd_hw_sample_destroy(struct fd_context *ctx, struct fd_hw_sample *samp);
void fd_hw_query_prepare(struct fd_context *ctx, uint32_t num_tiles);
void fd_hw_query_prepare_tile(struct fd_context *ctx, uint32_t n,
                struct fd_ringbuffer *ring);
void fd_hw_query_set_stage(struct fd_context *ctx,
                struct fd_ringbuffer *ring, enum fd_render_stage stage);
void fd_hw_query_register_provider(struct pipe_context *pctx,
                const struct fd_hw_sample_provider *provider);
void fd_hw_query_init(struct pipe_context *pctx);
void fd_hw_query_fini(struct pipe_context *pctx);
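
/* Typical usage (illustrative): generation-specific context setup calls
 * fd_hw_query_init() and then registers its providers, e.g. the
 * hypothetical occlusion_provider sketched above:
 *
 *    fd_hw_query_init(pctx);
 *    fd_hw_query_register_provider(pctx, &occlusion_provider);
 */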

/* update *ptr to reference samp, destroying the previously referenced
 * sample if this was the last reference (samp may be NULL to just drop
 * the old reference; this relies on 'reference' staying the first
 * member of fd_hw_sample):
 */
static inline void
fd_hw_sample_reference(struct fd_context *ctx,
                struct fd_hw_sample **ptr, struct fd_hw_sample *samp)
{
        struct fd_hw_sample *old_samp = *ptr;

        if (pipe_reference(&(*ptr)->reference, &samp->reference))
                __fd_hw_sample_destroy(ctx, old_samp);
        *ptr = samp;
}
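
/* Illustrative usage: stashing a sample in a holder pointer and
 * releasing it again later.  Passing NULL as the new sample just drops
 * the old reference:
 *
 *    struct fd_hw_sample *start = NULL;
 *    fd_hw_sample_reference(ctx, &start, samp);  // ref samp, start = samp
 *    ...
 *    fd_hw_sample_reference(ctx, &start, NULL);  // unref, start = NULL
 */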

#endif /* FREEDRENO_QUERY_HW_H_ */