/* -*- c-basic-offset: 4 -*- */
/*
 * Copyright © 2006 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <assert.h>
#include <stdlib.h>
#include <errno.h>
#include <memory.h>

//#include "xf86.h"
#include "intel.h"
#include "i830_reg.h"
#include "i915_drm.h"
#include "i965_reg.h"

//#include "uxa.h"

#define DUMP_BATCHBUFFERS NULL // "/tmp/i915-batchbuffers.dump"

#define DBG printf

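/* Upload whatever vertex data has accumulated in intel->vertex_ptr into the
 * current vertex bo, then drop our reference so the next intel_next_vertex()
 * call starts with a fresh buffer.
 */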
static void intel_end_vertex(intel_screen_private *intel)
{
        if (intel->vertex_bo) {
                if (intel->vertex_used) {
                        dri_bo_subdata(intel->vertex_bo, 0, intel->vertex_used*4, intel->vertex_ptr);
                        intel->vertex_used = 0;
                }

                dri_bo_unreference(intel->vertex_bo);
                intel->vertex_bo = NULL;
        }

        intel->vertex_id = 0;
}

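/* Retire the current vertex bo and allocate a replacement. Note the size is
 * sizeof(intel->vertex_ptr), which presumes vertex_ptr is a fixed-size
 * staging array (as in the upstream xf86-video-intel driver) rather than a
 * plain pointer.
 */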
void intel_next_vertex(intel_screen_private *intel)
{
        intel_end_vertex(intel);

        intel->vertex_bo =
                dri_bo_alloc(intel->bufmgr, "vertex", sizeof (intel->vertex_ptr), 4096);
}

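/* Allocate a page-aligned bo to hold batch commands; normally four pages,
 * clamped to one on i865 (see the comment below).
 */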
static dri_bo *bo_alloc(void)
{
        intel_screen_private *intel = intel_get_screen_private();
        int size = 4 * 4096;
        /* The 865 has issues with larger-than-page-sized batch buffers. */
        if (IS_I865G(intel))
                size = 4096;
        return dri_bo_alloc(intel->bufmgr, "batch", size, 4096);
}

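/* Recycle batch bos for the given ring (mode is 1 for the BLT ring): clear
 * the relocation cache of the bo just submitted, park it in
 * last_batch_bo[mode], and reuse the previously parked bo as the new batch
 * target.
 */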
static void intel_next_batch(int mode)
{
        intel_screen_private *intel = intel_get_screen_private();
        dri_bo *tmp;

        drm_intel_gem_bo_clear_relocs(intel->batch_bo, 0);

        tmp = intel->last_batch_bo[mode];
        intel->last_batch_bo[mode] = intel->batch_bo;
        intel->batch_bo = tmp;

        intel->batch_used = 0;

        /* We don't know when another client has executed, so we have
         * to reinitialize our 3D state per batch.
         */
        intel->last_3d = LAST_3D_OTHER;
}

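/* One-time setup, presumably called during driver initialization: allocate
 * the active batch bo plus one spare per ring and reset the batch state.
 */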
void intel_batch_init(void)
{
        intel_screen_private *intel = intel_get_screen_private();

        ENTER();

        intel->batch_emit_start = 0;
        intel->batch_emitting = 0;
        intel->vertex_id = 0;

        intel->last_batch_bo[0] = bo_alloc();
        intel->last_batch_bo[1] = bo_alloc();

        intel->batch_bo = bo_alloc();
        intel->batch_used = 0;
        intel->last_3d = LAST_3D_OTHER;

        LEAVE();
}

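/* Release every batch and vertex bo we still hold and drain the
 * batch_pixmaps list.
 */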
void intel_batch_teardown(void)
{
        intel_screen_private *intel = intel_get_screen_private();
        int i;

        for (i = 0; i < ARRAY_SIZE(intel->last_batch_bo); i++) {
                if (intel->last_batch_bo[i] != NULL) {
                        dri_bo_unreference(intel->last_batch_bo[i]);
                        intel->last_batch_bo[i] = NULL;
                }
        }

        if (intel->batch_bo != NULL) {
                dri_bo_unreference(intel->batch_bo);
                intel->batch_bo = NULL;
        }

        if (intel->vertex_bo) {
                dri_bo_unreference(intel->vertex_bo);
                intel->vertex_bo = NULL;
        }

        while (!list_is_empty(&intel->batch_pixmaps))
                list_del(intel->batch_pixmaps.next);
}

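/* Clear the dirty flag on every pixmap referenced by this batch; the actual
 * flush commands are emitted by the callers below.
 */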
static void intel_batch_do_flush(void)
{
        intel_screen_private *intel = intel_get_screen_private();
        struct intel_pixmap *priv;

        list_for_each_entry(priv, &intel->batch_pixmaps, batch)
                priv->dirty = 0;
}

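/* Sandybridge (gen6) workaround: before a PIPE_CONTROL with a non-zero
 * post-sync operation may be emitted, the pipe must first be stalled at the
 * scoreboard and a dummy post-sync write issued (here, a qword write into
 * wa_scratch_bo). Only then is the real flush emitted.
 */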
static void intel_emit_post_sync_nonzero_flush(void)
{
        intel_screen_private *intel = intel_get_screen_private();

        /* Keep this entire sequence of 3 PIPE_CONTROL cmds in one batch to
         * avoid upsetting the GPU. */
        BEGIN_BATCH(3*4);
        OUT_BATCH(BRW_PIPE_CONTROL | (4 - 2));
        OUT_BATCH(BRW_PIPE_CONTROL_CS_STALL |
                  BRW_PIPE_CONTROL_STALL_AT_SCOREBOARD);
        OUT_BATCH(0); /* address */
        OUT_BATCH(0); /* write data */

        OUT_BATCH(BRW_PIPE_CONTROL | (4 - 2));
        OUT_BATCH(BRW_PIPE_CONTROL_WRITE_QWORD);
        OUT_RELOC(intel->wa_scratch_bo,
                  I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION, 0);
        OUT_BATCH(0); /* write data */

        /* Now, finally, the real flush. */
        OUT_BATCH(BRW_PIPE_CONTROL | (4 - 2));
        OUT_BATCH(BRW_PIPE_CONTROL_WC_FLUSH |
                  BRW_PIPE_CONTROL_TC_FLUSH |
                  BRW_PIPE_CONTROL_NOWRITE);
        OUT_BATCH(0); /* write address */
        OUT_BATCH(0); /* write data */
        ADVANCE_BATCH();
}

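/* Emit a cache flush suited to the hardware generation and current ring:
 * MI_FLUSH_DW on the BLT ring, a PIPE_CONTROL on the gen6+ render ring
 * (via the post-sync workaround on gen6 itself), and plain MI_FLUSH before
 * gen6.
 */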
void intel_batch_emit_flush(void)
{
        intel_screen_private *intel = intel_get_screen_private();
        int flags;

        assert(!intel->in_batch_atomic);

        /* Big hammer; look to the pipelined flushes in the future. */
        if (INTEL_INFO(intel)->gen >= 060) {
                if (intel->current_batch == BLT_BATCH) {
                        BEGIN_BATCH_BLT(4);
                        OUT_BATCH(MI_FLUSH_DW | 2);
                        OUT_BATCH(0);
                        OUT_BATCH(0);
                        OUT_BATCH(0);
                        ADVANCE_BATCH();
                } else {
                        if (INTEL_INFO(intel)->gen == 060) {
                                /* HW workaround for Sandybridge */
                                intel_emit_post_sync_nonzero_flush();
                        } else {
                                BEGIN_BATCH(4);
                                OUT_BATCH(BRW_PIPE_CONTROL | (4 - 2));
                                OUT_BATCH(BRW_PIPE_CONTROL_WC_FLUSH |
                                          BRW_PIPE_CONTROL_TC_FLUSH |
                                          BRW_PIPE_CONTROL_NOWRITE);
                                OUT_BATCH(0); /* write address */
                                OUT_BATCH(0); /* write data */
                                ADVANCE_BATCH();
                        }
                }
        } else {
                flags = MI_WRITE_DIRTY_STATE | MI_INVALIDATE_MAP_CACHE;
                if (INTEL_INFO(intel)->gen >= 040)
                        flags = 0;

                BEGIN_BATCH(1);
                OUT_BATCH(MI_FLUSH | flags);
                ADVANCE_BATCH();
        }
        intel_batch_do_flush();
}

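/* Close out the current batch: flush pending vertices, terminate the
 * command stream, hand the buffer to the kernel for execution, and recycle
 * the bos for the next batch.
 */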
void intel_batch_submit(void)
{
        intel_screen_private *intel = intel_get_screen_private();
        int ret;

        assert(!intel->in_batch_atomic);

        if (intel->vertex_flush)
                intel->vertex_flush(intel);
        intel_end_vertex(intel);

        if (intel->batch_flush)
                intel->batch_flush(intel);

        if (intel->batch_used == 0)
                return;

        /* Mark the end of the batchbuffer. */
        OUT_BATCH(MI_BATCH_BUFFER_END);
        /* Emit a padding dword if we aren't going to be quad-word aligned. */
        if (intel->batch_used & 1)
                OUT_BATCH(MI_NOOP);

        if (DUMP_BATCHBUFFERS) {
            FILE *file = fopen(DUMP_BATCHBUFFERS, "a");
            if (file) {
                fwrite(intel->batch_ptr, intel->batch_used*4, 1, file);
                fclose(file);
            }
        }

        ret = dri_bo_subdata(intel->batch_bo, 0, intel->batch_used*4, intel->batch_ptr);
        if (ret == 0) {
                ret = drm_intel_bo_mrb_exec(intel->batch_bo,
                                intel->batch_used*4,
                                NULL, 0, 0xffffffff,
                                (HAS_BLT(intel) ?
                                 intel->current_batch :
                                 I915_EXEC_DEFAULT));
        }

        if (ret != 0) {
                static int once;
                if (!once) {
                        if (ret == -EIO) {
                                /* The GPU has hung and is unlikely to recover by this point. */
                                printf("Detected a hung GPU, disabling acceleration.\n");
                                printf("When reporting this, please include i915_error_state from debugfs and the full dmesg.\n");
                        } else {
                                /* The driver is broken. */
                                printf("Failed to submit batch buffer, expect rendering corruption.\n");
                        }
//                      uxa_set_force_fallback(xf86ScrnToScreen(scrn), TRUE);
                        intel->force_fallback = TRUE;
                        once = 1;
                }
        }

        while (!list_is_empty(&intel->batch_pixmaps)) {
                struct intel_pixmap *entry;

                entry = list_first_entry(&intel->batch_pixmaps,
                                         struct intel_pixmap,
                                         batch);

                entry->busy = -1;
                entry->dirty = 0;
                list_del(&entry->batch);
        }

        if (intel->debug_flush & DEBUG_FLUSH_WAIT)
                drm_intel_bo_wait_rendering(intel->batch_bo);

        intel_next_batch(intel->current_batch == I915_EXEC_BLT);

        if (intel->batch_commit_notify)
                intel->batch_commit_notify(intel);

        intel->current_batch = 0;
}

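/* Debug hook: depending on intel->debug_flush bits, force a cache flush
 * and/or a batch submission, presumably after each rendering operation.
 */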
void intel_debug_flush(void)
{
        intel_screen_private *intel = intel_get_screen_private();

        if (intel->debug_flush & DEBUG_FLUSH_CACHES)
                intel_batch_emit_flush();

        if (intel->debug_flush & DEBUG_FLUSH_BATCHES)
                intel_batch_submit();
}