Subversion Repositories Kolibri OS

Rev

Go to most recent revision | Blame | Last modification | View Log | RSS feed

  1. /**************************************************************************
  2.  *
  3.  * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
  4.  * All Rights Reserved.
  5.  *
  6.  * Permission is hereby granted, free of charge, to any person obtaining a
  7.  * copy of this software and associated documentation files (the
  8.  * "Software"), to deal in the Software without restriction, including
  9.  * without limitation the rights to use, copy, modify, merge, publish,
  10.  * distribute, sub license, and/or sell copies of the Software, and to
  11.  * permit persons to whom the Software is furnished to do so, subject to
  12.  * the following conditions:
  13.  *
  14.  * The above copyright notice and this permission notice (including the
  15.  * next paragraph) shall be included in all copies or substantial portions
  16.  * of the Software.
  17.  *
  18.  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
  19.  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  20.  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
  21.  * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
  22.  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
  23.  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
  24.  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  25.  *
  26.  **************************************************************************/
  27.  
  28.  
  29. #include "main/glheader.h"
  30. #include "main/context.h"
  31. #include "main/extensions.h"
  32. #include "main/fbobject.h"
  33. #include "main/framebuffer.h"
  34. #include "main/imports.h"
  35. #include "main/points.h"
  36. #include "main/renderbuffer.h"
  37.  
  38. #include "swrast/swrast.h"
  39. #include "swrast_setup/swrast_setup.h"
  40. #include "tnl/tnl.h"
  41. #include "drivers/common/driverfuncs.h"
  42. #include "drivers/common/meta.h"
  43.  
  44. #include "intel_chipset.h"
  45. #include "intel_buffers.h"
  46. #include "intel_tex.h"
  47. #include "intel_batchbuffer.h"
  48. #include "intel_clear.h"
  49. #include "intel_extensions.h"
  50. #include "intel_pixel.h"
  51. #include "intel_regions.h"
  52. #include "intel_buffer_objects.h"
  53. #include "intel_fbo.h"
  54. #include "intel_bufmgr.h"
  55. #include "intel_screen.h"
  56. #include "intel_mipmap_tree.h"
  57.  
  58. #include "utils.h"
  59. #include "../glsl/ralloc.h"
  60.  
/* Bitmask of active debug output channels; tokens from the INTEL_DEBUG
 * environment variable are parsed into it via debug_control[] below.
 * This fallback definition is used when the project headers do not
 * already provide one. */
#ifndef INTEL_DEBUG
int INTEL_DEBUG = (0);
#endif
  64.  
  65.  
  66. static const GLubyte *
  67. intelGetString(struct gl_context * ctx, GLenum name)
  68. {
  69.    const struct intel_context *const intel = intel_context(ctx);
  70.    const char *chipset;
  71.    static char buffer[128];
  72.  
  73.    switch (name) {
  74.    case GL_VENDOR:
  75.       return (GLubyte *) "Intel Open Source Technology Center";
  76.       break;
  77.  
  78.    case GL_RENDERER:
  79.       switch (intel->intelScreen->deviceID) {
  80. #undef CHIPSET
  81. #define CHIPSET(id, symbol, str) case id: chipset = str; break;
  82. #include "pci_ids/i915_pci_ids.h"
  83.       default:
  84.          chipset = "Unknown Intel Chipset";
  85.          break;
  86.       }
  87.  
  88.       (void) driGetRendererString(buffer, chipset, 0);
  89.       return (GLubyte *) buffer;
  90.  
  91.    default:
  92.       return NULL;
  93.    }
  94. }
  95.  
  96. static void
  97. intel_flush_front(struct gl_context *ctx)
  98. {
  99.    struct intel_context *intel = intel_context(ctx);
  100.     __DRIcontext *driContext = intel->driContext;
  101.     __DRIdrawable *driDrawable = driContext->driDrawablePriv;
  102.     __DRIscreen *const screen = intel->intelScreen->driScrnPriv;
  103.  
  104.     if (intel->front_buffer_dirty && _mesa_is_winsys_fbo(ctx->DrawBuffer)) {
  105.       if (screen->dri2.loader->flushFrontBuffer != NULL &&
  106.           driDrawable &&
  107.           driDrawable->loaderPrivate) {
  108.          screen->dri2.loader->flushFrontBuffer(driDrawable,
  109.                                                driDrawable->loaderPrivate);
  110.  
  111.          /* We set the dirty bit in intel_prepare_render() if we're
  112.           * front buffer rendering once we get there.
  113.           */
  114.          intel->front_buffer_dirty = false;
  115.       }
  116.    }
  117. }
  118.  
  119. static unsigned
  120. intel_bits_per_pixel(const struct intel_renderbuffer *rb)
  121. {
  122.    return _mesa_get_format_bytes(intel_rb_format(rb)) * 8;
  123. }
  124.  
/* Forward declarations for the DRI2 buffer helpers used by
 * intel_update_renderbuffers(); the documented definitions are at the
 * bottom of this file. */
static void
intel_query_dri2_buffers(struct intel_context *intel,
                         __DRIdrawable *drawable,
                         __DRIbuffer **buffers,
                         int *count);

static void
intel_process_dri2_buffer(struct intel_context *intel,
                          __DRIdrawable *drawable,
                          __DRIbuffer *buffer,
                          struct intel_renderbuffer *rb,
                          const char *buffer_name);
  137.  
  138. void
  139. intel_update_renderbuffers(__DRIcontext *context, __DRIdrawable *drawable)
  140. {
  141.    struct gl_framebuffer *fb = drawable->driverPrivate;
  142.    struct intel_renderbuffer *rb;
  143.    struct intel_context *intel = context->driverPrivate;
  144.    __DRIbuffer *buffers = NULL;
  145.    int i, count;
  146.    const char *region_name;
  147.  
  148.    /* Set this up front, so that in case our buffers get invalidated
  149.     * while we're getting new buffers, we don't clobber the stamp and
  150.     * thus ignore the invalidate. */
  151.    drawable->lastStamp = drawable->dri2.stamp;
  152.  
  153.    if (unlikely(INTEL_DEBUG & DEBUG_DRI))
  154.       fprintf(stderr, "enter %s, drawable %p\n", __func__, drawable);
  155.  
  156.    intel_query_dri2_buffers(intel, drawable, &buffers, &count);
  157.  
  158.    if (buffers == NULL)
  159.       return;
  160.  
  161.    for (i = 0; i < count; i++) {
  162.        switch (buffers[i].attachment) {
  163.        case __DRI_BUFFER_FRONT_LEFT:
  164.            rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
  165.            region_name = "dri2 front buffer";
  166.            break;
  167.  
  168.        case __DRI_BUFFER_FAKE_FRONT_LEFT:
  169.            rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
  170.            region_name = "dri2 fake front buffer";
  171.            break;
  172.  
  173.        case __DRI_BUFFER_BACK_LEFT:
  174.            rb = intel_get_renderbuffer(fb, BUFFER_BACK_LEFT);
  175.            region_name = "dri2 back buffer";
  176.            break;
  177.  
  178.        case __DRI_BUFFER_DEPTH:
  179.        case __DRI_BUFFER_HIZ:
  180.        case __DRI_BUFFER_DEPTH_STENCIL:
  181.        case __DRI_BUFFER_STENCIL:
  182.        case __DRI_BUFFER_ACCUM:
  183.        default:
  184.            fprintf(stderr,
  185.                    "unhandled buffer attach event, attachment type %d\n",
  186.                    buffers[i].attachment);
  187.            return;
  188.        }
  189.  
  190.        intel_process_dri2_buffer(intel, drawable, &buffers[i], rb, region_name);
  191.    }
  192.  
  193.    driUpdateFramebufferSize(&intel->ctx, drawable);
  194. }
  195.  
  196. /**
  197.  * intel_prepare_render should be called anywhere that curent read/drawbuffer
  198.  * state is required.
  199.  */
  200. void
  201. intel_prepare_render(struct intel_context *intel)
  202. {
  203.    __DRIcontext *driContext = intel->driContext;
  204.    __DRIdrawable *drawable;
  205.  
  206.    drawable = driContext->driDrawablePriv;
  207.    if (drawable && drawable->dri2.stamp != driContext->dri2.draw_stamp) {
  208.       if (drawable->lastStamp != drawable->dri2.stamp)
  209.          intel_update_renderbuffers(driContext, drawable);
  210.       intel_draw_buffer(&intel->ctx);
  211.       driContext->dri2.draw_stamp = drawable->dri2.stamp;
  212.    }
  213.  
  214.    drawable = driContext->driReadablePriv;
  215.    if (drawable && drawable->dri2.stamp != driContext->dri2.read_stamp) {
  216.       if (drawable->lastStamp != drawable->dri2.stamp)
  217.          intel_update_renderbuffers(driContext, drawable);
  218.       driContext->dri2.read_stamp = drawable->dri2.stamp;
  219.    }
  220.  
  221.    /* If we're currently rendering to the front buffer, the rendering
  222.     * that will happen next will probably dirty the front buffer.  So
  223.     * mark it as dirty here.
  224.     */
  225.    if (intel->is_front_buffer_rendering)
  226.       intel->front_buffer_dirty = true;
  227.  
  228.    /* Wait for the swapbuffers before the one we just emitted, so we
  229.     * don't get too many swaps outstanding for apps that are GPU-heavy
  230.     * but not CPU-heavy.
  231.     *
  232.     * We're using intelDRI2Flush (called from the loader before
  233.     * swapbuffer) and glFlush (for front buffer rendering) as the
  234.     * indicator that a frame is done and then throttle when we get
  235.     * here as we prepare to render the next frame.  At this point for
  236.     * round trips for swap/copy and getting new buffers are done and
  237.     * we'll spend less time waiting on the GPU.
  238.     *
  239.     * Unfortunately, we don't have a handle to the batch containing
  240.     * the swap, and getting our hands on that doesn't seem worth it,
  241.     * so we just us the first batch we emitted after the last swap.
  242.     */
  243.    if (intel->need_throttle && intel->first_post_swapbuffers_batch) {
  244.       if (!intel->disable_throttling)
  245.          drm_intel_bo_wait_rendering(intel->first_post_swapbuffers_batch);
  246.       drm_intel_bo_unreference(intel->first_post_swapbuffers_batch);
  247.       intel->first_post_swapbuffers_batch = NULL;
  248.       intel->need_throttle = false;
  249.    }
  250. }
  251.  
  252. static void
  253. intel_viewport(struct gl_context *ctx, GLint x, GLint y, GLsizei w, GLsizei h)
  254. {
  255.     struct intel_context *intel = intel_context(ctx);
  256.     __DRIcontext *driContext = intel->driContext;
  257.  
  258.     if (intel->saved_viewport)
  259.         intel->saved_viewport(ctx, x, y, w, h);
  260.  
  261.     if (_mesa_is_winsys_fbo(ctx->DrawBuffer)) {
  262.        dri2InvalidateDrawable(driContext->driDrawablePriv);
  263.        dri2InvalidateDrawable(driContext->driReadablePriv);
  264.     }
  265. }
  266.  
/* Token -> flag table for the INTEL_DEBUG environment variable, consumed
 * by driParseDebugString() in intelInitContext().  Note that "fall" and
 * "perf" deliberately share DEBUG_PERF, and "fs" and "wm" share DEBUG_WM. */
static const struct dri_debug_control debug_control[] = {
   { "tex",   DEBUG_TEXTURE},
   { "state", DEBUG_STATE},
   { "blit",  DEBUG_BLIT},
   { "mip",   DEBUG_MIPTREE},
   { "fall",  DEBUG_PERF},
   { "perf",  DEBUG_PERF},
   { "bat",   DEBUG_BATCH},
   { "pix",   DEBUG_PIXEL},
   { "buf",   DEBUG_BUFMGR},
   { "reg",   DEBUG_REGION},
   { "fbo",   DEBUG_FBO},
   { "fs",    DEBUG_WM },
   { "sync",  DEBUG_SYNC},
   { "dri",   DEBUG_DRI },
   { "stats", DEBUG_STATS },
   { "wm",    DEBUG_WM },
   { "aub",   DEBUG_AUB },
   { NULL,    0 }
};
  287.  
  288.  
  289. static void
  290. intelInvalidateState(struct gl_context * ctx, GLuint new_state)
  291. {
  292.     struct intel_context *intel = intel_context(ctx);
  293.  
  294.     if (ctx->swrast_context)
  295.        _swrast_InvalidateState(ctx, new_state);
  296.    _vbo_InvalidateState(ctx, new_state);
  297.  
  298.    intel->NewGLState |= new_state;
  299.  
  300.    if (intel->vtbl.invalidate_state)
  301.       intel->vtbl.invalidate_state( intel, new_state );
  302. }
  303.  
  304. void
  305. intel_flush_rendering_to_batch(struct gl_context *ctx)
  306. {
  307.    struct intel_context *intel = intel_context(ctx);
  308.  
  309.    if (intel->Fallback)
  310.       _swrast_flush(ctx);
  311.  
  312.    INTEL_FIREVERTICES(intel);
  313. }
  314.  
  315. void
  316. _intel_flush(struct gl_context *ctx, const char *file, int line)
  317. {
  318.    struct intel_context *intel = intel_context(ctx);
  319.  
  320.    intel_flush_rendering_to_batch(ctx);
  321.  
  322.    if (intel->batch.used)
  323.       _intel_batchbuffer_flush(intel, file, line);
  324. }
  325.  
  326. static void
  327. intel_glFlush(struct gl_context *ctx)
  328. {
  329.    struct intel_context *intel = intel_context(ctx);
  330.  
  331.    intel_flush(ctx);
  332.    intel_flush_front(ctx);
  333.    if (intel->is_front_buffer_rendering)
  334.       intel->need_throttle = true;
  335. }
  336.  
  337. void
  338. intelFinish(struct gl_context * ctx)
  339. {
  340.    struct intel_context *intel = intel_context(ctx);
  341.  
  342.    intel_flush(ctx);
  343.    intel_flush_front(ctx);
  344.  
  345.    if (intel->batch.last_bo)
  346.       drm_intel_bo_wait_rendering(intel->batch.last_bo);
  347. }
  348.  
/**
 * Populate the driver function table with this driver's hooks.
 *
 * Starts from Mesa's defaults (_mesa_init_driver_functions), overrides
 * the core entry points defined in this file, then lets each subsystem
 * install its own hooks.
 */
void
intelInitDriverFunctions(struct dd_function_table *functions)
{
   /* Fill in defaults first so every slot is valid. */
   _mesa_init_driver_functions(functions);

   functions->Flush = intel_glFlush;
   functions->Finish = intelFinish;
   functions->GetString = intelGetString;
   functions->UpdateState = intelInvalidateState;

   /* Per-subsystem hook installation (texture, clear, buffer, pixel,
    * buffer-object and sync-object paths). */
   intelInitTextureFuncs(functions);
   intelInitTextureImageFuncs(functions);
   intelInitTextureSubImageFuncs(functions);
   intelInitTextureCopyImageFuncs(functions);
   intelInitClearFuncs(functions);
   intelInitBufferFuncs(functions);
   intelInitPixelFuncs(functions);
   intelInitBufferObjectFuncs(functions);
   intel_init_syncobj_functions(functions);
}
  369.  
  370. static bool
  371. validate_context_version(struct intel_screen *screen,
  372.                          int mesa_api,
  373.                          unsigned major_version,
  374.                          unsigned minor_version,
  375.                          unsigned *dri_ctx_error)
  376. {
  377.    unsigned req_version = 10 * major_version + minor_version;
  378.    unsigned max_version = 0;
  379.  
  380.    switch (mesa_api) {
  381.    case API_OPENGL_COMPAT:
  382.       max_version = screen->max_gl_compat_version;
  383.       break;
  384.    case API_OPENGL_CORE:
  385.       max_version = screen->max_gl_core_version;
  386.       break;
  387.    case API_OPENGLES:
  388.       max_version = screen->max_gl_es1_version;
  389.       break;
  390.    case API_OPENGLES2:
  391.       max_version = screen->max_gl_es2_version;
  392.       break;
  393.    default:
  394.       max_version = 0;
  395.       break;
  396.    }
  397.  
  398.    if (max_version == 0) {
  399.       *dri_ctx_error = __DRI_CTX_ERROR_BAD_API;
  400.       return false;
  401.    } else if (req_version > max_version) {
  402.       *dri_ctx_error = __DRI_CTX_ERROR_BAD_VERSION;
  403.       return false;
  404.    }
  405.  
  406.    return true;
  407. }
  408.  
/**
 * Initialize the driver's context state shared by i915/i830.
 *
 * Validates the API version, initializes the core Mesa context, reads
 * driconf options, sets implementation constants, and brings up the
 * software (swrast/tnl/vbo), meta, batchbuffer and FBO subsystems.
 * Initialization order matters: the Mesa context must exist before the
 * subsystem *_CreateContext calls and constant setup below.
 *
 * \param dri_ctx_error [out] receives a __DRI_CTX_ERROR_* code on failure.
 * \return true on success, false on failure (error code written out).
 */
bool
intelInitContext(struct intel_context *intel,
                 int api,
                 unsigned major_version,
                 unsigned minor_version,
                 const struct gl_config * mesaVis,
                 __DRIcontext * driContextPriv,
                 void *sharedContextPrivate,
                 struct dd_function_table *functions,
                 unsigned *dri_ctx_error)
{
   struct gl_context *ctx = &intel->ctx;
   struct gl_context *shareCtx = (struct gl_context *) sharedContextPrivate;
   __DRIscreen *sPriv = driContextPriv->driScreenPriv;
   struct intel_screen *intelScreen = sPriv->driverPrivate;
   int bo_reuse_mode;
   struct gl_config visual;

   /* we can't do anything without a connection to the device */
   if (intelScreen->bufmgr == NULL) {
      *dri_ctx_error = __DRI_CTX_ERROR_NO_MEMORY;
      return false;
   }

   if (!validate_context_version(intelScreen,
                                 api, major_version, minor_version,
                                 dri_ctx_error))
      return false;

   /* Can't rely on invalidate events, fall back to glViewport hack */
   if (!driContextPriv->driScreenPriv->dri2.useInvalidate) {
      intel->saved_viewport = functions->Viewport;
      functions->Viewport = intel_viewport;
   }

   /* No visual supplied (e.g. a configless context): use an all-zero one. */
   if (mesaVis == NULL) {
      memset(&visual, 0, sizeof visual);
      mesaVis = &visual;
   }

   intel->intelScreen = intelScreen;

   if (!_mesa_initialize_context(&intel->ctx, api, mesaVis, shareCtx,
                                 functions)) {
      *dri_ctx_error = __DRI_CTX_ERROR_NO_MEMORY;
      printf("%s: failed to init mesa context\n", __FUNCTION__);
      return false;
   }

   driContextPriv->driverPrivate = intel;
   intel->driContext = driContextPriv;
   intel->driFd = sPriv->fd;

   intel->gen = intelScreen->gen;

   const int devID = intelScreen->deviceID;

   intel->is_945 = IS_945(devID);

   intel->has_swizzling = intel->intelScreen->hw_has_swizzling;

   memset(&ctx->TextureFormatSupported,
          0, sizeof(ctx->TextureFormatSupported));

   /* Read per-user/per-app driconf options for the "i915" driver. */
   driParseConfigFiles(&intel->optionCache, &intelScreen->optionCache,
                       sPriv->myNum, "i915");
   intel->maxBatchSize = 4096;

   /* Estimate the size of the mappable aperture into the GTT.  There's an
    * ioctl to get the whole GTT size, but not one to get the mappable subset.
    * It turns out it's basically always 256MB, though some ancient hardware
    * was smaller.
    */
   uint32_t gtt_size = 256 * 1024 * 1024;
   if (intel->gen == 2)
      gtt_size = 128 * 1024 * 1024;

   /* We don't want to map two objects such that a memcpy between them would
    * just fault one mapping in and then the other over and over forever.  So
    * we would need to divide the GTT size by 2.  Additionally, some GTT is
    * taken up by things like the framebuffer and the ringbuffer and such, so
    * be more conservative.
    */
   intel->max_gtt_map_object_size = gtt_size / 4;

   intel->bufmgr = intelScreen->bufmgr;

   /* Buffer-object reuse policy from driconf ("bo_reuse"). */
   bo_reuse_mode = driQueryOptioni(&intel->optionCache, "bo_reuse");
   switch (bo_reuse_mode) {
   case DRI_CONF_BO_REUSE_DISABLED:
      break;
   case DRI_CONF_BO_REUSE_ALL:
      intel_bufmgr_gem_enable_reuse(intel->bufmgr);
      break;
   }

   /* Implementation limits advertised through glGet*. */
   ctx->Const.MinLineWidth = 1.0;
   ctx->Const.MinLineWidthAA = 1.0;
   ctx->Const.MaxLineWidth = 5.0;
   ctx->Const.MaxLineWidthAA = 5.0;
   ctx->Const.LineWidthGranularity = 0.5;

   ctx->Const.MinPointSize = 1.0;
   ctx->Const.MinPointSizeAA = 1.0;
   ctx->Const.MaxPointSize = 255.0;
   ctx->Const.MaxPointSizeAA = 3.0;
   ctx->Const.PointSizeGranularity = 1.0;

   ctx->Const.StripTextureBorder = GL_TRUE;

   /* Reinitialize the context point state, since it depends on the
    * constants in __struct gl_contextRec::Const set above.
    */
   _mesa_init_point(ctx);

   ctx->Const.MaxRenderbufferSize = 2048;

   /* Software rendering pipeline (must come after Mesa context init). */
   _swrast_CreateContext(ctx);
   _vbo_CreateContext(ctx);
   if (ctx->swrast_context) {
      _tnl_CreateContext(ctx);
      _swsetup_CreateContext(ctx);

      /* Configure swrast to match hardware characteristics: */
      _swrast_allow_pixel_fog(ctx, false);
      _swrast_allow_vertex_fog(ctx, true);
   }

   _mesa_meta_init(ctx);

   intel->hw_stencil = mesaVis->stencilBits && mesaVis->depthBits == 24;
   intel->hw_stipple = 1;

   intel->RenderIndex = ~0;

   intelInitExtensions(ctx);

   /* Parse INTEL_DEBUG and enable the matching debug facilities. */
   INTEL_DEBUG = driParseDebugString(getenv("INTEL_DEBUG"), debug_control);
   if (INTEL_DEBUG & DEBUG_BUFMGR)
      dri_bufmgr_set_debug(intel->bufmgr, true);
   if (INTEL_DEBUG & DEBUG_PERF)
      intel->perf_debug = true;

   if (INTEL_DEBUG & DEBUG_AUB)
      drm_intel_bufmgr_gem_set_aub_dump(intel->bufmgr, true);

   intel_batchbuffer_init(intel);

   intel_fbo_init(intel);

   intel->use_early_z = driQueryOptionb(&intel->optionCache, "early_z");

   intel->prim.primitive = ~0;

   /* Force all software fallbacks */
   if (driQueryOptionb(&intel->optionCache, "no_rast")) {
      fprintf(stderr, "disabling 3D rasterization\n");
      intel->no_rast = 1;
   }

   if (driQueryOptionb(&intel->optionCache, "always_flush_batch")) {
      fprintf(stderr, "flushing batchbuffer before/after each draw call\n");
      intel->always_flush_batch = 1;
   }

   if (driQueryOptionb(&intel->optionCache, "always_flush_cache")) {
      fprintf(stderr, "flushing GPU caches before/after each draw call\n");
      intel->always_flush_cache = 1;
   }

   if (driQueryOptionb(&intel->optionCache, "disable_throttling")) {
      fprintf(stderr, "disabling flush throttling\n");
      intel->disable_throttling = 1;
   }

   return true;
}
  586.  
/**
 * Tear down a context created by intelInitContext().
 *
 * Teardown proceeds in roughly the reverse order of initialization:
 * flush pending vertices, free meta/hardware state, destroy the software
 * pipeline modules, free the batchbuffer and buffers, then release the
 * Mesa context data and the intel struct itself (ralloc-allocated).
 */
void
intelDestroyContext(__DRIcontext * driContextPriv)
{
   struct intel_context *intel =
      (struct intel_context *) driContextPriv->driverPrivate;
   struct gl_context *ctx = &intel->ctx;

   assert(intel);               /* should never be null */
   if (intel) {
      INTEL_FIREVERTICES(intel);

      /* Dump a final BMP in case the application doesn't call SwapBuffers */
      if (INTEL_DEBUG & DEBUG_AUB) {
         intel_batchbuffer_flush(intel);
         aub_dump_bmp(&intel->ctx);
      }

      _mesa_meta_free(&intel->ctx);

      /* Hardware-generation-specific teardown (i830/i915 vtable). */
      intel->vtbl.destroy(intel);

      if (ctx->swrast_context) {
         _swsetup_DestroyContext(&intel->ctx);
         _tnl_DestroyContext(&intel->ctx);
      }
      _vbo_DestroyContext(&intel->ctx);

      if (ctx->swrast_context)
         _swrast_DestroyContext(&intel->ctx);
      intel->Fallback = 0x0;      /* don't call _swrast_Flush later */

      intel_batchbuffer_free(intel);

      /* Release vertex-buffer storage and the post-swap throttle batch. */
      free(intel->prim.vb);
      intel->prim.vb = NULL;
      drm_intel_bo_unreference(intel->prim.vb_bo);
      intel->prim.vb_bo = NULL;
      drm_intel_bo_unreference(intel->first_post_swapbuffers_batch);
      intel->first_post_swapbuffers_batch = NULL;

      driDestroyOptionCache(&intel->optionCache);

      /* free the Mesa context */
      _mesa_free_context_data(&intel->ctx);

      _math_matrix_dtr(&intel->ViewportMatrix);

      ralloc_free(intel);
      driContextPriv->driverPrivate = NULL;
   }
}
  638.  
  639. GLboolean
  640. intelUnbindContext(__DRIcontext * driContextPriv)
  641. {
  642.    /* Unset current context and dispath table */
  643.    _mesa_make_current(NULL, NULL, NULL);
  644.  
  645.    return true;
  646. }
  647.  
/**
 * DRI MakeCurrent hook: bind \p driContextPriv (with its draw/read
 * drawables) as the current context, or unbind when it is NULL.
 *
 * \return true (the DRI interface expects GL_TRUE on success; no failure
 *         path is reported here).
 */
GLboolean
intelMakeCurrent(__DRIcontext * driContextPriv,
                 __DRIdrawable * driDrawPriv,
                 __DRIdrawable * driReadPriv)
{
   struct intel_context *intel;
   GET_CURRENT_CONTEXT(curCtx);

   if (driContextPriv)
      intel = (struct intel_context *) driContextPriv->driverPrivate;
   else
      intel = NULL;

   /* According to the glXMakeCurrent() man page: "Pending commands to
    * the previous context, if any, are flushed before it is released."
    * But only flush if we're actually changing contexts.
    */
   if (intel_context(curCtx) && intel_context(curCtx) != intel) {
      _mesa_flush(curCtx);
   }

   if (driContextPriv) {
      struct gl_context *ctx = &intel->ctx;
      struct gl_framebuffer *fb, *readFb;

      /* NOTE(review): only the both-NULL case falls back to the
       * incomplete framebuffer; a mixed NULL/non-NULL pair would
       * dereference NULL below — presumably the loader never passes
       * that combination, but confirm. */
      if (driDrawPriv == NULL && driReadPriv == NULL) {
         fb = _mesa_get_incomplete_framebuffer();
         readFb = _mesa_get_incomplete_framebuffer();
      } else {
         fb = driDrawPriv->driverPrivate;
         readFb = driReadPriv->driverPrivate;
         /* Force a stamp mismatch so intel_prepare_render() refreshes
          * the buffers for the newly bound drawables. */
         driContextPriv->dri2.draw_stamp = driDrawPriv->dri2.stamp - 1;
         driContextPriv->dri2.read_stamp = driReadPriv->dri2.stamp - 1;
      }

      intel_prepare_render(intel);
      _mesa_make_current(ctx, fb, readFb);

      /* We do this in intel_prepare_render() too, but intel->ctx.DrawBuffer
       * is NULL at that point.  We can't call _mesa_make_current()
       * first, since we need the buffer size for the initial
       * viewport.  So just call intel_draw_buffer() again here. */
      intel_draw_buffer(ctx);
   }
   else {
      _mesa_make_current(NULL, NULL, NULL);
   }

   return true;
}
  698.  
  699. /**
  700.  * \brief Query DRI2 to obtain a DRIdrawable's buffers.
  701.  *
  702.  * To determine which DRI buffers to request, examine the renderbuffers
  703.  * attached to the drawable's framebuffer. Then request the buffers with
  704.  * DRI2GetBuffers() or DRI2GetBuffersWithFormat().
  705.  *
  706.  * This is called from intel_update_renderbuffers().
  707.  *
  708.  * \param drawable      Drawable whose buffers are queried.
  709.  * \param buffers       [out] List of buffers returned by DRI2 query.
  710.  * \param buffer_count  [out] Number of buffers returned.
  711.  *
  712.  * \see intel_update_renderbuffers()
  713.  * \see DRI2GetBuffers()
  714.  * \see DRI2GetBuffersWithFormat()
  715.  */
  716. static void
  717. intel_query_dri2_buffers(struct intel_context *intel,
  718.                          __DRIdrawable *drawable,
  719.                          __DRIbuffer **buffers,
  720.                          int *buffer_count)
  721. {
  722.    __DRIscreen *screen = intel->intelScreen->driScrnPriv;
  723.    struct gl_framebuffer *fb = drawable->driverPrivate;
  724.    int i = 0;
  725.    unsigned attachments[8];
  726.  
  727.    struct intel_renderbuffer *front_rb;
  728.    struct intel_renderbuffer *back_rb;
  729.  
  730.    front_rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
  731.    back_rb = intel_get_renderbuffer(fb, BUFFER_BACK_LEFT);
  732.  
  733.    memset(attachments, 0, sizeof(attachments));
  734.    if ((intel->is_front_buffer_rendering ||
  735.         intel->is_front_buffer_reading ||
  736.         !back_rb) && front_rb) {
  737.       /* If a fake front buffer is in use, then querying for
  738.        * __DRI_BUFFER_FRONT_LEFT will cause the server to copy the image from
  739.        * the real front buffer to the fake front buffer.  So before doing the
  740.        * query, we need to make sure all the pending drawing has landed in the
  741.        * real front buffer.
  742.        */
  743.       intel_flush(&intel->ctx);
  744.       intel_flush_front(&intel->ctx);
  745.  
  746.       attachments[i++] = __DRI_BUFFER_FRONT_LEFT;
  747.       attachments[i++] = intel_bits_per_pixel(front_rb);
  748.    } else if (front_rb && intel->front_buffer_dirty) {
  749.       /* We have pending front buffer rendering, but we aren't querying for a
  750.        * front buffer.  If the front buffer we have is a fake front buffer,
  751.        * the X server is going to throw it away when it processes the query.
  752.        * So before doing the query, make sure all the pending drawing has
  753.        * landed in the real front buffer.
  754.        */
  755.       intel_flush(&intel->ctx);
  756.       intel_flush_front(&intel->ctx);
  757.    }
  758.  
  759.    if (back_rb) {
  760.       attachments[i++] = __DRI_BUFFER_BACK_LEFT;
  761.       attachments[i++] = intel_bits_per_pixel(back_rb);
  762.    }
  763.  
  764.    assert(i <= ARRAY_SIZE(attachments));
  765.  
  766.    *buffers = screen->dri2.loader->getBuffersWithFormat(drawable,
  767.                                                         &drawable->w,
  768.                                                         &drawable->h,
  769.                                                         attachments, i / 2,
  770.                                                         buffer_count,
  771.                                                         drawable->loaderPrivate);
  772. }
  773.  
  774. /**
  775.  * \brief Assign a DRI buffer's DRM region to a renderbuffer.
  776.  *
  777.  * This is called from intel_update_renderbuffers().
  778.  *
  779.  * \par Note:
  780.  *    DRI buffers whose attachment point is DRI2BufferStencil or
  781.  *    DRI2BufferDepthStencil are handled as special cases.
  782.  *
  783.  * \param buffer_name is a human readable name, such as "dri2 front buffer",
  784.  *        that is passed to intel_region_alloc_for_handle().
  785.  *
  786.  * \see intel_update_renderbuffers()
  787.  * \see intel_region_alloc_for_handle()
  788.  */
  789. static void
  790. intel_process_dri2_buffer(struct intel_context *intel,
  791.                           __DRIdrawable *drawable,
  792.                           __DRIbuffer *buffer,
  793.                           struct intel_renderbuffer *rb,
  794.                           const char *buffer_name)
  795. {
  796.    struct intel_region *region = NULL;
  797.  
  798.    if (!rb)
  799.       return;
  800.  
  801.    /* We try to avoid closing and reopening the same BO name, because the first
  802.     * use of a mapping of the buffer involves a bunch of page faulting which is
  803.     * moderately expensive.
  804.     */
  805.    if (rb->mt &&
  806.        rb->mt->region &&
  807.        rb->mt->region->name == buffer->name)
  808.       return;
  809.  
  810.    if (unlikely(INTEL_DEBUG & DEBUG_DRI)) {
  811.       fprintf(stderr,
  812.               "attaching buffer %d, at %d, cpp %d, pitch %d\n",
  813.               buffer->name, buffer->attachment,
  814.               buffer->cpp, buffer->pitch);
  815.    }
  816.  
  817.    intel_miptree_release(&rb->mt);
  818.    region = intel_region_alloc_for_handle(intel->intelScreen,
  819.                                           buffer->cpp,
  820.                                           drawable->w,
  821.                                           drawable->h,
  822.                                           buffer->pitch,
  823.                                           buffer->name,
  824.                                           buffer_name);
  825.    if (!region)
  826.       return;
  827.  
  828.    rb->mt = intel_miptree_create_for_dri2_buffer(intel,
  829.                                                  buffer->attachment,
  830.                                                  intel_rb_format(rb),
  831.                                                  region);
  832.    intel_region_release(&region);
  833. }
  834.