Subversion Repositories: Kolibri OS, Rev 4358

/**************************************************************************
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/


#include "main/glheader.h"
#include "main/context.h"
#include "main/extensions.h"
#include "main/fbobject.h"
#include "main/framebuffer.h"
#include "main/imports.h"
#include "main/points.h"
#include "main/renderbuffer.h"

#include "swrast/swrast.h"
#include "swrast_setup/swrast_setup.h"
#include "tnl/tnl.h"
#include "drivers/common/driverfuncs.h"
#include "drivers/common/meta.h"

#include "intel_chipset.h"
#include "intel_buffers.h"
#include "intel_tex.h"
#include "intel_batchbuffer.h"
#include "intel_clear.h"
#include "intel_extensions.h"
#include "intel_pixel.h"
#include "intel_regions.h"
#include "intel_buffer_objects.h"
#include "intel_fbo.h"
#include "intel_bufmgr.h"
#include "intel_screen.h"
#include "intel_mipmap_tree.h"

#include "utils.h"
#include "../glsl/ralloc.h"

#ifndef INTEL_DEBUG
int INTEL_DEBUG = (0);
#endif


static const GLubyte *
intelGetString(struct gl_context * ctx, GLenum name)
{
   const struct intel_context *const intel = intel_context(ctx);
   const char *chipset;
   static char buffer[128];
   static char driver_name[] = "i915_dri.drv";

   switch (name) {
   case GL_VENDOR:
      return (GLubyte *) "Intel Open Source Technology Center";
      break;

   case GL_RENDERER:
      switch (intel->intelScreen->deviceID) {
#undef CHIPSET
#define CHIPSET(id, symbol, str) case id: chipset = str; break;
#include "pci_ids/i915_pci_ids.h"
      default:
         chipset = "Unknown Intel Chipset";
         break;
      }

      (void) driGetRendererString(buffer, chipset, 0);
      return (GLubyte *) buffer;

   case 0x1F04:  /* GL_DRIVER_NAME */
      return (GLubyte*)driver_name;

   default:
      return NULL;
   }
}

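/**
 * Flush pending front-buffer rendering out to the window system.
 *
 * If the context has dirtied the front buffer while drawing to a winsys
 * framebuffer, ask the DRI2 loader to copy it to the real front buffer
 * and clear the dirty flag.
 */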
static void
intel_flush_front(struct gl_context *ctx)
{
   struct intel_context *intel = intel_context(ctx);
    __DRIcontext *driContext = intel->driContext;
    __DRIdrawable *driDrawable = driContext->driDrawablePriv;
    __DRIscreen *const screen = intel->intelScreen->driScrnPriv;

    if (intel->front_buffer_dirty && _mesa_is_winsys_fbo(ctx->DrawBuffer)) {
      if (screen->dri2.loader->flushFrontBuffer != NULL &&
          driDrawable &&
          driDrawable->loaderPrivate) {
         screen->dri2.loader->flushFrontBuffer(driDrawable,
                                               driDrawable->loaderPrivate);

         /* We set the dirty bit in intel_prepare_render() if we're
          * front buffer rendering once we get there.
          */
         intel->front_buffer_dirty = false;
      }
   }
}

static unsigned
intel_bits_per_pixel(const struct intel_renderbuffer *rb)
{
   return _mesa_get_format_bytes(intel_rb_format(rb)) * 8;
}

static void
intel_query_dri2_buffers(struct intel_context *intel,
                         __DRIdrawable *drawable,
                         __DRIbuffer **buffers,
                         int *count);

static void
intel_process_dri2_buffer(struct intel_context *intel,
                          __DRIdrawable *drawable,
                          __DRIbuffer *buffer,
                          struct intel_renderbuffer *rb,
                          const char *buffer_name);

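/**
 * Refresh the renderbuffers of a drawable from the DRI2 loader.
 *
 * Queries the loader for the current front/back buffers, attaches each
 * returned buffer to the matching renderbuffer, and updates the
 * framebuffer size.  Called whenever the drawable's DRI2 stamp changes.
 */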
void
intel_update_renderbuffers(__DRIcontext *context, __DRIdrawable *drawable)
{
   struct gl_framebuffer *fb = drawable->driverPrivate;
   struct intel_renderbuffer *rb;
   struct intel_context *intel = context->driverPrivate;
   __DRIbuffer *buffers = NULL;
   int i, count;
   const char *region_name;

   /* Set this up front, so that in case our buffers get invalidated
    * while we're getting new buffers, we don't clobber the stamp and
    * thus ignore the invalidate. */
   drawable->lastStamp = drawable->dri2.stamp;

   if (unlikely(INTEL_DEBUG & DEBUG_DRI))
      fprintf(stderr, "enter %s, drawable %p\n", __func__, drawable);

   intel_query_dri2_buffers(intel, drawable, &buffers, &count);

   if (buffers == NULL)
      return;

   for (i = 0; i < count; i++) {
       switch (buffers[i].attachment) {
       case __DRI_BUFFER_FRONT_LEFT:
           rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
           region_name = "dri2 front buffer";
           break;

       case __DRI_BUFFER_FAKE_FRONT_LEFT:
           rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
           region_name = "dri2 fake front buffer";
           break;

       case __DRI_BUFFER_BACK_LEFT:
           rb = intel_get_renderbuffer(fb, BUFFER_BACK_LEFT);
           region_name = "dri2 back buffer";
           break;

       case __DRI_BUFFER_DEPTH:
       case __DRI_BUFFER_HIZ:
       case __DRI_BUFFER_DEPTH_STENCIL:
       case __DRI_BUFFER_STENCIL:
       case __DRI_BUFFER_ACCUM:
       default:
           fprintf(stderr,
                   "unhandled buffer attach event, attachment type %d\n",
                   buffers[i].attachment);
           return;
       }

       intel_process_dri2_buffer(intel, drawable, &buffers[i], rb, region_name);
   }

   driUpdateFramebufferSize(&intel->ctx, drawable);
}

/**
 * intel_prepare_render should be called anywhere that current read/drawbuffer
 * state is required.
 */
void
intel_prepare_render(struct intel_context *intel)
{
   __DRIcontext *driContext = intel->driContext;
   __DRIdrawable *drawable;

   drawable = driContext->driDrawablePriv;
   if (drawable && drawable->dri2.stamp != driContext->dri2.draw_stamp) {
      if (drawable->lastStamp != drawable->dri2.stamp)
         intel_update_renderbuffers(driContext, drawable);
      intel_draw_buffer(&intel->ctx);
      driContext->dri2.draw_stamp = drawable->dri2.stamp;
   }

   drawable = driContext->driReadablePriv;
   if (drawable && drawable->dri2.stamp != driContext->dri2.read_stamp) {
      if (drawable->lastStamp != drawable->dri2.stamp)
         intel_update_renderbuffers(driContext, drawable);
      driContext->dri2.read_stamp = drawable->dri2.stamp;
   }

   /* If we're currently rendering to the front buffer, the rendering
    * that will happen next will probably dirty the front buffer.  So
    * mark it as dirty here.
    */
   if (intel->is_front_buffer_rendering)
      intel->front_buffer_dirty = true;

   /* Wait for the swapbuffers before the one we just emitted, so we
    * don't get too many swaps outstanding for apps that are GPU-heavy
    * but not CPU-heavy.
    *
    * We're using intelDRI2Flush (called from the loader before
    * swapbuffer) and glFlush (for front buffer rendering) as the
    * indicator that a frame is done and then throttle when we get
    * here as we prepare to render the next frame.  At this point the
    * round trips for swap/copy and getting new buffers are done and
    * we'll spend less time waiting on the GPU.
    *
    * Unfortunately, we don't have a handle to the batch containing
    * the swap, and getting our hands on that doesn't seem worth it,
    * so we just use the first batch we emitted after the last swap.
    */
   if (intel->need_throttle && intel->first_post_swapbuffers_batch) {
      if (!intel->disable_throttling)
         drm_intel_bo_wait_rendering(intel->first_post_swapbuffers_batch);
      drm_intel_bo_unreference(intel->first_post_swapbuffers_batch);
      intel->first_post_swapbuffers_batch = NULL;
      intel->need_throttle = false;
   }
}

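/* Viewport hook used when the loader cannot send invalidate events: any
 * viewport change on a winsys framebuffer invalidates both drawables so
 * that fresh buffers are fetched on the next draw (see intelInitContext).
 */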
static void
intel_viewport(struct gl_context *ctx, GLint x, GLint y, GLsizei w, GLsizei h)
{
    struct intel_context *intel = intel_context(ctx);
    __DRIcontext *driContext = intel->driContext;

    if (intel->saved_viewport)
        intel->saved_viewport(ctx, x, y, w, h);

    if (_mesa_is_winsys_fbo(ctx->DrawBuffer)) {
       dri2InvalidateDrawable(driContext->driDrawablePriv);
       dri2InvalidateDrawable(driContext->driReadablePriv);
    }
}

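/* Mapping of INTEL_DEBUG environment variable tokens to debug flags,
 * parsed with driParseDebugString() in intelInitContext().
 */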
static const struct dri_debug_control debug_control[] = {
   { "tex",   DEBUG_TEXTURE},
   { "state", DEBUG_STATE},
   { "blit",  DEBUG_BLIT},
   { "mip",   DEBUG_MIPTREE},
   { "fall",  DEBUG_PERF},
   { "perf",  DEBUG_PERF},
   { "bat",   DEBUG_BATCH},
   { "pix",   DEBUG_PIXEL},
   { "buf",   DEBUG_BUFMGR},
   { "reg",   DEBUG_REGION},
   { "fbo",   DEBUG_FBO},
   { "fs",    DEBUG_WM },
   { "sync",  DEBUG_SYNC},
   { "dri",   DEBUG_DRI },
   { "stats", DEBUG_STATS },
   { "wm",    DEBUG_WM },
   { "aub",   DEBUG_AUB },
   { NULL,    0 }
};


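/* UpdateState hook: propagate Mesa state changes to swrast/vbo and record
 * them in intel->NewGLState for the hardware backend to process later.
 */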
static void
intelInvalidateState(struct gl_context * ctx, GLuint new_state)
{
    struct intel_context *intel = intel_context(ctx);

    if (ctx->swrast_context)
       _swrast_InvalidateState(ctx, new_state);
   _vbo_InvalidateState(ctx, new_state);

   intel->NewGLState |= new_state;

   if (intel->vtbl.invalidate_state)
      intel->vtbl.invalidate_state( intel, new_state );
}

void
intel_flush_rendering_to_batch(struct gl_context *ctx)
{
   struct intel_context *intel = intel_context(ctx);

   if (intel->Fallback)
      _swrast_flush(ctx);

   INTEL_FIREVERTICES(intel);
}

void
_intel_flush(struct gl_context *ctx, const char *file, int line)
{
   struct intel_context *intel = intel_context(ctx);

   intel_flush_rendering_to_batch(ctx);

   if (intel->batch.used)
      _intel_batchbuffer_flush(intel, file, line);
}

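/* dd_function_table::Flush hook: submit any pending batch, push front
 * buffer results to the window system and, for front buffer rendering,
 * request throttling on the next frame.
 */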
static void
intel_glFlush(struct gl_context *ctx)
{
   struct intel_context *intel = intel_context(ctx);

   intel_flush(ctx);
   intel_flush_front(ctx);
   if (intel->is_front_buffer_rendering)
      intel->need_throttle = true;
}

void
intelFinish(struct gl_context * ctx)
{
   struct intel_context *intel = intel_context(ctx);

   intel_flush(ctx);
   intel_flush_front(ctx);

   if (intel->batch.last_bo)
      drm_intel_bo_wait_rendering(intel->batch.last_bo);
}

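/* Fill in the dd_function_table with the generic Mesa defaults, then
 * override the hooks that the i915 driver implements itself.
 */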
void
intelInitDriverFunctions(struct dd_function_table *functions)
{
   _mesa_init_driver_functions(functions);

   functions->Flush = intel_glFlush;
   functions->Finish = intelFinish;
   functions->GetString = intelGetString;
   functions->UpdateState = intelInvalidateState;

   intelInitTextureFuncs(functions);
   intelInitTextureImageFuncs(functions);
   intelInitTextureSubImageFuncs(functions);
   intelInitTextureCopyImageFuncs(functions);
   intelInitClearFuncs(functions);
   intelInitBufferFuncs(functions);
   intelInitPixelFuncs(functions);
   intelInitBufferObjectFuncs(functions);
   intel_init_syncobj_functions(functions);
}

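/* Check that the requested API/version pair is supported by the screen;
 * on failure, store the matching __DRI_CTX_ERROR code for the loader.
 */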
static bool
validate_context_version(struct intel_screen *screen,
                         int mesa_api,
                         unsigned major_version,
                         unsigned minor_version,
                         unsigned *dri_ctx_error)
{
   unsigned req_version = 10 * major_version + minor_version;
   unsigned max_version = 0;

   switch (mesa_api) {
   case API_OPENGL_COMPAT:
      max_version = screen->max_gl_compat_version;
      break;
   case API_OPENGL_CORE:
      max_version = screen->max_gl_core_version;
      break;
   case API_OPENGLES:
      max_version = screen->max_gl_es1_version;
      break;
   case API_OPENGLES2:
      max_version = screen->max_gl_es2_version;
      break;
   default:
      max_version = 0;
      break;
   }

   if (max_version == 0) {
      *dri_ctx_error = __DRI_CTX_ERROR_BAD_API;
      return false;
   } else if (req_version > max_version) {
      *dri_ctx_error = __DRI_CTX_ERROR_BAD_VERSION;
      return false;
   }

   return true;
}

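/**
 * Shared context-creation path for the i915 driver: validates the
 * requested API version, initializes the Mesa context and its software
 * fallbacks (swrast/tnl/vbo/meta), applies driconf options, and sets up
 * the batchbuffer, FBO support and debug flags.
 */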
bool
intelInitContext(struct intel_context *intel,
                 int api,
                 unsigned major_version,
                 unsigned minor_version,
                 const struct gl_config * mesaVis,
                 __DRIcontext * driContextPriv,
                 void *sharedContextPrivate,
                 struct dd_function_table *functions,
                 unsigned *dri_ctx_error)
{
   struct gl_context *ctx = &intel->ctx;
   struct gl_context *shareCtx = (struct gl_context *) sharedContextPrivate;
   __DRIscreen *sPriv = driContextPriv->driScreenPriv;
   struct intel_screen *intelScreen = sPriv->driverPrivate;
   int bo_reuse_mode;
   struct gl_config visual;

   /* we can't do anything without a connection to the device */
   if (intelScreen->bufmgr == NULL) {
      *dri_ctx_error = __DRI_CTX_ERROR_NO_MEMORY;
      return false;
   }

   if (!validate_context_version(intelScreen,
                                 api, major_version, minor_version,
                                 dri_ctx_error))
      return false;

   /* Can't rely on invalidate events, fall back to glViewport hack */
   if (!driContextPriv->driScreenPriv->dri2.useInvalidate) {
      intel->saved_viewport = functions->Viewport;
      functions->Viewport = intel_viewport;
   }

   if (mesaVis == NULL) {
      memset(&visual, 0, sizeof visual);
      mesaVis = &visual;
   }

   intel->intelScreen = intelScreen;

   if (!_mesa_initialize_context(&intel->ctx, api, mesaVis, shareCtx,
                                 functions)) {
      *dri_ctx_error = __DRI_CTX_ERROR_NO_MEMORY;
      printf("%s: failed to init mesa context\n", __FUNCTION__);
      return false;
   }

   driContextPriv->driverPrivate = intel;
   intel->driContext = driContextPriv;
   intel->driFd = sPriv->fd;

   intel->gen = intelScreen->gen;

   const int devID = intelScreen->deviceID;

   intel->is_945 = IS_945(devID);

   intel->has_swizzling = intel->intelScreen->hw_has_swizzling;

   memset(&ctx->TextureFormatSupported,
          0, sizeof(ctx->TextureFormatSupported));

   driParseConfigFiles(&intel->optionCache, &intelScreen->optionCache,
                       sPriv->myNum, "i915");
   intel->maxBatchSize = 4096;

   /* Estimate the size of the mappable aperture into the GTT.  There's an
    * ioctl to get the whole GTT size, but not one to get the mappable subset.
    * It turns out it's basically always 256MB, though some ancient hardware
    * was smaller.
    */
   uint32_t gtt_size = 256 * 1024 * 1024;
   if (intel->gen == 2)
      gtt_size = 128 * 1024 * 1024;

   /* We don't want to map two objects such that a memcpy between them would
    * just fault one mapping in and then the other over and over forever.  So
    * we would need to divide the GTT size by 2.  Additionally, some GTT is
    * taken up by things like the framebuffer and the ringbuffer and such, so
    * be more conservative.
    */
   intel->max_gtt_map_object_size = gtt_size / 4;

   intel->bufmgr = intelScreen->bufmgr;

   bo_reuse_mode = driQueryOptioni(&intel->optionCache, "bo_reuse");
   switch (bo_reuse_mode) {
   case DRI_CONF_BO_REUSE_DISABLED:
      break;
   case DRI_CONF_BO_REUSE_ALL:
      intel_bufmgr_gem_enable_reuse(intel->bufmgr);
      break;
   }

   ctx->Const.MinLineWidth = 1.0;
   ctx->Const.MinLineWidthAA = 1.0;
   ctx->Const.MaxLineWidth = 5.0;
   ctx->Const.MaxLineWidthAA = 5.0;
   ctx->Const.LineWidthGranularity = 0.5;

   ctx->Const.MinPointSize = 1.0;
   ctx->Const.MinPointSizeAA = 1.0;
   ctx->Const.MaxPointSize = 255.0;
   ctx->Const.MaxPointSizeAA = 3.0;
   ctx->Const.PointSizeGranularity = 1.0;

   ctx->Const.StripTextureBorder = GL_TRUE;

   /* reinitialize the context point state.
    * It depends on constants in __struct gl_contextRec::Const
    */
   _mesa_init_point(ctx);

   ctx->Const.MaxRenderbufferSize = 2048;

   _swrast_CreateContext(ctx);
   _vbo_CreateContext(ctx);
   if (ctx->swrast_context) {
      _tnl_CreateContext(ctx);
      _swsetup_CreateContext(ctx);

      /* Configure swrast to match hardware characteristics: */
      _swrast_allow_pixel_fog(ctx, false);
      _swrast_allow_vertex_fog(ctx, true);
   }

   _mesa_meta_init(ctx);

   intel->hw_stencil = mesaVis->stencilBits && mesaVis->depthBits == 24;
   intel->hw_stipple = 1;

   intel->RenderIndex = ~0;

   intelInitExtensions(ctx);

   INTEL_DEBUG = driParseDebugString(getenv("INTEL_DEBUG"), debug_control);
   if (INTEL_DEBUG & DEBUG_BUFMGR)
      dri_bufmgr_set_debug(intel->bufmgr, true);
   if (INTEL_DEBUG & DEBUG_PERF)
      intel->perf_debug = true;

   if (INTEL_DEBUG & DEBUG_AUB)
      drm_intel_bufmgr_gem_set_aub_dump(intel->bufmgr, true);

   intel_batchbuffer_init(intel);

   intel_fbo_init(intel);

   intel->use_early_z = driQueryOptionb(&intel->optionCache, "early_z");

   intel->prim.primitive = ~0;

   /* Force all software fallbacks */
   if (driQueryOptionb(&intel->optionCache, "no_rast")) {
      fprintf(stderr, "disabling 3D rasterization\n");
      intel->no_rast = 1;
   }

   if (driQueryOptionb(&intel->optionCache, "always_flush_batch")) {
      fprintf(stderr, "flushing batchbuffer before/after each draw call\n");
      intel->always_flush_batch = 1;
   }

   if (driQueryOptionb(&intel->optionCache, "always_flush_cache")) {
      fprintf(stderr, "flushing GPU caches before/after each draw call\n");
      intel->always_flush_cache = 1;
   }

   if (driQueryOptionb(&intel->optionCache, "disable_throttling")) {
      fprintf(stderr, "disabling flush throttling\n");
      intel->disable_throttling = 1;
   }

   return true;
}

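/* Tear down a context: flush outstanding rendering, free the software
 * rasterizer modules, buffer objects and option cache, then release the
 * Mesa context data and the intel_context allocation itself.
 */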
void
intelDestroyContext(__DRIcontext * driContextPriv)
{
   struct intel_context *intel =
      (struct intel_context *) driContextPriv->driverPrivate;
   struct gl_context *ctx = &intel->ctx;

   assert(intel);               /* should never be null */
   if (intel) {
      INTEL_FIREVERTICES(intel);

      /* Dump a final BMP in case the application doesn't call SwapBuffers */
      if (INTEL_DEBUG & DEBUG_AUB) {
         intel_batchbuffer_flush(intel);
         aub_dump_bmp(&intel->ctx);
      }

      _mesa_meta_free(&intel->ctx);

      intel->vtbl.destroy(intel);

      if (ctx->swrast_context) {
         _swsetup_DestroyContext(&intel->ctx);
         _tnl_DestroyContext(&intel->ctx);
      }
      _vbo_DestroyContext(&intel->ctx);

      if (ctx->swrast_context)
         _swrast_DestroyContext(&intel->ctx);
      intel->Fallback = 0x0;      /* don't call _swrast_Flush later */

      intel_batchbuffer_free(intel);

      free(intel->prim.vb);
      intel->prim.vb = NULL;
      drm_intel_bo_unreference(intel->prim.vb_bo);
      intel->prim.vb_bo = NULL;
      drm_intel_bo_unreference(intel->first_post_swapbuffers_batch);
      intel->first_post_swapbuffers_batch = NULL;

      driDestroyOptionCache(&intel->optionCache);

      /* free the Mesa context */
      _mesa_free_context_data(&intel->ctx);

      _math_matrix_dtr(&intel->ViewportMatrix);

      ralloc_free(intel);
      driContextPriv->driverPrivate = NULL;
   }
}

GLboolean
intelUnbindContext(__DRIcontext * driContextPriv)
{
   /* Unset current context and dispatch table */
   _mesa_make_current(NULL, NULL, NULL);

   return true;
}

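/* MakeCurrent entry point: flush the previously bound context (per the
 * glXMakeCurrent() contract), then bind the new context to its draw and
 * read drawables, refreshing their buffers via intel_prepare_render().
 */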
GLboolean
intelMakeCurrent(__DRIcontext * driContextPriv,
                 __DRIdrawable * driDrawPriv,
                 __DRIdrawable * driReadPriv)
{
   struct intel_context *intel;
   GET_CURRENT_CONTEXT(curCtx);

   if (driContextPriv)
      intel = (struct intel_context *) driContextPriv->driverPrivate;
   else
      intel = NULL;

   /* According to the glXMakeCurrent() man page: "Pending commands to
    * the previous context, if any, are flushed before it is released."
    * But only flush if we're actually changing contexts.
    */
   if (intel_context(curCtx) && intel_context(curCtx) != intel) {
      _mesa_flush(curCtx);
   }

   if (driContextPriv) {
      struct gl_context *ctx = &intel->ctx;
      struct gl_framebuffer *fb, *readFb;

      if (driDrawPriv == NULL && driReadPriv == NULL) {
         fb = _mesa_get_incomplete_framebuffer();
         readFb = _mesa_get_incomplete_framebuffer();
      } else {
         fb = driDrawPriv->driverPrivate;
         readFb = driReadPriv->driverPrivate;
         driContextPriv->dri2.draw_stamp = driDrawPriv->dri2.stamp - 1;
         driContextPriv->dri2.read_stamp = driReadPriv->dri2.stamp - 1;
      }

      intel_prepare_render(intel);
      _mesa_make_current(ctx, fb, readFb);

      /* We do this in intel_prepare_render() too, but intel->ctx.DrawBuffer
       * is NULL at that point.  We can't call _mesa_make_current()
       * first, since we need the buffer size for the initial
       * viewport.  So just call intel_draw_buffer() again here. */
      intel_draw_buffer(ctx);
   }
   else {
      _mesa_make_current(NULL, NULL, NULL);
   }

   return true;
}

/**
 * \brief Query DRI2 to obtain a DRIdrawable's buffers.
 *
 * To determine which DRI buffers to request, examine the renderbuffers
 * attached to the drawable's framebuffer. Then request the buffers with
 * DRI2GetBuffers() or DRI2GetBuffersWithFormat().
 *
 * This is called from intel_update_renderbuffers().
 *
 * \param drawable      Drawable whose buffers are queried.
 * \param buffers       [out] List of buffers returned by DRI2 query.
 * \param buffer_count  [out] Number of buffers returned.
 *
 * \see intel_update_renderbuffers()
 * \see DRI2GetBuffers()
 * \see DRI2GetBuffersWithFormat()
 */
static void
intel_query_dri2_buffers(struct intel_context *intel,
                         __DRIdrawable *drawable,
                         __DRIbuffer **buffers,
                         int *buffer_count)
{
   __DRIscreen *screen = intel->intelScreen->driScrnPriv;
   struct gl_framebuffer *fb = drawable->driverPrivate;
   int i = 0;
   unsigned attachments[8];

   struct intel_renderbuffer *front_rb;
   struct intel_renderbuffer *back_rb;

   front_rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
   back_rb = intel_get_renderbuffer(fb, BUFFER_BACK_LEFT);

   memset(attachments, 0, sizeof(attachments));
   if ((intel->is_front_buffer_rendering ||
        intel->is_front_buffer_reading ||
        !back_rb) && front_rb) {
      /* If a fake front buffer is in use, then querying for
       * __DRI_BUFFER_FRONT_LEFT will cause the server to copy the image from
       * the real front buffer to the fake front buffer.  So before doing the
       * query, we need to make sure all the pending drawing has landed in the
       * real front buffer.
       */
      intel_flush(&intel->ctx);
      intel_flush_front(&intel->ctx);

      attachments[i++] = __DRI_BUFFER_FRONT_LEFT;
      attachments[i++] = intel_bits_per_pixel(front_rb);
   } else if (front_rb && intel->front_buffer_dirty) {
      /* We have pending front buffer rendering, but we aren't querying for a
       * front buffer.  If the front buffer we have is a fake front buffer,
       * the X server is going to throw it away when it processes the query.
       * So before doing the query, make sure all the pending drawing has
       * landed in the real front buffer.
       */
      intel_flush(&intel->ctx);
      intel_flush_front(&intel->ctx);
   }

   if (back_rb) {
      attachments[i++] = __DRI_BUFFER_BACK_LEFT;
      attachments[i++] = intel_bits_per_pixel(back_rb);
   }

   assert(i <= ARRAY_SIZE(attachments));

   *buffers = screen->dri2.loader->getBuffersWithFormat(drawable,
                                                        &drawable->w,
                                                        &drawable->h,
                                                        attachments, i / 2,
                                                        buffer_count,
                                                        drawable->loaderPrivate);
}

/**
 * \brief Assign a DRI buffer's DRM region to a renderbuffer.
 *
 * This is called from intel_update_renderbuffers().
 *
 * \par Note:
 *    DRI buffers whose attachment point is DRI2BufferStencil or
 *    DRI2BufferDepthStencil are handled as special cases.
 *
 * \param buffer_name is a human readable name, such as "dri2 front buffer",
 *        that is passed to intel_region_alloc_for_handle().
 *
 * \see intel_update_renderbuffers()
 * \see intel_region_alloc_for_handle()
 */
static void
intel_process_dri2_buffer(struct intel_context *intel,
                          __DRIdrawable *drawable,
                          __DRIbuffer *buffer,
                          struct intel_renderbuffer *rb,
                          const char *buffer_name)
{
   struct intel_region *region = NULL;

   if (!rb)
      return;

   /* We try to avoid closing and reopening the same BO name, because the first
    * use of a mapping of the buffer involves a bunch of page faulting which is
    * moderately expensive.
    */
   if (rb->mt &&
       rb->mt->region &&
       rb->mt->region->name == buffer->name)
      return;

   if (unlikely(INTEL_DEBUG & DEBUG_DRI)) {
      fprintf(stderr,
              "attaching buffer %d, at %d, cpp %d, pitch %d\n",
              buffer->name, buffer->attachment,
              buffer->cpp, buffer->pitch);
   }

   intel_miptree_release(&rb->mt);
   region = intel_region_alloc_for_handle(intel->intelScreen,
                                          buffer->cpp,
                                          drawable->w,
                                          drawable->h,
                                          buffer->pitch,
                                          buffer->name,
                                          buffer_name);
   if (!region)
      return;

   rb->mt = intel_miptree_create_for_dri2_buffer(intel,
                                                 buffer->attachment,
                                                 intel_rb_format(rb),
                                                 region);
   intel_region_release(&region);
}
  838.