/**************************************************************************

Copyright (C) The Weather Channel, Inc.  2002.  All Rights Reserved.

The Weather Channel (TM) funded Tungsten Graphics to develop the
initial release of the Radeon 8500 driver under the XFree86 license.
This notice must be preserved.

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice (including the
next paragraph) shall be included in all copies or substantial
portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

**************************************************************************/

/*
 * Authors:
 *   Keith Whitwell <keithw@vmware.com>
 */

/*
   - Scissor implementation
   - buffer swap/copy ioctls
   - finish/flush
   - state emission
   - cmdbuffer management
*/

#include <errno.h>
#include "main/glheader.h"
#include "main/imports.h"
#include "main/context.h"
#include "main/enums.h"
#include "main/fbobject.h"
#include "main/framebuffer.h"
#include "main/renderbuffer.h"
#include "drivers/common/meta.h"

#include "radeon_common.h"
#include "radeon_drm.h"
#include "radeon_queryobj.h"

/**
 * Enable verbose debug output for emit code.
 * 0 no output
 * 1 most output
 * 2 also print state values
 */
#define RADEON_CMDBUF         0

/* =============================================================
 * Scissoring
 */

/**
 * Update cliprects and scissors.
 */
void radeonSetCliprects(radeonContextPtr radeon)
{
        __DRIdrawable *const drawable = radeon_get_drawable(radeon);
        __DRIdrawable *const readable = radeon_get_readable(radeon);

        if (drawable == NULL || readable == NULL)
                return;

        struct radeon_framebuffer *const draw_rfb = drawable->driverPrivate;
        struct radeon_framebuffer *const read_rfb = readable->driverPrivate;

        if ((draw_rfb->base.Width != drawable->w) ||
            (draw_rfb->base.Height != drawable->h)) {
                _mesa_resize_framebuffer(&radeon->glCtx, &draw_rfb->base,
                                         drawable->w, drawable->h);
        }

        if (drawable != readable) {
                if ((read_rfb->base.Width != readable->w) ||
                    (read_rfb->base.Height != readable->h)) {
                        _mesa_resize_framebuffer(&radeon->glCtx, &read_rfb->base,
                                                 readable->w, readable->h);
                }
        }

        if (radeon->state.scissor.enabled)
                radeonUpdateScissor(&radeon->glCtx);

}


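/**
 * Recompute the hardware scissor rectangle from ctx->Scissor.
 *
 * For window-system framebuffers the Y coordinate is flipped (GL has its
 * origin at the bottom left, the hardware at the top left); user FBOs are
 * used as-is.  The resulting corners are clamped to the draw buffer bounds
 * and handed to the per-chip update_scissor hook, if any.
 */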
void radeonUpdateScissor( struct gl_context *ctx )
{
        radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
        GLint x = ctx->Scissor.ScissorArray[0].X, y = ctx->Scissor.ScissorArray[0].Y;
        GLsizei w = ctx->Scissor.ScissorArray[0].Width, h = ctx->Scissor.ScissorArray[0].Height;
        int x1, y1, x2, y2;
        int min_x, min_y, max_x, max_y;

        if (!ctx->DrawBuffer)
            return;
        min_x = min_y = 0;
        max_x = ctx->DrawBuffer->Width - 1;
        max_y = ctx->DrawBuffer->Height - 1;

        if (_mesa_is_winsys_fbo(ctx->DrawBuffer)) {
                x1 = x;
                y1 = ctx->DrawBuffer->Height - (y + h);
                x2 = x + w - 1;
                y2 = y1 + h - 1;
        } else {
                x1 = x;
                y1 = y;
                x2 = x + w - 1;
                y2 = y + h - 1;
        }

        rmesa->state.scissor.rect.x1 = CLAMP(x1,  min_x, max_x);
        rmesa->state.scissor.rect.y1 = CLAMP(y1,  min_y, max_y);
        rmesa->state.scissor.rect.x2 = CLAMP(x2,  min_x, max_x);
        rmesa->state.scissor.rect.y2 = CLAMP(y2,  min_y, max_y);

        if (rmesa->vtbl.update_scissor)
                rmesa->vtbl.update_scissor(ctx);
}

/* =============================================================
 * Scissoring
 */

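/**
 * Scissor state hook.  Cliprect changes are not pipelined, so when
 * scissoring is enabled any queued vertices are flushed before the
 * scissor rectangle is recomputed.
 */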
void radeonScissor(struct gl_context *ctx)
{
        radeonContextPtr radeon = RADEON_CONTEXT(ctx);
        if (ctx->Scissor.EnableFlags) {
                /* We don't pipeline cliprect changes */
                radeon_firevertices(radeon);
                radeonUpdateScissor(ctx);
        }
}

/* ================================================================
 * SwapBuffers with client-side throttling
 */

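/**
 * Query the last clear "age" counter from the kernel via the
 * RADEON_PARAM_LAST_CLEAR getparam ioctl.  Used for client-side
 * throttling; a failing ioctl is treated as fatal.
 */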
uint32_t radeonGetAge(radeonContextPtr radeon)
{
        drm_radeon_getparam_t gp;
        int ret;
        uint32_t age;

        gp.param = RADEON_PARAM_LAST_CLEAR;
        gp.value = (int *)&age;
        ret = drmCommandWriteRead(radeon->dri.fd, DRM_RADEON_GETPARAM,
                                  &gp, sizeof(gp));
        if (ret) {
                fprintf(stderr, "%s: drmRadeonGetParam: %d\n", __func__,
                        ret);
                exit(1);
        }

        return age;
}

/**
 * Check if we're about to draw into the front color buffer.
 * If so, set the radeon->front_buffer_dirty field to true.
 */
void
radeon_check_front_buffer_rendering(struct gl_context *ctx)
{
        radeonContextPtr radeon = RADEON_CONTEXT(ctx);
        const struct gl_framebuffer *fb = ctx->DrawBuffer;

        if (fb->Name == 0) {
                /* drawing to window system buffer */
                if (fb->_NumColorDrawBuffers > 0) {
                        if (fb->_ColorDrawBufferIndexes[0] == BUFFER_FRONT_LEFT) {
                                radeon->front_buffer_dirty = GL_TRUE;
                        }
                }
        }
}


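/**
 * Validate the current draw buffer and (re)bind the color, depth and
 * stencil renderbuffers the hardware should render to.
 *
 * Software fallbacks are toggled for any buffer that is missing, and
 * derived state that depends on the drawable (front/back cliprects,
 * culling orientation, depth/stencil enables, scissor, viewport) is
 * refreshed.
 */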
void radeon_draw_buffer(struct gl_context *ctx, struct gl_framebuffer *fb)
{
        radeonContextPtr radeon = RADEON_CONTEXT(ctx);
        struct radeon_renderbuffer *rrbDepth = NULL, *rrbStencil = NULL,
                *rrbColor = NULL;
        uint32_t offset = 0;


        if (!fb) {
                /* this can happen during the initial context initialization */
                return;
        }

        /* radeons only handle 1 color draw so far */
        if (fb->_NumColorDrawBuffers != 1) {
                radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DRAW_BUFFER, GL_TRUE);
                return;
        }

        /* Do this here, not in core Mesa, since this function is called from
         * many places within the driver.
         */
        if (ctx->NewState & (_NEW_BUFFERS | _NEW_COLOR | _NEW_PIXEL)) {
                /* this updates the DrawBuffer->_NumColorDrawBuffers fields, etc */
                _mesa_update_framebuffer(ctx, ctx->ReadBuffer, ctx->DrawBuffer);
                /* this updates the DrawBuffer's Width/Height if it's a FBO */
                _mesa_update_draw_buffer_bounds(ctx, ctx->DrawBuffer);
        }

        if (fb->_Status != GL_FRAMEBUFFER_COMPLETE_EXT) {
                /* this may occur when we're called by glBindFramebuffer() during
                 * the process of someone setting up renderbuffers, etc.
                 */
                /*_mesa_debug(ctx, "DrawBuffer: incomplete user FBO\n");*/
                return;
        }

        if (fb->Name) {
                ;/* do something depthy/stencily TODO */
        }

        /* none */
        if (fb->Name == 0) {
                if (fb->_ColorDrawBufferIndexes[0] == BUFFER_FRONT_LEFT) {
                        rrbColor = radeon_renderbuffer(fb->Attachment[BUFFER_FRONT_LEFT].Renderbuffer);
                        radeon->front_cliprects = GL_TRUE;
                } else {
                        rrbColor = radeon_renderbuffer(fb->Attachment[BUFFER_BACK_LEFT].Renderbuffer);
                        radeon->front_cliprects = GL_FALSE;
                }
        } else {
                /* user FBO in theory */
                struct radeon_renderbuffer *rrb;
                rrb = radeon_renderbuffer(fb->_ColorDrawBuffers[0]);
                if (rrb) {
                        offset = rrb->draw_offset;
                        rrbColor = rrb;
                }
        }

        if (rrbColor == NULL)
                radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DRAW_BUFFER, GL_TRUE);
        else
                radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DRAW_BUFFER, GL_FALSE);


        if (fb->Attachment[BUFFER_DEPTH].Renderbuffer) {
                rrbDepth = radeon_renderbuffer(fb->Attachment[BUFFER_DEPTH].Renderbuffer);
                if (rrbDepth && rrbDepth->bo) {
                        radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DEPTH_BUFFER, GL_FALSE);
                } else {
                        radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DEPTH_BUFFER, GL_TRUE);
                }
        } else {
                radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DEPTH_BUFFER, GL_FALSE);
                rrbDepth = NULL;
        }

        if (fb->Attachment[BUFFER_STENCIL].Renderbuffer) {
                rrbStencil = radeon_renderbuffer(fb->Attachment[BUFFER_STENCIL].Renderbuffer);
                if (rrbStencil && rrbStencil->bo) {
                        radeon->vtbl.fallback(ctx, RADEON_FALLBACK_STENCIL_BUFFER, GL_FALSE);
                        /* need to re-compute stencil hw state */
                        if (!rrbDepth)
                                rrbDepth = rrbStencil;
                } else {
                        radeon->vtbl.fallback(ctx, RADEON_FALLBACK_STENCIL_BUFFER, GL_TRUE);
                }
        } else {
                radeon->vtbl.fallback(ctx, RADEON_FALLBACK_STENCIL_BUFFER, GL_FALSE);
                if (ctx->Driver.Enable != NULL)
                        ctx->Driver.Enable(ctx, GL_STENCIL_TEST, ctx->Stencil.Enabled);
                else
                        ctx->NewState |= _NEW_STENCIL;
        }

        /* Update culling direction which changes depending on the
         * orientation of the buffer:
         */
        if (ctx->Driver.FrontFace)
                ctx->Driver.FrontFace(ctx, ctx->Polygon.FrontFace);
        else
                ctx->NewState |= _NEW_POLYGON;

        /*
         * Update depth test state
         */
        if (ctx->Driver.Enable) {
                ctx->Driver.Enable(ctx, GL_DEPTH_TEST,
                                   (ctx->Depth.Test && fb->Visual.depthBits > 0));
                /* Need to update the derived ctx->Stencil._Enabled first */
                ctx->Driver.Enable(ctx, GL_STENCIL_TEST,
                                   (ctx->Stencil.Enabled && fb->Visual.stencilBits > 0));
        } else {
                ctx->NewState |= (_NEW_DEPTH | _NEW_STENCIL);
        }

        _mesa_reference_renderbuffer(&radeon->state.depth.rb, &rrbDepth->base.Base);
        _mesa_reference_renderbuffer(&radeon->state.color.rb, &rrbColor->base.Base);
        radeon->state.color.draw_offset = offset;

        ctx->NewState |= _NEW_VIEWPORT;

        /* Set state we know depends on drawable parameters:
         */
        radeonUpdateScissor(ctx);
        radeon->NewGLState |= _NEW_SCISSOR;

        if (ctx->Driver.DepthRange)
                ctx->Driver.DepthRange(ctx);

        /* Update culling direction which changes depending on the
         * orientation of the buffer:
         */
        if (ctx->Driver.FrontFace)
                ctx->Driver.FrontFace(ctx, ctx->Polygon.FrontFace);
        else
                ctx->NewState |= _NEW_POLYGON;
}

/**
 * Called via glDrawBuffer.
 */
void radeonDrawBuffer( struct gl_context *ctx, GLenum mode )
{
        if (RADEON_DEBUG & RADEON_DRI)
                fprintf(stderr, "%s %s\n", __func__,
                        _mesa_lookup_enum_by_nr( mode ));

        if (_mesa_is_winsys_fbo(ctx->DrawBuffer)) {
                radeonContextPtr radeon = RADEON_CONTEXT(ctx);

                const GLboolean was_front_buffer_rendering =
                        radeon->is_front_buffer_rendering;

                radeon->is_front_buffer_rendering = (mode == GL_FRONT_LEFT) ||
                                                    (mode == GL_FRONT);

                /* If we weren't front-buffer rendering before but we are now, make sure
                 * that the front-buffer has actually been allocated.
                 */
                if (!was_front_buffer_rendering && radeon->is_front_buffer_rendering) {
                        radeon_update_renderbuffers(radeon->dri.context,
                                radeon->dri.context->driDrawablePriv, GL_FALSE);
                }
        }

        radeon_draw_buffer(ctx, ctx->DrawBuffer);
}

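/**
 * Called via glReadBuffer.
 *
 * Tracks whether the front buffer is being read so it can be allocated on
 * first use, and revalidates the draw buffer when it is also the read
 * buffer, since a glReadBuffer call can make it complete again.
 */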
void radeonReadBuffer( struct gl_context *ctx, GLenum mode )
{
        if (ctx->DrawBuffer && _mesa_is_winsys_fbo(ctx->DrawBuffer)) {
                struct radeon_context *const rmesa = RADEON_CONTEXT(ctx);
                const GLboolean was_front_buffer_reading = rmesa->is_front_buffer_reading;
                rmesa->is_front_buffer_reading = (mode == GL_FRONT_LEFT)
                                        || (mode == GL_FRONT);

                if (!was_front_buffer_reading && rmesa->is_front_buffer_reading) {
                        radeon_update_renderbuffers(rmesa->dri.context,
                                                    rmesa->dri.context->driReadablePriv, GL_FALSE);
                }
        }
        /* nothing, until we implement h/w glRead/CopyPixels or CopyTexImage */
        if (ctx->ReadBuffer == ctx->DrawBuffer) {
                /* This will update FBO completeness status.
                 * A framebuffer will be incomplete if the GL_READ_BUFFER setting
                 * refers to a missing renderbuffer.  Calling glReadBuffer can set
                 * that straight and can make the drawing buffer complete.
                 */
                radeon_draw_buffer(ctx, ctx->DrawBuffer);
        }
}

void radeon_window_moved(radeonContextPtr radeon)
{
        /* Cliprects have to be updated before doing anything else */
        radeonSetCliprects(radeon);
}

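/**
 * Viewport hook, also used to notice drawable size changes.
 *
 * Flushes outstanding front-buffer rendering, refreshes the DRI2
 * renderbuffers for the drawable (and readable, if different) and then
 * re-derives drawable-dependent state.  ctx->Driver.Viewport is
 * temporarily cleared, apparently to avoid recursing back into this hook
 * while the framebuffer is resized.
 */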
void radeon_viewport(struct gl_context *ctx)
{
        radeonContextPtr radeon = RADEON_CONTEXT(ctx);
        __DRIcontext *driContext = radeon->dri.context;
        void (*old_viewport)(struct gl_context *ctx);

        if (_mesa_is_winsys_fbo(ctx->DrawBuffer)) {
                if (radeon->is_front_buffer_rendering) {
                        ctx->Driver.Flush(ctx);
                }
                radeon_update_renderbuffers(driContext, driContext->driDrawablePriv, GL_FALSE);
                if (driContext->driDrawablePriv != driContext->driReadablePriv)
                        radeon_update_renderbuffers(driContext, driContext->driReadablePriv, GL_FALSE);
        }

        old_viewport = ctx->Driver.Viewport;
        ctx->Driver.Viewport = NULL;
        radeon_window_moved(radeon);
        radeon_draw_buffer(ctx, radeon->glCtx.DrawBuffer);
        ctx->Driver.Viewport = old_viewport;
}

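/**
 * Debug helper: dump a state atom's command words when RADEON_STATE
 * tracing is enabled, decoding each Type-0 packet header into its first
 * register offset and register count.
 */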
static void radeon_print_state_atom(radeonContextPtr radeon, struct radeon_state_atom *state)
{
        int i, j, reg, count;
        int dwords;
        uint32_t packet0;
        if (!radeon_is_debug_enabled(RADEON_STATE, RADEON_VERBOSE))
                return;

        dwords = (*state->check) (&radeon->glCtx, state);

        fprintf(stderr, "  emit %s %d/%d\n", state->name, dwords, state->cmd_size);

        if (state->cmd && radeon_is_debug_enabled(RADEON_STATE, RADEON_TRACE)) {
                if (dwords > state->cmd_size)
                        dwords = state->cmd_size;
                for (i = 0; i < dwords;) {
                        packet0 = state->cmd[i];
                        reg = (packet0 & 0x1FFF) << 2;
                        count = ((packet0 & 0x3FFF0000) >> 16) + 1;
                        fprintf(stderr, "      %s[%d]: cmdpacket0 (first reg=0x%04x, count=%d)\n",
                                        state->name, i, reg, count);
                        ++i;
                        for (j = 0; j < count && i < dwords; j++) {
                                fprintf(stderr, "      %s[%d]: 0x%04x = %08x\n",
                                                state->name, i, reg, state->cmd[i]);
                                reg += 4;
                                ++i;
                        }
                }
        }
}

/**
 * Count total size for next state emit.
 **/
GLuint radeonCountStateEmitSize(radeonContextPtr radeon)
{
        struct radeon_state_atom *atom;
        GLuint dwords = 0;
        /* check if we are going to emit full state */

        if (radeon->cmdbuf.cs->cdw && !radeon->hw.all_dirty) {
                if (!radeon->hw.is_dirty)
                        goto out;
                foreach(atom, &radeon->hw.atomlist) {
                        if (atom->dirty) {
                                const GLuint atom_size = atom->check(&radeon->glCtx, atom);
                                dwords += atom_size;
                                if (RADEON_CMDBUF && atom_size) {
                                        radeon_print_state_atom(radeon, atom);
                                }
                        }
                }
        } else {
                foreach(atom, &radeon->hw.atomlist) {
                        const GLuint atom_size = atom->check(&radeon->glCtx, atom);
                        dwords += atom_size;
                        if (RADEON_CMDBUF && atom_size) {
                                radeon_print_state_atom(radeon, atom);
                        }
                }
        }
out:
        radeon_print(RADEON_STATE, RADEON_NORMAL, "%s %u\n", __func__, dwords);
        return dwords;
}

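/**
 * Emit a single state atom, either through its emit() callback or by
 * copying its prebuilt command table straight into the batch.  Atoms whose
 * check() callback reports zero dwords are skipped.
 */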
static inline void radeon_emit_atom(radeonContextPtr radeon, struct radeon_state_atom *atom)
{
        BATCH_LOCALS(radeon);
        int dwords;

        dwords = (*atom->check) (&radeon->glCtx, atom);
        if (dwords) {

                radeon_print_state_atom(radeon, atom);

                if (atom->emit) {
                        (*atom->emit)(&radeon->glCtx, atom);
                } else {
                        BEGIN_BATCH(dwords);
                        OUT_BATCH_TABLE(atom->cmd, dwords);
                        END_BATCH();
                }
                atom->dirty = GL_FALSE;

        } else {
                radeon_print(RADEON_STATE, RADEON_VERBOSE, "  skip state %s\n", atom->name);
        }

}

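/**
 * Walk the state atom list and emit either all atoms (when emitAll is set
 * or the whole state is dirty) or only the dirty ones, then commit the
 * batch.
 */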
static inline void radeonEmitAtoms(radeonContextPtr radeon, GLboolean emitAll)
{
        struct radeon_state_atom *atom;

        /* Emit actual atoms */
        if (radeon->hw.all_dirty || emitAll) {
                foreach(atom, &radeon->hw.atomlist)
                        radeon_emit_atom( radeon, atom );
        } else {
                foreach(atom, &radeon->hw.atomlist) {
                        if ( atom->dirty )
                                radeon_emit_atom( radeon, atom );
                }
        }

        COMMIT_BATCH();
}

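/**
 * Emit all state needed for the next rendering operation.  A fresh command
 * buffer gets the complete state re-emitted; otherwise only dirty atoms are
 * emitted.  The dirty flags are cleared afterwards.
 */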
void radeonEmitState(radeonContextPtr radeon)
{
        radeon_print(RADEON_STATE, RADEON_NORMAL, "%s\n", __func__);

        if (radeon->vtbl.pre_emit_state)
                radeon->vtbl.pre_emit_state(radeon);

        /* this code used to return here but now it emits zbs */
        if (radeon->cmdbuf.cs->cdw && !radeon->hw.is_dirty && !radeon->hw.all_dirty)
                return;

        if (!radeon->cmdbuf.cs->cdw) {
                if (RADEON_DEBUG & RADEON_STATE)
                        fprintf(stderr, "Begin reemit state\n");

                radeonEmitAtoms(radeon, GL_TRUE);
        } else {

                if (RADEON_DEBUG & RADEON_STATE)
                        fprintf(stderr, "Begin dirty state\n");

                radeonEmitAtoms(radeon, GL_FALSE);
        }

        radeon->hw.is_dirty = GL_FALSE;
        radeon->hw.all_dirty = GL_FALSE;
}


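/**
 * glFlush hook.  Flushes pending DMA data and the command stream to the
 * kernel and, when rendering to the front buffer of a window-system
 * framebuffer, notifies the DRI2 loader via flushFrontBuffer so the
 * results become visible.
 */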
void radeonFlush(struct gl_context *ctx)
{
        radeonContextPtr radeon = RADEON_CONTEXT(ctx);
        if (RADEON_DEBUG & RADEON_IOCTL)
                fprintf(stderr, "%s %d\n", __func__, radeon->cmdbuf.cs->cdw);

        /* If we have no commands in the buffer, no pending DMA flush and
         * no DMA buffer allocated, there is no point in flushing anything
         * at all.
         */
        if (!radeon->dma.flush && !radeon->cmdbuf.cs->cdw && is_empty_list(&radeon->dma.reserved))
                goto flush_front;

        if (radeon->dma.flush)
                radeon->dma.flush( ctx );

        if (radeon->cmdbuf.cs->cdw)
                rcommonFlushCmdBuf(radeon, __func__);

flush_front:
        if (_mesa_is_winsys_fbo(ctx->DrawBuffer) && radeon->front_buffer_dirty) {
                __DRIscreen *const screen = radeon->radeonScreen->driScreen;

                if (screen->dri2.loader && (screen->dri2.loader->base.version >= 2)
                        && (screen->dri2.loader->flushFrontBuffer != NULL)) {
                        __DRIdrawable * drawable = radeon_get_drawable(radeon);

                        /* We set the dirty bit in radeon_prepare_render() if we're
                         * front buffer rendering once we get there.
                         */
                        radeon->front_buffer_dirty = GL_FALSE;

                        (*screen->dri2.loader->flushFrontBuffer)(drawable, drawable->loaderPrivate);
                }
        }
}

/* Make sure all commands have been sent to the hardware and have
 * completed processing.
 */
void radeonFinish(struct gl_context * ctx)
{
        radeonContextPtr radeon = RADEON_CONTEXT(ctx);
        struct gl_framebuffer *fb = ctx->DrawBuffer;
        struct radeon_renderbuffer *rrb;
        int i;

        if (ctx->Driver.Flush)
                ctx->Driver.Flush(ctx); /* +r6/r7 */

        for (i = 0; i < fb->_NumColorDrawBuffers; i++) {
                struct radeon_renderbuffer *rrb;
                rrb = radeon_renderbuffer(fb->_ColorDrawBuffers[i]);
                if (rrb && rrb->bo)
                        radeon_bo_wait(rrb->bo);
        }
        rrb = radeon_get_depthbuffer(radeon);
        if (rrb && rrb->bo)
                radeon_bo_wait(rrb->bo);
}

/* cmdbuffer */
/**
 * Send the current command buffer via ioctl to the hardware.
 */
int rcommonFlushCmdBufLocked(radeonContextPtr rmesa, const char *caller)
{
        int ret = 0;

        if (rmesa->cmdbuf.flushing) {
                fprintf(stderr, "Recursive call into rcommonFlushCmdBufLocked!\n");
                exit(-1);
        }
        rmesa->cmdbuf.flushing = 1;

        if (RADEON_DEBUG & RADEON_IOCTL) {
                fprintf(stderr, "%s from %s\n", __func__, caller);
        }

        radeonEmitQueryEnd(&rmesa->glCtx);

        if (rmesa->cmdbuf.cs->cdw) {
                ret = radeon_cs_emit(rmesa->cmdbuf.cs);
                rmesa->hw.all_dirty = GL_TRUE;
        }
        radeon_cs_erase(rmesa->cmdbuf.cs);
        rmesa->cmdbuf.flushing = 0;

        if (!rmesa->vtbl.revalidate_all_buffers(&rmesa->glCtx))
                fprintf(stderr,"failed to revalidate buffers\n");

        return ret;
}

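/**
 * Release the reserved DMA regions, then flush the command buffer.  A
 * command stream rejected by the kernel is treated as fatal.
 */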
int rcommonFlushCmdBuf(radeonContextPtr rmesa, const char *caller)
{
        int ret;

        radeonReleaseDmaRegions(rmesa);

        ret = rcommonFlushCmdBufLocked(rmesa, caller);

        if (ret) {
                fprintf(stderr, "drmRadeonCmdBuffer: %d. Kernel failed to "
                                "parse or rejected command stream. See dmesg "
                                "for more info.\n", ret);
                exit(ret);
        }

        return ret;
}

/**
 * Make sure that enough space is available in the command buffer
 * by flushing if necessary.
 *
 * \param dwords The number of dwords we need to be free on the command buffer
 */
GLboolean rcommonEnsureCmdBufSpace(radeonContextPtr rmesa, int dwords, const char *caller)
{
   if ((rmesa->cmdbuf.cs->cdw + dwords + 128) > rmesa->cmdbuf.size
         || radeon_cs_need_flush(rmesa->cmdbuf.cs)) {
      /* If we would have to flush an empty buffer, the single rendering
       * operation is too big to fit in the command buffer. */
      assert(rmesa->cmdbuf.cs->cdw);
      rcommonFlushCmdBuf(rmesa, caller);
      return GL_TRUE;
   }
   return GL_FALSE;
}

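/**
 * Allocate the command stream.  The size (in dwords) comes from the
 * driconf command_buffer_size option times 256, is raised if it cannot
 * hold twice the maximum state size and capped at 64 * 256 dwords, and the
 * VRAM/GTT placement limits are initialized from DRM_RADEON_GEM_INFO.
 */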
void rcommonInitCmdBuf(radeonContextPtr rmesa)
{
        GLuint size;
        struct drm_radeon_gem_info mminfo = { 0 };

        /* Initialize command buffer */
        size = 256 * driQueryOptioni(&rmesa->optionCache,
                                     "command_buffer_size");
        if (size < 2 * rmesa->hw.max_state_size) {
                size = 2 * rmesa->hw.max_state_size + 65535;
        }
        if (size > 64 * 256)
                size = 64 * 256;

        radeon_print(RADEON_CS, RADEON_VERBOSE,
                        "sizeof(drm_r300_cmd_header_t)=%zd\n", sizeof(drm_r300_cmd_header_t));
        radeon_print(RADEON_CS, RADEON_VERBOSE,
                        "sizeof(drm_radeon_cmd_buffer_t)=%zd\n", sizeof(drm_radeon_cmd_buffer_t));
        radeon_print(RADEON_CS, RADEON_VERBOSE,
                        "Allocating %d bytes command buffer (max state is %d bytes)\n",
                        size * 4, rmesa->hw.max_state_size * 4);

        rmesa->cmdbuf.csm =
                radeon_cs_manager_gem_ctor(rmesa->radeonScreen->driScreen->fd);
        if (rmesa->cmdbuf.csm == NULL) {
                /* FIXME: fatal error */
                return;
        }
        rmesa->cmdbuf.cs = radeon_cs_create(rmesa->cmdbuf.csm, size);
        assert(rmesa->cmdbuf.cs != NULL);
        rmesa->cmdbuf.size = size;

        radeon_cs_space_set_flush(rmesa->cmdbuf.cs,
                                  (void (*)(void *))rmesa->glCtx.Driver.Flush, &rmesa->glCtx);


        if (!drmCommandWriteRead(rmesa->dri.fd, DRM_RADEON_GEM_INFO,
                                 &mminfo, sizeof(mminfo))) {
                radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_VRAM,
                                    mminfo.vram_visible);
                radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_GTT,
                                    mminfo.gart_size);
        }
}

/**
 * Destroy the command buffer
 */
void rcommonDestroyCmdBuf(radeonContextPtr rmesa)
{
        radeon_cs_destroy(rmesa->cmdbuf.cs);
        radeon_cs_manager_gem_dtor(rmesa->cmdbuf.csm);
}

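/**
 * Start a batch of n dwords on the command stream; the caller's file,
 * function and line are passed to the CS layer for debugging.
 */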
void rcommonBeginBatch(radeonContextPtr rmesa, int n,
                       const char *file,
                       const char *function,
                       int line)
{
        radeon_cs_begin(rmesa->cmdbuf.cs, n, file, function, line);

        radeon_print(RADEON_CS, RADEON_VERBOSE, "BEGIN_BATCH(%d) at %d, from %s:%i\n",
                        n, rmesa->cmdbuf.cs->cdw, function, line);

}

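/** Clear the given buffers through the common meta path (_mesa_meta_Clear). */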
void radeonUserClear(struct gl_context *ctx, GLuint mask)
{
   _mesa_meta_Clear(ctx, mask);
}