Subversion Repositories Kolibri OS

Rev

Blame | Last modification | View Log | Download | RSS feed

  1. /*
  2.  * Copyright © 2009 Intel Corporation
  3.  *
  4.  * Permission is hereby granted, free of charge, to any person obtaining a
  5.  * copy of this software and associated documentation files (the
  6.  * "Software"), to deal in the Software without restriction, including
  7.  * without limitation the rights to use, copy, modify, merge, publish,
  8.  * distribute, sub license, and/or sell copies of the Software, and to
  9.  * permit persons to whom the Software is furnished to do so, subject to
  10.  * the following conditions:
  11.  *
  12.  * The above copyright notice and this permission notice (including the
  13.  * next paragraph) shall be included in all copies or substantial portions
  14.  * of the Software.
  15.  *
  16.  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
  17.  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  18.  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
  19.  * IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR
  20.  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
  21.  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
  22.  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  23.  *
  24.  * Authors:
  25.  *    Xiang Haihao <haihao.xiang@intel.com>
  26.  *    Zou Nan hai <nanhai.zou@intel.com>
  27.  *
  28.  */
  29.  
  30. #include <stdio.h>
  31. #include <stdlib.h>
  32. #include <string.h>
  33. #include <assert.h>
  34.  
  35. #include "intel_batchbuffer.h"
  36. #include "intel_driver.h"
  37. #include "i965_defines.h"
  38. #include "i965_drv_video.h"
  39.  
  40. #include "i965_media.h"
  41. #include "i965_media_mpeg2.h"
  42. #include "i965_media_h264.h"
  43.  
  44. static void
  45. i965_media_pipeline_select(VADriverContextP ctx, struct i965_media_context *media_context)
  46. {
  47.     struct intel_batchbuffer *batch = media_context->base.batch;
  48.  
  49.     BEGIN_BATCH(batch, 1);
  50.     OUT_BATCH(batch, CMD_PIPELINE_SELECT | PIPELINE_SELECT_MEDIA);
  51.     ADVANCE_BATCH(batch);
  52. }
  53.  
  54. static void
  55. i965_media_urb_layout(VADriverContextP ctx, struct i965_media_context *media_context)
  56. {
  57.     struct i965_driver_data *i965 = i965_driver_data(ctx);
  58.     struct intel_batchbuffer *batch = media_context->base.batch;
  59.     unsigned int vfe_fence, cs_fence;
  60.  
  61.     vfe_fence = media_context->urb.cs_start;
  62.     cs_fence = URB_SIZE((&i965->intel));
  63.  
  64.     BEGIN_BATCH(batch, 3);
  65.     OUT_BATCH(batch, CMD_URB_FENCE | UF0_VFE_REALLOC | UF0_CS_REALLOC | 1);
  66.     OUT_BATCH(batch, 0);
  67.     OUT_BATCH(batch,
  68.               (vfe_fence << UF2_VFE_FENCE_SHIFT) |      /* VFE_SIZE */
  69.               (cs_fence << UF2_CS_FENCE_SHIFT));        /* CS_SIZE */
  70.     ADVANCE_BATCH(batch);
  71. }
  72.  
  73. static void
  74. i965_media_state_base_address(VADriverContextP ctx, struct i965_media_context *media_context)
  75. {
  76.     struct i965_driver_data *i965 = i965_driver_data(ctx);
  77.     struct intel_batchbuffer *batch = media_context->base.batch;
  78.  
  79.     if (IS_IRONLAKE(i965->intel.device_id)) {
  80.         BEGIN_BATCH(batch, 8);
  81.         OUT_BATCH(batch, CMD_STATE_BASE_ADDRESS | 6);
  82.         OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
  83.         OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
  84.        
  85.         if (media_context->indirect_object.bo) {
  86.             OUT_RELOC(batch, media_context->indirect_object.bo, I915_GEM_DOMAIN_INSTRUCTION, 0,
  87.                       media_context->indirect_object.offset | BASE_ADDRESS_MODIFY);
  88.         } else {
  89.             OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
  90.         }
  91.  
  92.         OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
  93.         OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
  94.         OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
  95.         OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
  96.         ADVANCE_BATCH(batch);
  97.     } else {
  98.         BEGIN_BATCH(batch, 6);
  99.         OUT_BATCH(batch, CMD_STATE_BASE_ADDRESS | 4);
  100.         OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
  101.         OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
  102.  
  103.         if (media_context->indirect_object.bo) {
  104.             OUT_RELOC(batch, media_context->indirect_object.bo, I915_GEM_DOMAIN_INSTRUCTION, 0,
  105.                       media_context->indirect_object.offset | BASE_ADDRESS_MODIFY);
  106.         } else {
  107.             OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
  108.         }
  109.  
  110.         OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
  111.         OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
  112.         ADVANCE_BATCH(batch);
  113.     }
  114. }
  115.  
  116. static void
  117. i965_media_state_pointers(VADriverContextP ctx, struct i965_media_context *media_context)
  118. {
  119.     struct intel_batchbuffer *batch = media_context->base.batch;
  120.  
  121.     BEGIN_BATCH(batch, 3);
  122.     OUT_BATCH(batch, CMD_MEDIA_STATE_POINTERS | 1);
  123.  
  124.     if (media_context->extended_state.enabled)
  125.         OUT_RELOC(batch, media_context->extended_state.bo, I915_GEM_DOMAIN_INSTRUCTION, 0, 1);
  126.     else
  127.         OUT_BATCH(batch, 0);
  128.  
  129.     OUT_RELOC(batch, media_context->vfe_state.bo, I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
  130.     ADVANCE_BATCH(batch);
  131. }
  132.  
  133. static void
  134. i965_media_cs_urb_layout(VADriverContextP ctx, struct i965_media_context *media_context)
  135. {
  136.     struct intel_batchbuffer *batch = media_context->base.batch;
  137.  
  138.     BEGIN_BATCH(batch, 2);
  139.     OUT_BATCH(batch, CMD_CS_URB_STATE | 0);
  140.     OUT_BATCH(batch,
  141.               ((media_context->urb.size_cs_entry - 1) << 4) |     /* URB Entry Allocation Size */
  142.               (media_context->urb.num_cs_entries << 0));          /* Number of URB Entries */
  143.     ADVANCE_BATCH(batch);
  144. }
  145.  
  146. static void
  147. i965_media_pipeline_state(VADriverContextP ctx, struct i965_media_context *media_context)
  148. {
  149.     i965_media_state_base_address(ctx, media_context);
  150.     i965_media_state_pointers(ctx, media_context);
  151.     i965_media_cs_urb_layout(ctx, media_context);
  152. }
  153.  
  154. static void
  155. i965_media_constant_buffer(VADriverContextP ctx, struct decode_state *decode_state, struct i965_media_context *media_context)
  156. {
  157.     struct intel_batchbuffer *batch = media_context->base.batch;
  158.  
  159.     BEGIN_BATCH(batch, 2);
  160.     OUT_BATCH(batch, CMD_CONSTANT_BUFFER | (1 << 8) | (2 - 2));
  161.     OUT_RELOC(batch, media_context->curbe.bo,
  162.               I915_GEM_DOMAIN_INSTRUCTION, 0,
  163.               media_context->urb.size_cs_entry - 1);
  164.     ADVANCE_BATCH(batch);    
  165. }
  166.  
  167. static void
  168. i965_media_depth_buffer(VADriverContextP ctx, struct i965_media_context *media_context)
  169. {
  170.     struct intel_batchbuffer *batch = media_context->base.batch;
  171.  
  172.     BEGIN_BATCH(batch, 6);
  173.     OUT_BATCH(batch, CMD_DEPTH_BUFFER | 4);
  174.     OUT_BATCH(batch, (I965_DEPTHFORMAT_D32_FLOAT << 18) |
  175.               (I965_SURFACE_NULL << 29));
  176.     OUT_BATCH(batch, 0);
  177.     OUT_BATCH(batch, 0);
  178.     OUT_BATCH(batch, 0);
  179.     OUT_BATCH(batch, 0);
  180.     ADVANCE_BATCH(batch);
  181. }
  182.  
/*
 * Build the complete media decode batch for one frame inside an atomic
 * batchbuffer section: flush, pipeline setup commands, then the
 * codec-specific media-object commands supplied by the context.
 */
static void
i965_media_pipeline_setup(VADriverContextP ctx,
                          struct decode_state *decode_state,
                          struct i965_media_context *media_context)
{
    struct intel_batchbuffer *batch = media_context->base.batch;

    intel_batchbuffer_start_atomic(batch, 0x1000);      /* 0x1000: atomic budget — TODO confirm units */
    intel_batchbuffer_emit_mi_flush(batch);                             /* step 1 */
    /* NOTE(review): depth buffer is emitted before PIPELINE_SELECT —
     * presumably deliberate; verify the required command order. */
    i965_media_depth_buffer(ctx, media_context);
    i965_media_pipeline_select(ctx, media_context);                     /* step 2 */
    i965_media_urb_layout(ctx, media_context);                          /* step 3 */
    i965_media_pipeline_state(ctx, media_context);                      /* step 4 */
    i965_media_constant_buffer(ctx, decode_state, media_context);       /* step 5 */
    assert(media_context->media_objects);
    media_context->media_objects(ctx, decode_state, media_context);     /* step 6 */
    intel_batchbuffer_end_atomic(batch);
}
  201.  
  202. static void
  203. i965_media_decode_init(VADriverContextP ctx,
  204.                        VAProfile profile,
  205.                        struct decode_state *decode_state,
  206.                        struct i965_media_context *media_context)
  207. {
  208.     int i;
  209.     struct i965_driver_data *i965 = i965_driver_data(ctx);
  210.     dri_bo *bo;
  211.  
  212.     /* constant buffer */
  213.     dri_bo_unreference(media_context->curbe.bo);
  214.     bo = dri_bo_alloc(i965->intel.bufmgr,
  215.                       "constant buffer",
  216.                       4096, 64);
  217.     assert(bo);
  218.     media_context->curbe.bo = bo;
  219.  
  220.     /* surface state */
  221.     for (i = 0; i < MAX_MEDIA_SURFACES; i++) {
  222.         dri_bo_unreference(media_context->surface_state[i].bo);
  223.         media_context->surface_state[i].bo = NULL;
  224.     }
  225.  
  226.     /* binding table */
  227.     dri_bo_unreference(media_context->binding_table.bo);
  228.     bo = dri_bo_alloc(i965->intel.bufmgr,
  229.                       "binding table",
  230.                       MAX_MEDIA_SURFACES * sizeof(unsigned int), 32);
  231.     assert(bo);
  232.     media_context->binding_table.bo = bo;
  233.  
  234.     /* interface descriptor remapping table */
  235.     dri_bo_unreference(media_context->idrt.bo);
  236.     bo = dri_bo_alloc(i965->intel.bufmgr,
  237.                       "interface discriptor",
  238.                       MAX_INTERFACE_DESC * sizeof(struct i965_interface_descriptor), 16);
  239.     assert(bo);
  240.     media_context->idrt.bo = bo;
  241.  
  242.     /* vfe state */
  243.     dri_bo_unreference(media_context->vfe_state.bo);
  244.     bo = dri_bo_alloc(i965->intel.bufmgr,
  245.                       "vfe state",
  246.                       sizeof(struct i965_vfe_state), 32);
  247.     assert(bo);
  248.     media_context->vfe_state.bo = bo;
  249.  
  250.     /* extended state */
  251.     media_context->extended_state.enabled = 0;
  252.  
  253.     switch (profile) {
  254.     case VAProfileMPEG2Simple:
  255.     case VAProfileMPEG2Main:
  256.         i965_media_mpeg2_decode_init(ctx, decode_state, media_context);
  257.         break;
  258.        
  259.     case VAProfileH264Baseline:
  260.     case VAProfileH264Main:
  261.     case VAProfileH264High:
  262.         i965_media_h264_decode_init(ctx, decode_state, media_context);
  263.         break;
  264.  
  265.     default:
  266.         assert(0);
  267.         break;
  268.     }
  269. }
  270.  
  271. static void
  272. i965_media_decode_picture(VADriverContextP ctx,
  273.                           VAProfile profile,
  274.                           union codec_state *codec_state,
  275.                           struct hw_context *hw_context)
  276. {
  277.     struct i965_media_context *media_context = (struct i965_media_context *)hw_context;
  278.     struct decode_state *decode_state = &codec_state->decode;
  279.  
  280.     i965_media_decode_init(ctx, profile, decode_state, media_context);
  281.     assert(media_context->media_states_setup);
  282.     media_context->media_states_setup(ctx, decode_state, media_context);
  283.     i965_media_pipeline_setup(ctx, decode_state, media_context);
  284.     intel_batchbuffer_flush(hw_context->batch);
  285. }
  286.  
  287. static void
  288. i965_media_context_destroy(void *hw_context)
  289. {
  290.     struct i965_media_context *media_context = (struct i965_media_context *)hw_context;
  291.     int i;
  292.  
  293.     if (media_context->free_private_context)
  294.         media_context->free_private_context(&media_context->private_context);
  295.  
  296.     for (i = 0; i < MAX_MEDIA_SURFACES; i++) {
  297.         dri_bo_unreference(media_context->surface_state[i].bo);
  298.         media_context->surface_state[i].bo = NULL;
  299.     }
  300.    
  301.     dri_bo_unreference(media_context->extended_state.bo);
  302.     media_context->extended_state.bo = NULL;
  303.  
  304.     dri_bo_unreference(media_context->vfe_state.bo);
  305.     media_context->vfe_state.bo = NULL;
  306.  
  307.     dri_bo_unreference(media_context->idrt.bo);
  308.     media_context->idrt.bo = NULL;
  309.  
  310.     dri_bo_unreference(media_context->binding_table.bo);
  311.     media_context->binding_table.bo = NULL;
  312.  
  313.     dri_bo_unreference(media_context->curbe.bo);
  314.     media_context->curbe.bo = NULL;
  315.  
  316.     dri_bo_unreference(media_context->indirect_object.bo);
  317.     media_context->indirect_object.bo = NULL;
  318.  
  319.     intel_batchbuffer_free(media_context->base.batch);
  320.     free(media_context);
  321. }
  322.  
  323. struct hw_context *
  324. g4x_dec_hw_context_init(VADriverContextP ctx, VAProfile profile)
  325. {
  326.     struct intel_driver_data *intel = intel_driver_data(ctx);
  327.     struct i965_media_context *media_context = calloc(1, sizeof(struct i965_media_context));
  328.  
  329.     media_context->base.destroy = i965_media_context_destroy;
  330.     media_context->base.run = i965_media_decode_picture;
  331.     media_context->base.batch = intel_batchbuffer_new(intel, I915_EXEC_RENDER, 0);
  332.  
  333.     switch (profile) {
  334.     case VAProfileMPEG2Simple:
  335.     case VAProfileMPEG2Main:
  336.         i965_media_mpeg2_dec_context_init(ctx, media_context);
  337.         break;
  338.  
  339.     case VAProfileH264Baseline:
  340.     case VAProfileH264Main:
  341.     case VAProfileH264High:
  342.     case VAProfileVC1Simple:
  343.     case VAProfileVC1Main:
  344.     case VAProfileVC1Advanced:
  345.     default:
  346.         assert(0);
  347.         break;
  348.     }
  349.  
  350.     return (struct hw_context *)media_context;
  351. }
  352.  
  353. struct hw_context *
  354. ironlake_dec_hw_context_init(VADriverContextP ctx, VAProfile profile)
  355. {
  356.     struct intel_driver_data *intel = intel_driver_data(ctx);
  357.     struct i965_media_context *media_context = calloc(1, sizeof(struct i965_media_context));
  358.  
  359.     media_context->base.destroy = i965_media_context_destroy;
  360.     media_context->base.run = i965_media_decode_picture;
  361.     media_context->base.batch = intel_batchbuffer_new(intel, I915_EXEC_RENDER, 0);
  362.  
  363.     switch (profile) {
  364.     case VAProfileMPEG2Simple:
  365.     case VAProfileMPEG2Main:
  366.         i965_media_mpeg2_dec_context_init(ctx, media_context);
  367.         break;
  368.  
  369.     case VAProfileH264Baseline:
  370.     case VAProfileH264Main:
  371.     case VAProfileH264High:
  372.         i965_media_h264_dec_context_init(ctx, media_context);
  373.         break;
  374.  
  375.     case VAProfileVC1Simple:
  376.     case VAProfileVC1Main:
  377.     case VAProfileVC1Advanced:
  378.     default:
  379.         assert(0);
  380.         break;
  381.     }
  382.  
  383.     return (struct hw_context *)media_context;
  384. }
  385.