/*
 * Copyright © 2009 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Xiang Haihao <haihao.xiang@intel.com>
 *    Zou Nan hai <nanhai.zou@intel.com>
 *
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>

#include "intel_batchbuffer.h"
#include "intel_driver.h"
#include "i965_defines.h"
#include "i965_drv_video.h"

#include "i965_media.h"
#include "i965_media_mpeg2.h"
#include "i965_media_h264.h"
#include "i965_decoder_utils.h"

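/* Emit PIPELINE_SELECT to switch the GPU command streamer to the media pipeline. */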
static void
i965_media_pipeline_select(VADriverContextP ctx, struct i965_media_context *media_context)
{
    struct intel_batchbuffer *batch = media_context->base.batch;

    BEGIN_BATCH(batch, 1);
    OUT_BATCH(batch, CMD_PIPELINE_SELECT | PIPELINE_SELECT_MEDIA);
    ADVANCE_BATCH(batch);
}

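/*
 * Emit URB_FENCE to partition the URB: the VFE section runs up to the
 * CS start offset, and the constant (CS) section extends to the end of
 * the URB as reported by the device info.
 */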
static void
i965_media_urb_layout(VADriverContextP ctx, struct i965_media_context *media_context)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = media_context->base.batch;
    unsigned int vfe_fence, cs_fence;

    vfe_fence = media_context->urb.cs_start;
    cs_fence = i965->intel.device_info->urb_size;

    BEGIN_BATCH(batch, 3);
    OUT_BATCH(batch, CMD_URB_FENCE | UF0_VFE_REALLOC | UF0_CS_REALLOC | 1);
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch,
              (vfe_fence << UF2_VFE_FENCE_SHIFT) |      /* VFE_SIZE */
              (cs_fence << UF2_CS_FENCE_SHIFT));        /* CS_SIZE */
    ADVANCE_BATCH(batch);
}

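/*
 * Emit STATE_BASE_ADDRESS. Ironlake takes an 8-DWORD form (two extra
 * DWORDs over earlier Gen4 hardware's 6-DWORD form). Only the indirect
 * object base is pointed at a real buffer when one exists; every other
 * base address is left at zero with the modify bit set.
 */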
static void
i965_media_state_base_address(VADriverContextP ctx, struct i965_media_context *media_context)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = media_context->base.batch;

    if (IS_IRONLAKE(i965->intel.device_info)) {
        BEGIN_BATCH(batch, 8);
        OUT_BATCH(batch, CMD_STATE_BASE_ADDRESS | 6);
        OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
        OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);

        if (media_context->indirect_object.bo) {
            OUT_RELOC(batch, media_context->indirect_object.bo, I915_GEM_DOMAIN_INSTRUCTION, 0,
                      media_context->indirect_object.offset | BASE_ADDRESS_MODIFY);
        } else {
            OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
        }

        OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
        OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
        OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
        OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
        ADVANCE_BATCH(batch);
    } else {
        BEGIN_BATCH(batch, 6);
        OUT_BATCH(batch, CMD_STATE_BASE_ADDRESS | 4);
        OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
        OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);

        if (media_context->indirect_object.bo) {
            OUT_RELOC(batch, media_context->indirect_object.bo, I915_GEM_DOMAIN_INSTRUCTION, 0,
                      media_context->indirect_object.offset | BASE_ADDRESS_MODIFY);
        } else {
            OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
        }

        OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
        OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
        ADVANCE_BATCH(batch);
    }
}

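/*
 * Emit MEDIA_STATE_POINTERS pointing at the extended (codec-specific)
 * state buffer, when enabled, and the VFE state buffer; the extended
 * pointer is written with its low bit set to mark it in use.
 */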
static void
i965_media_state_pointers(VADriverContextP ctx, struct i965_media_context *media_context)
{
    struct intel_batchbuffer *batch = media_context->base.batch;

    BEGIN_BATCH(batch, 3);
    OUT_BATCH(batch, CMD_MEDIA_STATE_POINTERS | 1);

    if (media_context->extended_state.enabled)
        OUT_RELOC(batch, media_context->extended_state.bo, I915_GEM_DOMAIN_INSTRUCTION, 0, 1);
    else
        OUT_BATCH(batch, 0);

    OUT_RELOC(batch, media_context->vfe_state.bo, I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
    ADVANCE_BATCH(batch);
}

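/* Emit CS_URB_STATE describing the size and count of constant URB entries. */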
static void
i965_media_cs_urb_layout(VADriverContextP ctx, struct i965_media_context *media_context)
{
    struct intel_batchbuffer *batch = media_context->base.batch;

    BEGIN_BATCH(batch, 2);
    OUT_BATCH(batch, CMD_CS_URB_STATE | 0);
    OUT_BATCH(batch,
              ((media_context->urb.size_cs_entry - 1) << 4) |     /* URB Entry Allocation Size */
              (media_context->urb.num_cs_entries << 0));          /* Number of URB Entries */
    ADVANCE_BATCH(batch);
}

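/* Emit the fixed per-frame pipeline state: base addresses, state pointers and CS URB layout. */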
static void
i965_media_pipeline_state(VADriverContextP ctx, struct i965_media_context *media_context)
{
    i965_media_state_base_address(ctx, media_context);
    i965_media_state_pointers(ctx, media_context);
    i965_media_cs_urb_layout(ctx, media_context);
}

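/*
 * Emit CONSTANT_BUFFER pointing at the CURBE buffer; the relocation
 * delta carries the buffer length, expressed as the CS entry size
 * minus one.
 */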
static void
i965_media_constant_buffer(VADriverContextP ctx, struct decode_state *decode_state, struct i965_media_context *media_context)
{
    struct intel_batchbuffer *batch = media_context->base.batch;

    BEGIN_BATCH(batch, 2);
    OUT_BATCH(batch, CMD_CONSTANT_BUFFER | (1 << 8) | (2 - 2));
    OUT_RELOC(batch, media_context->curbe.bo,
              I915_GEM_DOMAIN_INSTRUCTION, 0,
              media_context->urb.size_cs_entry - 1);
    ADVANCE_BATCH(batch);
}

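/*
 * Emit a null DEPTH_BUFFER (surface type NULL, format D32_FLOAT),
 * leaving the depth state in a known-inert configuration for the
 * media pipeline.
 */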
static void
i965_media_depth_buffer(VADriverContextP ctx, struct i965_media_context *media_context)
{
    struct intel_batchbuffer *batch = media_context->base.batch;

    BEGIN_BATCH(batch, 6);
    OUT_BATCH(batch, CMD_DEPTH_BUFFER | 4);
    OUT_BATCH(batch, (I965_DEPTHFORMAT_D32_FLOAT << 18) |
              (I965_SURFACE_NULL << 29));
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, 0);
    ADVANCE_BATCH(batch);
}

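/*
 * Build the complete per-picture batch: flush, null depth buffer,
 * pipeline select, URB layout, pipeline state, constant buffer, and
 * finally the codec-specific media objects, all inside one atomic
 * batchbuffer section.
 */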
static void
i965_media_pipeline_setup(VADriverContextP ctx,
                          struct decode_state *decode_state,
                          struct i965_media_context *media_context)
{
    struct intel_batchbuffer *batch = media_context->base.batch;

    intel_batchbuffer_start_atomic(batch, 0x1000);
    intel_batchbuffer_emit_mi_flush(batch);                             /* step 1 */
    i965_media_depth_buffer(ctx, media_context);
    i965_media_pipeline_select(ctx, media_context);                     /* step 2 */
    i965_media_urb_layout(ctx, media_context);                          /* step 3 */
    i965_media_pipeline_state(ctx, media_context);                      /* step 4 */
    i965_media_constant_buffer(ctx, decode_state, media_context);       /* step 5 */
    assert(media_context->media_objects);
    media_context->media_objects(ctx, decode_state, media_context);     /* step 6 */
    intel_batchbuffer_end_atomic(batch);
}

  202.  
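/*
 * (Re)allocate the per-picture GPU buffers (CURBE, binding table,
 * interface descriptors, VFE state) and hand off to the codec-specific
 * init routine for the requested profile.
 */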
static void
i965_media_decode_init(VADriverContextP ctx,
                       VAProfile profile,
                       struct decode_state *decode_state,
                       struct i965_media_context *media_context)
{
    int i;
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    dri_bo *bo;

    /* constant buffer */
    dri_bo_unreference(media_context->curbe.bo);
    bo = dri_bo_alloc(i965->intel.bufmgr,
                      "constant buffer",
                      4096, 64);
    assert(bo);
    media_context->curbe.bo = bo;

    /* surface state */
    for (i = 0; i < MAX_MEDIA_SURFACES; i++) {
        dri_bo_unreference(media_context->surface_state[i].bo);
        media_context->surface_state[i].bo = NULL;
    }

    /* binding table */
    dri_bo_unreference(media_context->binding_table.bo);
    bo = dri_bo_alloc(i965->intel.bufmgr,
                      "binding table",
                      MAX_MEDIA_SURFACES * sizeof(unsigned int), 32);
    assert(bo);
    media_context->binding_table.bo = bo;

    /* interface descriptor remapping table */
    dri_bo_unreference(media_context->idrt.bo);
    bo = dri_bo_alloc(i965->intel.bufmgr,
                      "interface descriptor",
                      MAX_INTERFACE_DESC * sizeof(struct i965_interface_descriptor), 16);
    assert(bo);
    media_context->idrt.bo = bo;

    /* vfe state */
    dri_bo_unreference(media_context->vfe_state.bo);
    bo = dri_bo_alloc(i965->intel.bufmgr,
                      "vfe state",
                      sizeof(struct i965_vfe_state), 32);
    assert(bo);
    media_context->vfe_state.bo = bo;

    /* extended state */
    media_context->extended_state.enabled = 0;

    switch (profile) {
    case VAProfileMPEG2Simple:
    case VAProfileMPEG2Main:
        i965_media_mpeg2_decode_init(ctx, decode_state, media_context);
        break;

    case VAProfileH264ConstrainedBaseline:
    case VAProfileH264Main:
    case VAProfileH264High:
        i965_media_h264_decode_init(ctx, decode_state, media_context);
        break;

    default:
        assert(0);
        break;
    }
}

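/*
 * Top-level decode entry point: validate the input, initialize the
 * per-picture state, let the codec set up its media states, build the
 * batch and flush it to the kernel.
 */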
static VAStatus
i965_media_decode_picture(VADriverContextP ctx,
                          VAProfile profile,
                          union codec_state *codec_state,
                          struct hw_context *hw_context)
{
    struct i965_media_context *media_context = (struct i965_media_context *)hw_context;
    struct decode_state *decode_state = &codec_state->decode;
    VAStatus vaStatus;

    vaStatus = intel_decoder_sanity_check_input(ctx, profile, decode_state);

    if (vaStatus != VA_STATUS_SUCCESS)
        goto out;

    i965_media_decode_init(ctx, profile, decode_state, media_context);
    assert(media_context->media_states_setup);
    media_context->media_states_setup(ctx, decode_state, media_context);
    i965_media_pipeline_setup(ctx, decode_state, media_context);
    intel_batchbuffer_flush(hw_context->batch);

    vaStatus = VA_STATUS_SUCCESS;

out:
    return vaStatus;
}

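/*
 * Release every buffer object owned by the media context, the
 * codec-private data and the batchbuffer, then the context itself.
 */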
static void
i965_media_context_destroy(void *hw_context)
{
    struct i965_media_context *media_context = (struct i965_media_context *)hw_context;
    int i;

    if (media_context->free_private_context)
        media_context->free_private_context(&media_context->private_context);

    for (i = 0; i < MAX_MEDIA_SURFACES; i++) {
        dri_bo_unreference(media_context->surface_state[i].bo);
        media_context->surface_state[i].bo = NULL;
    }

    dri_bo_unreference(media_context->extended_state.bo);
    media_context->extended_state.bo = NULL;

    dri_bo_unreference(media_context->vfe_state.bo);
    media_context->vfe_state.bo = NULL;

    dri_bo_unreference(media_context->idrt.bo);
    media_context->idrt.bo = NULL;

    dri_bo_unreference(media_context->binding_table.bo);
    media_context->binding_table.bo = NULL;

    dri_bo_unreference(media_context->curbe.bo);
    media_context->curbe.bo = NULL;

    dri_bo_unreference(media_context->indirect_object.bo);
    media_context->indirect_object.bo = NULL;

    intel_batchbuffer_free(media_context->base.batch);
    free(media_context);
}

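/*
 * Create a decoder context for G4x hardware; only MPEG-2 decoding is
 * wired up here, so every other profile trips the assert.
 */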
struct hw_context *
g4x_dec_hw_context_init(VADriverContextP ctx, struct object_config *obj_config)
{
    struct intel_driver_data *intel = intel_driver_data(ctx);
    struct i965_media_context *media_context = calloc(1, sizeof(struct i965_media_context));

    media_context->base.destroy = i965_media_context_destroy;
    media_context->base.run = i965_media_decode_picture;
    media_context->base.batch = intel_batchbuffer_new(intel, I915_EXEC_RENDER, 0);

    switch (obj_config->profile) {
    case VAProfileMPEG2Simple:
    case VAProfileMPEG2Main:
        i965_media_mpeg2_dec_context_init(ctx, media_context);
        break;

    case VAProfileH264ConstrainedBaseline:
    case VAProfileH264Main:
    case VAProfileH264High:
    case VAProfileVC1Simple:
    case VAProfileVC1Main:
    case VAProfileVC1Advanced:
    default:
        assert(0);
        break;
    }

    return (struct hw_context *)media_context;
}

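/*
 * Create a decoder context for Ironlake hardware, which adds H.264 on
 * top of MPEG-2; VC-1 profiles remain unsupported and hit the assert.
 */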
struct hw_context *
ironlake_dec_hw_context_init(VADriverContextP ctx, struct object_config *obj_config)
{
    struct intel_driver_data *intel = intel_driver_data(ctx);
    struct i965_media_context *media_context = calloc(1, sizeof(struct i965_media_context));

    media_context->base.destroy = i965_media_context_destroy;
    media_context->base.run = i965_media_decode_picture;
    media_context->base.batch = intel_batchbuffer_new(intel, I915_EXEC_RENDER, 0);

    switch (obj_config->profile) {
    case VAProfileMPEG2Simple:
    case VAProfileMPEG2Main:
        i965_media_mpeg2_dec_context_init(ctx, media_context);
        break;

    case VAProfileH264ConstrainedBaseline:
    case VAProfileH264Main:
    case VAProfileH264High:
        i965_media_h264_dec_context_init(ctx, media_context);
        break;

    case VAProfileVC1Simple:
    case VAProfileVC1Main:
    case VAProfileVC1Advanced:
    default:
        assert(0);
        break;
    }

    return (struct hw_context *)media_context;
}