Subversion Repositories Kolibri OS

Rev

Blame | Last modification | View Log | RSS feed

  1. /*
  2.  * Copyright © 2014 Intel Corporation
  3.  *
  4.  * Permission is hereby granted, free of charge, to any person obtaining a
  5.  * copy of this software and associated documentation files (the
  6.  * "Software"), to deal in the Software without restriction, including
  7.  * without limitation the rights to use, copy, modify, merge, publish,
  8.  * distribute, sub license, and/or sell copies of the Software, and to
  9.  * permit persons to whom the Software is furnished to do so, subject to
  10.  * the following conditions:
  11.  *
  12.  * The above copyright notice and this permission notice (including the
  13.  * next paragraph) shall be included in all copies or substantial portions
  14.  * of the Software.
  15.  *
  16.  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
  17.  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  18.  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
  19.  * IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR
  20.  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
  21.  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
  22.  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  23.  *
  24.  * Authors:
  25.  *    Xiang Haihao <haihao.xiang@intel.com>
  26.  *    Zhao Yakui <yakui.zhao@intel.com>
  27.  *
  28.  */
  29.  
  30. #include <stdio.h>
  31. #include <stdlib.h>
  32. #include <string.h>
  33. #include <assert.h>
  34.  
  35. #include "intel_batchbuffer.h"
  36. #include "intel_driver.h"
  37. #include "i965_defines.h"
  38. #include "i965_structs.h"
  39. #include "i965_drv_video.h"
  40. #include "i965_post_processing.h"
  41. #include "i965_render.h"
  42. #include "intel_media.h"
  43.  
/* Gen8 surface states are stored padded; each slot in the shared
 * surface-state/binding-table BO is this many bytes apart. */
#define SURFACE_STATE_PADDED_SIZE               SURFACE_STATE_PADDED_SIZE_GEN8

/* Byte offset of surface-state slot #index inside the shared BO. */
#define SURFACE_STATE_OFFSET(index)             (SURFACE_STATE_PADDED_SIZE * index)
/* The binding table itself lives after all MAX_PP_SURFACES state slots. */
#define BINDING_TABLE_OFFSET                    SURFACE_STATE_OFFSET(MAX_PP_SURFACES)

/* Media-walker block geometry assumed by the PP kernels: 16x8 pixel blocks,
 * destination x offsets must be aligned to 4 pixels (dword aligned). */
#define GPU_ASM_BLOCK_WIDTH         16
#define GPU_ASM_BLOCK_HEIGHT        8
#define GPU_ASM_X_OFFSET_ALIGNMENT  4

/* NOTE(review): despite the name this is NOT a VA status code
 * (VA_STATUS_SUCCESS is 0 in va.h); it looks like a private sentinel value.
 * Confirm against its users before relying on it. */
#define VA_STATUS_SUCCESS_1                     0xFFFFFFFE
  54.  
/* Forward declarations of the module initializers referenced by the
 * pp_modules_gen8[] table below. */

/* Initializer for the no-op (NULL) post-processing module. */
static VAStatus pp_null_initialize(VADriverContextP ctx, struct i965_post_processing_context *pp_context,
                                   const struct i965_surface *src_surface,
                                   const VARectangle *src_rect,
                                   struct i965_surface *dst_surface,
                                   const VARectangle *dst_rect,
                                   void *filter_param);

/* Initializer shared by all load/save/scale/AVS format-conversion modules. */
static VAStatus gen8_pp_plx_avs_initialize(VADriverContextP ctx, struct i965_post_processing_context *pp_context,
                                           const struct i965_surface *src_surface,
                                           const VARectangle *src_rect,
                                           struct i965_surface *dst_surface,
                                           const VARectangle *dst_rect,
                                           void *filter_param);
  68.  
/* Pre-compiled Gen8 kernel binaries for each post-processing module.
 * The .g8b files are assembler dumps included as uint32_t quadword arrays.
 * TODO: Modify the shader and then compile it again.
 * Currently it is derived from Haswell. */

/* Empty kernel: the NULL module performs no GPU work. */
static const uint32_t pp_null_gen8[][4] = {
};

static const uint32_t pp_nv12_load_save_nv12_gen8[][4] = {
#include "shaders/post_processing/gen8/pl2_to_pl2.g8b"
};

static const uint32_t pp_nv12_load_save_pl3_gen8[][4] = {
#include "shaders/post_processing/gen8/pl2_to_pl3.g8b"
};

static const uint32_t pp_pl3_load_save_nv12_gen8[][4] = {
#include "shaders/post_processing/gen8/pl3_to_pl2.g8b"
};

static const uint32_t pp_pl3_load_save_pl3_gen8[][4] = {
#include "shaders/post_processing/gen8/pl3_to_pl3.g8b"
};

/* Scaling and AVS reuse the generic PL2->PL2 kernel on Gen8. */
static const uint32_t pp_nv12_scaling_gen8[][4] = {
#include "shaders/post_processing/gen8/pl2_to_pl2.g8b"
};

static const uint32_t pp_nv12_avs_gen8[][4] = {
#include "shaders/post_processing/gen8/pl2_to_pl2.g8b"
};

/* DNDI/DN kernels are not yet ported to Gen8; the tables are left empty and
 * their modules fall back to pp_null_initialize below. */
static const uint32_t pp_nv12_dndi_gen8[][4] = {
// #include "shaders/post_processing/gen7/dndi.g75b"
};

static const uint32_t pp_nv12_dn_gen8[][4] = {
// #include "shaders/post_processing/gen7/nv12_dn_nv12.g75b"
};
static const uint32_t pp_nv12_load_save_pa_gen8[][4] = {
#include "shaders/post_processing/gen8/pl2_to_pa.g8b"
};
static const uint32_t pp_pl3_load_save_pa_gen8[][4] = {
#include "shaders/post_processing/gen8/pl3_to_pa.g8b"
};
static const uint32_t pp_pa_load_save_nv12_gen8[][4] = {
#include "shaders/post_processing/gen8/pa_to_pl2.g8b"
};
static const uint32_t pp_pa_load_save_pl3_gen8[][4] = {
#include "shaders/post_processing/gen8/pa_to_pl3.g8b"
};
static const uint32_t pp_pa_load_save_pa_gen8[][4] = {
#include "shaders/post_processing/gen8/pa_to_pa.g8b"
};
static const uint32_t pp_rgbx_load_save_nv12_gen8[][4] = {
#include "shaders/post_processing/gen8/rgbx_to_nv12.g8b"
};
static const uint32_t pp_nv12_load_save_rgbx_gen8[][4] = {
#include "shaders/post_processing/gen8/pl2_to_rgbx.g8b"
};
  126.  
  127. static struct pp_module pp_modules_gen8[] = {
  128.     {
  129.         {
  130.             "NULL module (for testing)",
  131.             PP_NULL,
  132.             pp_null_gen8,
  133.             sizeof(pp_null_gen8),
  134.             NULL,
  135.         },
  136.  
  137.         pp_null_initialize,
  138.     },
  139.  
  140.     {
  141.         {
  142.             "NV12_NV12",
  143.             PP_NV12_LOAD_SAVE_N12,
  144.             pp_nv12_load_save_nv12_gen8,
  145.             sizeof(pp_nv12_load_save_nv12_gen8),
  146.             NULL,
  147.         },
  148.  
  149.         gen8_pp_plx_avs_initialize,
  150.     },
  151.  
  152.     {
  153.         {
  154.             "NV12_PL3",
  155.             PP_NV12_LOAD_SAVE_PL3,
  156.             pp_nv12_load_save_pl3_gen8,
  157.             sizeof(pp_nv12_load_save_pl3_gen8),
  158.             NULL,
  159.         },
  160.         gen8_pp_plx_avs_initialize,
  161.     },
  162.  
  163.     {
  164.         {
  165.             "PL3_NV12",
  166.             PP_PL3_LOAD_SAVE_N12,
  167.             pp_pl3_load_save_nv12_gen8,
  168.             sizeof(pp_pl3_load_save_nv12_gen8),
  169.             NULL,
  170.         },
  171.  
  172.         gen8_pp_plx_avs_initialize,
  173.     },
  174.  
  175.     {
  176.         {
  177.             "PL3_PL3",
  178.             PP_PL3_LOAD_SAVE_N12,
  179.             pp_pl3_load_save_pl3_gen8,
  180.             sizeof(pp_pl3_load_save_pl3_gen8),
  181.             NULL,
  182.         },
  183.  
  184.         gen8_pp_plx_avs_initialize,
  185.     },
  186.  
  187.     {
  188.         {
  189.             "NV12 Scaling module",
  190.             PP_NV12_SCALING,
  191.             pp_nv12_scaling_gen8,
  192.             sizeof(pp_nv12_scaling_gen8),
  193.             NULL,
  194.         },
  195.  
  196.         gen8_pp_plx_avs_initialize,
  197.     },
  198.  
  199.     {
  200.         {
  201.             "NV12 AVS module",
  202.             PP_NV12_AVS,
  203.             pp_nv12_avs_gen8,
  204.             sizeof(pp_nv12_avs_gen8),
  205.             NULL,
  206.         },
  207.  
  208.         gen8_pp_plx_avs_initialize,
  209.     },
  210.  
  211.     {
  212.         {
  213.             "NV12 DNDI module",
  214.             PP_NV12_DNDI,
  215.             pp_nv12_dndi_gen8,
  216.             sizeof(pp_nv12_dndi_gen8),
  217.             NULL,
  218.         },
  219.  
  220.         pp_null_initialize,
  221.     },
  222.  
  223.     {
  224.         {
  225.             "NV12 DN module",
  226.             PP_NV12_DN,
  227.             pp_nv12_dn_gen8,
  228.             sizeof(pp_nv12_dn_gen8),
  229.             NULL,
  230.         },
  231.  
  232.         pp_null_initialize,
  233.     },
  234.     {
  235.         {
  236.             "NV12_PA module",
  237.             PP_NV12_LOAD_SAVE_PA,
  238.             pp_nv12_load_save_pa_gen8,
  239.             sizeof(pp_nv12_load_save_pa_gen8),
  240.             NULL,
  241.         },
  242.  
  243.         gen8_pp_plx_avs_initialize,
  244.     },
  245.  
  246.     {
  247.         {
  248.             "PL3_PA module",
  249.             PP_PL3_LOAD_SAVE_PA,
  250.             pp_pl3_load_save_pa_gen8,
  251.             sizeof(pp_pl3_load_save_pa_gen8),
  252.             NULL,
  253.         },
  254.  
  255.         gen8_pp_plx_avs_initialize,
  256.     },
  257.  
  258.     {
  259.         {
  260.             "PA_NV12 module",
  261.             PP_PA_LOAD_SAVE_NV12,
  262.             pp_pa_load_save_nv12_gen8,
  263.             sizeof(pp_pa_load_save_nv12_gen8),
  264.             NULL,
  265.         },
  266.  
  267.         gen8_pp_plx_avs_initialize,
  268.     },
  269.  
  270.     {
  271.         {
  272.             "PA_PL3 module",
  273.             PP_PA_LOAD_SAVE_PL3,
  274.             pp_pa_load_save_pl3_gen8,
  275.             sizeof(pp_pa_load_save_pl3_gen8),
  276.             NULL,
  277.         },
  278.  
  279.         gen8_pp_plx_avs_initialize,
  280.     },
  281.  
  282.     {
  283.         {
  284.             "PA_PA module",
  285.             PP_PA_LOAD_SAVE_PA,
  286.             pp_pa_load_save_pa_gen8,
  287.             sizeof(pp_pa_load_save_pa_gen8),
  288.             NULL,
  289.         },
  290.  
  291.         gen8_pp_plx_avs_initialize,
  292.     },
  293.  
  294.     {
  295.         {
  296.             "RGBX_NV12 module",
  297.             PP_RGBX_LOAD_SAVE_NV12,
  298.             pp_rgbx_load_save_nv12_gen8,
  299.             sizeof(pp_rgbx_load_save_nv12_gen8),
  300.             NULL,
  301.         },
  302.  
  303.         gen8_pp_plx_avs_initialize,
  304.     },
  305.  
  306.     {
  307.         {
  308.             "NV12_RGBX module",
  309.             PP_NV12_LOAD_SAVE_RGBX,
  310.             pp_nv12_load_save_rgbx_gen8,
  311.             sizeof(pp_nv12_load_save_rgbx_gen8),
  312.             NULL,
  313.         },
  314.  
  315.         gen8_pp_plx_avs_initialize,
  316.     },
  317. };
  318.  
  319. static int
  320. pp_get_surface_fourcc(VADriverContextP ctx, const struct i965_surface *surface)
  321. {
  322.     int fourcc;
  323.  
  324.     if (surface->type == I965_SURFACE_TYPE_IMAGE) {
  325.         struct object_image *obj_image = (struct object_image *)surface->base;
  326.         fourcc = obj_image->image.format.fourcc;
  327.     } else {
  328.         struct object_surface *obj_surface = (struct object_surface *)surface->base;
  329.         fourcc = obj_surface->fourcc;
  330.     }
  331.  
  332.     return fourcc;
  333. }
  334.  
  335. static void
  336. gen8_pp_set_surface_tiling(struct gen8_surface_state *ss, unsigned int tiling)
  337. {
  338.     switch (tiling) {
  339.     case I915_TILING_NONE:
  340.         ss->ss0.tiled_surface = 0;
  341.         ss->ss0.tile_walk = 0;
  342.         break;
  343.     case I915_TILING_X:
  344.         ss->ss0.tiled_surface = 1;
  345.         ss->ss0.tile_walk = I965_TILEWALK_XMAJOR;
  346.         break;
  347.     case I915_TILING_Y:
  348.         ss->ss0.tiled_surface = 1;
  349.         ss->ss0.tile_walk = I965_TILEWALK_YMAJOR;
  350.         break;
  351.     }
  352. }
  353.  
  354. static void
  355. gen8_pp_set_surface2_tiling(struct gen8_surface_state2 *ss, unsigned int tiling)
  356. {
  357.     switch (tiling) {
  358.     case I915_TILING_NONE:
  359.         ss->ss2.tiled_surface = 0;
  360.         ss->ss2.tile_walk = 0;
  361.         break;
  362.     case I915_TILING_X:
  363.         ss->ss2.tiled_surface = 1;
  364.         ss->ss2.tile_walk = I965_TILEWALK_XMAJOR;
  365.         break;
  366.     case I915_TILING_Y:
  367.         ss->ss2.tiled_surface = 1;
  368.         ss->ss2.tile_walk = I965_TILEWALK_YMAJOR;
  369.         break;
  370.     }
  371. }
  372.  
  373.  
/* Build a 2D SURFACE_STATE describing plane data at surf_bo+surf_bo_offset,
 * write it into state slot #index of the shared surface-state/binding-table
 * BO, and point binding-table entry #index at it.
 * is_target non-zero marks the surface as GPU-writable (render target). */
static void
gen8_pp_set_surface_state(VADriverContextP ctx, struct i965_post_processing_context *pp_context,
                          dri_bo *surf_bo, unsigned long surf_bo_offset,
                          int width, int height, int pitch, int format,
                          int index, int is_target)
{
    struct gen8_surface_state *ss;
    dri_bo *ss_bo;
    unsigned int tiling;
    unsigned int swizzle;

    dri_bo_get_tiling(surf_bo, &tiling, &swizzle);
    ss_bo = pp_context->surface_state_binding_table.bo;
    assert(ss_bo);

    dri_bo_map(ss_bo, True);
    assert(ss_bo->virtual);
    ss = (struct gen8_surface_state *)((char *)ss_bo->virtual + SURFACE_STATE_OFFSET(index));
    memset(ss, 0, sizeof(*ss));
    ss->ss0.surface_type = I965_SURFACE_2D;
    ss->ss0.surface_format = format;
    ss->ss8.base_addr = surf_bo->offset + surf_bo_offset;
    /* Hardware encodes width/height/pitch minus one. */
    ss->ss2.width = width - 1;
    ss->ss2.height = height - 1;
    ss->ss3.pitch = pitch - 1;

    /* Always set 1(align 4 mode) per B-spec */
    ss->ss0.vertical_alignment = 1;
    ss->ss0.horizontal_alignment = 1;

    gen8_pp_set_surface_tiling(ss, tiling);
    gen8_render_set_surface_scs(ss);
    /* Relocate the base address stored in ss8 so the kernel patches it when
     * surf_bo is moved; targets are writable in the render domain too. */
    dri_bo_emit_reloc(ss_bo,
                      I915_GEM_DOMAIN_RENDER, is_target ? I915_GEM_DOMAIN_RENDER : 0,
                      surf_bo_offset,
                      SURFACE_STATE_OFFSET(index) + offsetof(struct gen8_surface_state, ss8),
                      surf_bo);
    /* Binding-table entry simply records the state slot's byte offset. */
    ((unsigned int *)((char *)ss_bo->virtual + BINDING_TABLE_OFFSET))[index] = SURFACE_STATE_OFFSET(index);
    dri_bo_unmap(ss_bo);
}
  414.  
  415.  
/* Build a media SURFACE_STATE "2" (sampler_8x8-style surface used as a PP
 * source) for the plane at surf_bo+surf_bo_offset, store it in state slot
 * #index and hook up binding-table entry #index.
 * xoffset/yoffset locate the Cb plane relative to the base address;
 * interleave_chroma is set for NV12-style interleaved CbCr planes. */
static void
gen8_pp_set_surface2_state(VADriverContextP ctx, struct i965_post_processing_context *pp_context,
                           dri_bo *surf_bo, unsigned long surf_bo_offset,
                           int width, int height, int wpitch,
                           int xoffset, int yoffset,
                           int format, int interleave_chroma,
                           int index)
{
    struct gen8_surface_state2 *ss2;
    dri_bo *ss2_bo;
    unsigned int tiling;
    unsigned int swizzle;

    dri_bo_get_tiling(surf_bo, &tiling, &swizzle);
    ss2_bo = pp_context->surface_state_binding_table.bo;
    assert(ss2_bo);

    dri_bo_map(ss2_bo, True);
    assert(ss2_bo->virtual);
    ss2 = (struct gen8_surface_state2 *)((char *)ss2_bo->virtual + SURFACE_STATE_OFFSET(index));
    memset(ss2, 0, sizeof(*ss2));
    ss2->ss6.base_addr = surf_bo->offset + surf_bo_offset;
    ss2->ss1.cbcr_pixel_offset_v_direction = 0;
    /* Hardware encodes width/height/pitch minus one. */
    ss2->ss1.width = width - 1;
    ss2->ss1.height = height - 1;
    ss2->ss2.pitch = wpitch - 1;
    ss2->ss2.interleave_chroma = interleave_chroma;
    ss2->ss2.surface_format = format;
    ss2->ss3.x_offset_for_cb = xoffset;
    ss2->ss3.y_offset_for_cb = yoffset;
    gen8_pp_set_surface2_tiling(ss2, tiling);
    /* Read-only source: no write domain in the relocation. Base address for
     * this layout lives in ss6. */
    dri_bo_emit_reloc(ss2_bo,
                      I915_GEM_DOMAIN_RENDER, 0,
                      surf_bo_offset,
                      SURFACE_STATE_OFFSET(index) + offsetof(struct gen8_surface_state2, ss6),
                      surf_bo);
    ((unsigned int *)((char *)ss2_bo->virtual + BINDING_TABLE_OFFSET))[index] = SURFACE_STATE_OFFSET(index);
    dri_bo_unmap(ss2_bo);
}
  455.  
/* Set up the surface states for one side (source or destination) of a PP
 * media read/write message.
 *
 * Computes per-plane width/height/pitch/offset into the caller-provided
 * 3-element arrays, then emits:
 *   - for targets (is_target != 0): plain SURFACE_STATE entries starting at
 *     base_index, with widths scaled so each plane is addressed as R8/R8G8
 *     byte elements;
 *   - for sources: SURFACE_STATE "2" entries (sampler-friendly layout).
 * Returns early (leaving the output arrays unwritten) for unknown fourccs.
 */
static void
gen8_pp_set_media_rw_message_surface(VADriverContextP ctx, struct i965_post_processing_context *pp_context,
                                     const struct i965_surface *surface,
                                     int base_index, int is_target,
                                     const VARectangle *rect,
                                     int *width, int *height, int *pitch, int *offset)
{
    struct object_surface *obj_surface;
    struct object_image *obj_image;
    dri_bo *bo;
    int fourcc = pp_get_surface_fourcc(ctx, surface);
    const i965_fourcc_info *fourcc_info = get_fourcc_info(fourcc);

    if (fourcc_info == NULL)
        return;

    if (surface->type == I965_SURFACE_TYPE_SURFACE) {
        /* Video surface: plane layout derives from the object_surface fields. */
        obj_surface = (struct object_surface *)surface->base;
        bo = obj_surface->bo;
        /* Clamp the processed extent to the allocated surface size. */
        width[0] = MIN(rect->x + rect->width, obj_surface->orig_width);
        height[0] = MIN(rect->y + rect->height, obj_surface->orig_height);
        pitch[0] = obj_surface->width;
        offset[0] = 0;

        if (fourcc_info->num_planes == 1 && is_target)
            width[0] = width[0] * (fourcc_info->bpp[0] / 8); /* surface format is R8 */

        /* Chroma planes are subsampled by hfactor/vfactor. */
        width[1] = MIN(rect->x / fourcc_info->hfactor + rect->width / fourcc_info->hfactor, obj_surface->cb_cr_width);
        height[1] = MIN(rect->y / fourcc_info->vfactor + rect->height / fourcc_info->vfactor, obj_surface->cb_cr_height);
        pitch[1] = obj_surface->cb_cr_pitch;
        offset[1] = obj_surface->y_cb_offset * obj_surface->width;

        width[2] = MIN(rect->x / fourcc_info->hfactor + rect->width / fourcc_info->hfactor, obj_surface->cb_cr_width);
        height[2] = MIN(rect->y / fourcc_info->vfactor + rect->height / fourcc_info->vfactor, obj_surface->cb_cr_height);
        pitch[2] = obj_surface->cb_cr_pitch;
        offset[2] = obj_surface->y_cr_offset * obj_surface->width;
    } else {
        /* VAImage: plane layout comes from the image's pitches/offsets,
         * with U/V plane indices depending on the component order. */
        int U = 0, V = 0;

        /* FIXME: add support for ARGB/ABGR image */
        obj_image = (struct object_image *)surface->base;
        bo = obj_image->bo;
        width[0] = MIN(rect->x + rect->width, obj_image->image.width);
        height[0] = MIN(rect->y + rect->height, obj_image->image.height);
        pitch[0] = obj_image->image.pitches[0];
        offset[0] = obj_image->image.offsets[0];

        if (fourcc_info->num_planes == 1) {
            if (is_target)
                width[0] = width[0] * (fourcc_info->bpp[0] / 8); /* surface format is R8 */
        } else if (fourcc_info->num_planes == 2) {
            U = 1, V = 1; /* interleaved CbCr: both map to plane 1 */
        } else {
            assert(fourcc_info->num_components == 3);

            /* Planar 3-plane: U/V plane order varies (e.g. I420 vs YV12). */
            U = fourcc_info->components[1].plane;
            V = fourcc_info->components[2].plane;
            assert((U == 1 && V == 2) ||
                   (U == 2 && V == 1));
        }

        /* Always set width/height although they aren't used for fourcc_info->num_planes == 1 */
        width[1] = MIN(rect->x / fourcc_info->hfactor + rect->width / fourcc_info->hfactor, obj_image->image.width / fourcc_info->hfactor);
        height[1] = MIN(rect->y / fourcc_info->vfactor + rect->height / fourcc_info->vfactor, obj_image->image.height / fourcc_info->vfactor);
        pitch[1] = obj_image->image.pitches[U];
        offset[1] = obj_image->image.offsets[U];

        width[2] = MIN(rect->x / fourcc_info->hfactor + rect->width / fourcc_info->hfactor, obj_image->image.width / fourcc_info->hfactor);
        height[2] = MIN(rect->y / fourcc_info->vfactor + rect->height / fourcc_info->vfactor, obj_image->image.height / fourcc_info->vfactor);
        pitch[2] = obj_image->image.pitches[V];
        offset[2] = obj_image->image.offsets[V];
    }

    if (is_target) {
        /* Destination planes are written through byte-addressed R8-style
         * states; width is divided to match the element width (/4 dwords). */
        gen8_pp_set_surface_state(ctx, pp_context,
                                  bo, 0,
                                  width[0] / 4, height[0], pitch[0],
                                  I965_SURFACEFORMAT_R8_UINT,
                                  base_index, 1);

        if (fourcc_info->num_planes == 2) {
            gen8_pp_set_surface_state(ctx, pp_context,
                                      bo, offset[1],
                                      width[1] / 2, height[1], pitch[1],
                                      I965_SURFACEFORMAT_R8G8_SINT,
                                      base_index + 1, 1);
        } else if (fourcc_info->num_planes == 3) {
            gen8_pp_set_surface_state(ctx, pp_context,
                                      bo, offset[1],
                                      width[1] / 4, height[1], pitch[1],
                                      I965_SURFACEFORMAT_R8_SINT,
                                      base_index + 1, 1);
            gen8_pp_set_surface_state(ctx, pp_context,
                                      bo, offset[2],
                                      width[2] / 4, height[2], pitch[2],
                                      I965_SURFACEFORMAT_R8_SINT,
                                      base_index + 2, 1);
        }

        if (fourcc_info->format == I965_COLOR_RGB) {
            struct gen7_pp_static_parameter *pp_static_parameter = pp_context->pp_static_parameter;
            /* the format is MSB: X-B-G-R */
            pp_static_parameter->grf2.save_avs_rgb_swap = 0;
            if ((fourcc == VA_FOURCC_BGRA) ||
                (fourcc == VA_FOURCC_BGRX)) {
                /* It is stored as MSB: X-R-G-B */
                pp_static_parameter->grf2.save_avs_rgb_swap = 1;
            }
        }
    } else {
        /* Source side: pick the sampler surface format for plane 0. */
        int format0 = SURFACE_FORMAT_Y8_UNORM;

        switch (fourcc) {
        case VA_FOURCC_YUY2:
            format0 = SURFACE_FORMAT_YCRCB_NORMAL;
            break;

        case VA_FOURCC_UYVY:
            format0 = SURFACE_FORMAT_YCRCB_SWAPY;
            break;

        default:
            break;
        }

        if (fourcc_info->format == I965_COLOR_RGB) {
            struct gen7_pp_static_parameter *pp_static_parameter = pp_context->pp_static_parameter;
            /* Only R8G8B8A8_UNORM is supported for BGRX or RGBX */
            format0 = SURFACE_FORMAT_R8G8B8A8_UNORM;
            pp_static_parameter->grf2.src_avs_rgb_swap = 0;
            if ((fourcc == VA_FOURCC_BGRA) ||
                (fourcc == VA_FOURCC_BGRX)) {
                pp_static_parameter->grf2.src_avs_rgb_swap = 1;
            }
        }

        gen8_pp_set_surface2_state(ctx, pp_context,
                                   bo, offset[0],
                                   width[0], height[0], pitch[0],
                                   0, 0,
                                   format0, 0,
                                   base_index);

        if (fourcc_info->num_planes == 2) {
            gen8_pp_set_surface2_state(ctx, pp_context,
                                       bo, offset[1],
                                       width[1], height[1], pitch[1],
                                       0, 0,
                                       SURFACE_FORMAT_R8B8_UNORM, 0,
                                       base_index + 1);
        } else if (fourcc_info->num_planes == 3) {
            gen8_pp_set_surface2_state(ctx, pp_context,
                                       bo, offset[1],
                                       width[1], height[1], pitch[1],
                                       0, 0,
                                       SURFACE_FORMAT_R8_UNORM, 0,
                                       base_index + 1);
            gen8_pp_set_surface2_state(ctx, pp_context,
                                       bo, offset[2],
                                       width[2], height[2], pitch[2],
                                       0, 0,
                                       SURFACE_FORMAT_R8_UNORM, 0,
                                       base_index + 2);
        }
    }
}
  622.  
  623. static int
  624. pp_null_x_steps(void *private_context)
  625. {
  626.     return 1;
  627. }
  628.  
  629. static int
  630. pp_null_y_steps(void *private_context)
  631. {
  632.     return 1;
  633. }
  634.  
  635. static int
  636. pp_null_set_block_parameter(struct i965_post_processing_context *pp_context, int x, int y)
  637. {
  638.     return 0;
  639. }
  640.  
  641. static VAStatus
  642. pp_null_initialize(VADriverContextP ctx, struct i965_post_processing_context *pp_context,
  643.                    const struct i965_surface *src_surface,
  644.                    const VARectangle *src_rect,
  645.                    struct i965_surface *dst_surface,
  646.                    const VARectangle *dst_rect,
  647.                    void *filter_param)
  648. {
  649.     /* private function & data */
  650.     pp_context->pp_x_steps = pp_null_x_steps;
  651.     pp_context->pp_y_steps = pp_null_y_steps;
  652.     pp_context->private_context = NULL;
  653.     pp_context->pp_set_block_parameter = pp_null_set_block_parameter;
  654.  
  655.     dst_surface->flags = src_surface->flags;
  656.  
  657.     return VA_STATUS_SUCCESS;
  658. }
  659.  
  660. static void calculate_boundary_block_mask(struct i965_post_processing_context *pp_context, const VARectangle *dst_rect)
  661. {
  662.     int i, dst_width_adjust;
  663.     /* x offset of dest surface must be dword aligned.
  664.      * so we have to extend dst surface on left edge, and mask out pixels not interested
  665.      */
  666.     if (dst_rect->x%GPU_ASM_X_OFFSET_ALIGNMENT) {
  667.         pp_context->block_horizontal_mask_left = 0;
  668.         for (i=dst_rect->x%GPU_ASM_X_OFFSET_ALIGNMENT; i<GPU_ASM_BLOCK_WIDTH; i++)
  669.         {
  670.             pp_context->block_horizontal_mask_left |= 1<<i;
  671.         }
  672.     }
  673.     else {
  674.         pp_context->block_horizontal_mask_left = 0xffff;
  675.     }
  676.  
  677.     dst_width_adjust = dst_rect->width + dst_rect->x%GPU_ASM_X_OFFSET_ALIGNMENT;
  678.     if (dst_width_adjust%GPU_ASM_BLOCK_WIDTH){
  679.         pp_context->block_horizontal_mask_right = (1 << (dst_width_adjust%GPU_ASM_BLOCK_WIDTH)) - 1;
  680.     }
  681.     else {
  682.         pp_context->block_horizontal_mask_right = 0xffff;
  683.     }
  684.  
  685.     if (dst_rect->height%GPU_ASM_BLOCK_HEIGHT){
  686.         pp_context->block_vertical_mask_bottom = (1 << (dst_rect->height%GPU_ASM_BLOCK_HEIGHT)) - 1;
  687.     }
  688.     else {
  689.         pp_context->block_vertical_mask_bottom = 0xff;
  690.     }
  691.  
  692. }
  693.  
  694. static int
  695. gen7_pp_avs_x_steps(void *private_context)
  696. {
  697.     struct pp_avs_context *pp_avs_context = private_context;
  698.  
  699.     return pp_avs_context->dest_w / 16;
  700. }
  701.  
  702. static int
  703. gen7_pp_avs_y_steps(void *private_context)
  704. {
  705.     struct pp_avs_context *pp_avs_context = private_context;
  706.  
  707.     return pp_avs_context->dest_h / 16;
  708. }
  709.  
  710. static int
  711. gen7_pp_avs_set_block_parameter(struct i965_post_processing_context *pp_context, int x, int y)
  712. {
  713.     struct pp_avs_context *pp_avs_context = (struct pp_avs_context *)pp_context->private_context;
  714.     struct gen7_pp_inline_parameter *pp_inline_parameter = pp_context->pp_inline_parameter;
  715.  
  716.     pp_inline_parameter->grf7.destination_block_horizontal_origin = x * 16 + pp_avs_context->dest_x;
  717.     pp_inline_parameter->grf7.destination_block_vertical_origin = y * 16 + pp_avs_context->dest_y;
  718.     pp_inline_parameter->grf7.constant_0 = 0xffffffff;
  719.     pp_inline_parameter->grf7.sampler_load_main_video_x_scaling_step = pp_avs_context->horiz_range / pp_avs_context->src_w;
  720.  
  721.     return 0;
  722. }
  723.  
  724. static void gen7_update_src_surface_uv_offset(VADriverContextP    ctx,
  725.                                               struct i965_post_processing_context *pp_context,
  726.                                               const struct i965_surface *surface)
  727. {
  728.     struct gen7_pp_static_parameter *pp_static_parameter = pp_context->pp_static_parameter;
  729.     int fourcc = pp_get_surface_fourcc(ctx, surface);
  730.  
  731.     if (fourcc == VA_FOURCC_YUY2) {
  732.         pp_static_parameter->grf2.di_destination_packed_y_component_offset = 0;
  733.         pp_static_parameter->grf2.di_destination_packed_u_component_offset = 1;
  734.         pp_static_parameter->grf2.di_destination_packed_v_component_offset = 3;
  735.     } else if (fourcc == VA_FOURCC_UYVY) {
  736.         pp_static_parameter->grf2.di_destination_packed_y_component_offset = 1;
  737.         pp_static_parameter->grf2.di_destination_packed_u_component_offset = 0;
  738.         pp_static_parameter->grf2.di_destination_packed_v_component_offset = 2;
  739.     }
  740. }
  741.  
/*
 * Set up an AVS (Adaptive Video Scaling) pass on gen8: program the
 * gen7-layout CURBE (static parameters), the source/destination surface
 * messages and the gen8 sampler_8x8 AVS state (IEF/sharpness plus the
 * 17-phase scaling coefficient tables).
 *
 * src_surface/src_rect: input picture and the region read from it.
 * dst_surface/dst_rect: output picture and the region written to it.
 * filter_param: unused by this module.
 *
 * Always returns VA_STATUS_SUCCESS (failures are caught by asserts only).
 */
static VAStatus
gen8_pp_plx_avs_initialize(VADriverContextP ctx, struct i965_post_processing_context *pp_context,
                           const struct i965_surface *src_surface,
                           const VARectangle *src_rect,
                           struct i965_surface *dst_surface,
                           const VARectangle *dst_rect,
                           void *filter_param)
{
/* TODO: Add the sampler_8x8 state */
    struct pp_avs_context *pp_avs_context = (struct pp_avs_context *)&pp_context->pp_avs_context;
    struct gen7_pp_static_parameter *pp_static_parameter = pp_context->pp_static_parameter;
    struct gen8_sampler_8x8_avs *sampler_8x8;
    struct i965_sampler_8x8_coefficient *sampler_8x8_state;
    int i;
    int width[3], height[3], pitch[3], offset[3];
    int src_width, src_height;
    unsigned char *cc_ptr;

    memset(pp_static_parameter, 0, sizeof(struct gen7_pp_static_parameter));

    /* source surface: binding table index 0, read access (is_target = 0) */
    gen8_pp_set_media_rw_message_surface(ctx, pp_context, src_surface, 0, 0,
                                         src_rect,
                                         width, height, pitch, offset);
    src_height = height[0];
    src_width  = width[0];

    /* destination surface: binding table index 24, write access (is_target = 1) */
    gen8_pp_set_media_rw_message_surface(ctx, pp_context, dst_surface, 24, 1,
                                         dst_rect,
                                         width, height, pitch, offset);

    /* sampler 8x8 state, written directly into the dynamic-state buffer */
    dri_bo_map(pp_context->dynamic_state.bo, True);
    assert(pp_context->dynamic_state.bo->virtual);

    cc_ptr = (unsigned char *) pp_context->dynamic_state.bo->virtual +
                        pp_context->sampler_offset;
    /* Currently only one gen8 sampler_8x8 is initialized */
    sampler_8x8 = (struct gen8_sampler_8x8_avs *) cc_ptr;
    memset(sampler_8x8, 0, sizeof(*sampler_8x8));

    /* IEF (image enhancement filter) tuning; several coefficients are
     * carried over from the Ivybridge driver rather than HW defaults. */
    sampler_8x8->dw0.gain_factor = 44;
    sampler_8x8->dw0.weak_edge_threshold = 1;
    sampler_8x8->dw0.strong_edge_threshold = 8;
    /* Use the value like that on Ivy instead of default
     * sampler_8x8->dw0.r3x_coefficient = 5;
     */
    sampler_8x8->dw0.r3x_coefficient = 27;
    sampler_8x8->dw0.r3c_coefficient = 5;

    sampler_8x8->dw2.global_noise_estimation = 255;
    sampler_8x8->dw2.non_edge_weight = 1;
    sampler_8x8->dw2.regular_weight = 2;
    sampler_8x8->dw2.strong_edge_weight = 7;
    /* Use the value like that on Ivy instead of default
     * sampler_8x8->dw2.r5x_coefficient = 7;
     * sampler_8x8->dw2.r5cx_coefficient = 7;
     * sampler_8x8->dw2.r5c_coefficient = 7;
     */
    sampler_8x8->dw2.r5x_coefficient = 9;
    sampler_8x8->dw2.r5cx_coefficient = 8;
    sampler_8x8->dw2.r5c_coefficient = 3;

    /* Skin-tone detection / hue parameters (encoded fixed-point values;
     * the trailing comments give the logical value they represent) */
    sampler_8x8->dw3.sin_alpha = 101; /* sin_alpha = 0 */
    sampler_8x8->dw3.cos_alpha = 79; /* cos_alpha = 0 */
    sampler_8x8->dw3.sat_max = 0x1f;
    sampler_8x8->dw3.hue_max = 14;
    /* The 8tap filter will determine whether the adaptive Filter is
     * applied for all channels(dw153).
     * If the 8tap filter is disabled, the adaptive filter should be disabled.
     * Only when 8tap filter is enabled, it can be enabled or not.
     */
    sampler_8x8->dw3.enable_8tap_filter = 3;
    sampler_8x8->dw3.ief4_smooth_enable = 0;

    sampler_8x8->dw4.s3u = 0;
    sampler_8x8->dw4.diamond_margin = 4;
    sampler_8x8->dw4.vy_std_enable = 0;
    sampler_8x8->dw4.umid = 110;
    sampler_8x8->dw4.vmid = 154;

    sampler_8x8->dw5.diamond_dv = 0;
    sampler_8x8->dw5.diamond_th = 35;
    sampler_8x8->dw5.diamond_alpha = 100; /* diamond_alpha = 0 */
    sampler_8x8->dw5.hs_margin = 3;
    sampler_8x8->dw5.diamond_du = 2;

    /* Piecewise-linear skin-tone boundary points (dw6..dw15) */
    sampler_8x8->dw6.y_point1 = 46;
    sampler_8x8->dw6.y_point2 = 47;
    sampler_8x8->dw6.y_point3 = 254;
    sampler_8x8->dw6.y_point4 = 255;

    sampler_8x8->dw7.inv_margin_vyl = 3300; /* inv_margin_vyl = 0 */

    sampler_8x8->dw8.inv_margin_vyu = 1600; /* inv_margin_vyu = 0 */
    sampler_8x8->dw8.p0l = 46;
    sampler_8x8->dw8.p1l = 216;

    sampler_8x8->dw9.p2l = 236;
    sampler_8x8->dw9.p3l = 236;
    sampler_8x8->dw9.b0l = 133;
    sampler_8x8->dw9.b1l = 130;

    sampler_8x8->dw10.b2l = 130;
    sampler_8x8->dw10.b3l = 130;
    /* s0l = -5 / 256. s2.8 */
    sampler_8x8->dw10.s0l = 1029;    /* s0l = 0 */
    sampler_8x8->dw10.y_slope2 = 31; /* y_slop2 = 0 */

    sampler_8x8->dw11.s1l = 0;
    sampler_8x8->dw11.s2l = 0;

    sampler_8x8->dw12.s3l = 0;
    sampler_8x8->dw12.p0u = 46;
    sampler_8x8->dw12.p1u = 66;
    sampler_8x8->dw12.y_slope1 = 31; /* y_slope1 = 0 */

    sampler_8x8->dw13.p2u = 130;
    sampler_8x8->dw13.p3u = 236;
    sampler_8x8->dw13.b0u = 143;
    sampler_8x8->dw13.b1u = 163;

    sampler_8x8->dw14.b2u = 200;
    sampler_8x8->dw14.b3u = 140;
    sampler_8x8->dw14.s0u = 256;  /* s0u = 0 */

    sampler_8x8->dw15.s1u = 113; /* s1u = 0 */
    sampler_8x8->dw15.s2u = 1203; /* s2u = 0 */

    sampler_8x8_state = sampler_8x8->coefficients;

    /* Build the 17 phase entries of the scaling coefficient table as a
     * simple 2-tap (bilinear) kernel: c3 = 1 - i/16 and c4 = i/16 in
     * s1.6 fixed point, all other taps zero, for both the 0x/0y (Y) and
     * 1x/1y (U/V) tables. */
    for (i = 0; i < 17; i++) {
        float coff;
        coff = i;
        coff = coff / 16;

        memset(sampler_8x8_state, 0, sizeof(*sampler_8x8_state));
        /* for Y channel, currently ignore */
        sampler_8x8_state->dw0.table_0x_filter_c0 = 0x0;
        sampler_8x8_state->dw0.table_0x_filter_c1 = 0x0;
        sampler_8x8_state->dw0.table_0x_filter_c2 = 0x0;
        sampler_8x8_state->dw0.table_0x_filter_c3 =
                                intel_format_convert(1 - coff, 1, 6, 0);
        sampler_8x8_state->dw1.table_0x_filter_c4 =
                                intel_format_convert(coff, 1, 6, 0);
        sampler_8x8_state->dw1.table_0x_filter_c5 = 0x0;
        sampler_8x8_state->dw1.table_0x_filter_c6 = 0x0;
        sampler_8x8_state->dw1.table_0x_filter_c7 = 0x0;
        sampler_8x8_state->dw2.table_0y_filter_c0 = 0x0;
        sampler_8x8_state->dw2.table_0y_filter_c1 = 0x0;
        sampler_8x8_state->dw2.table_0y_filter_c2 = 0x0;
        sampler_8x8_state->dw2.table_0y_filter_c3 =
                                intel_format_convert(1 - coff, 1, 6, 0);
        sampler_8x8_state->dw3.table_0y_filter_c4 =
                                intel_format_convert(coff, 1, 6, 0);
        sampler_8x8_state->dw3.table_0y_filter_c5 = 0x0;
        sampler_8x8_state->dw3.table_0y_filter_c6 = 0x0;
        sampler_8x8_state->dw3.table_0y_filter_c7 = 0x0;
        /* for U/V channel, 0.25 */
        sampler_8x8_state->dw4.table_1x_filter_c0 = 0x0;
        sampler_8x8_state->dw4.table_1x_filter_c1 = 0x0;
        sampler_8x8_state->dw4.table_1x_filter_c2 = 0x0;
        sampler_8x8_state->dw4.table_1x_filter_c3 =
                                intel_format_convert(1 - coff, 1, 6, 0);
        sampler_8x8_state->dw5.table_1x_filter_c4 =
                                intel_format_convert(coff, 1, 6, 0);
        sampler_8x8_state->dw5.table_1x_filter_c5 = 0x00;
        sampler_8x8_state->dw5.table_1x_filter_c6 = 0x0;
        sampler_8x8_state->dw5.table_1x_filter_c7 = 0x0;
        sampler_8x8_state->dw6.table_1y_filter_c0 = 0x0;
        sampler_8x8_state->dw6.table_1y_filter_c1 = 0x0;
        sampler_8x8_state->dw6.table_1y_filter_c2 = 0x0;
        sampler_8x8_state->dw6.table_1y_filter_c3 =
                                intel_format_convert(1 - coff, 1, 6, 0);
        sampler_8x8_state->dw7.table_1y_filter_c4 =
                                intel_format_convert(coff, 1, 6,0);
        sampler_8x8_state->dw7.table_1y_filter_c5 = 0x0;
        sampler_8x8_state->dw7.table_1y_filter_c6 = 0x0;
        sampler_8x8_state->dw7.table_1y_filter_c7 = 0x0;
        sampler_8x8_state++;
    }

    sampler_8x8->dw152.default_sharpness_level = 0;
    sampler_8x8->dw153.adaptive_filter_for_all_channel = 1;
    sampler_8x8->dw153.bypass_y_adaptive_filtering = 1;
    sampler_8x8->dw153.bypass_x_adaptive_filtering = 1;

    dri_bo_unmap(pp_context->dynamic_state.bo);


    /* private function & data */
    pp_context->pp_x_steps = gen7_pp_avs_x_steps;
    pp_context->pp_y_steps = gen7_pp_avs_y_steps;
    pp_context->private_context = &pp_context->pp_avs_context;
    pp_context->pp_set_block_parameter = gen7_pp_avs_set_block_parameter;

    /* Destination is walked in 16x16 blocks, hence the 16-alignment */
    pp_avs_context->dest_x = dst_rect->x;
    pp_avs_context->dest_y = dst_rect->y;
    pp_avs_context->dest_w = ALIGN(dst_rect->width, 16);
    pp_avs_context->dest_h = ALIGN(dst_rect->height, 16);
    pp_avs_context->src_w = src_rect->width;
    pp_avs_context->src_h = src_rect->height;
    pp_avs_context->horiz_range = (float)src_rect->width / src_width;

    /* NOTE(review): dw mixes a 16-pixel block count with a pixel width
     * via MAX(); this matches the original gen7/gen8 code — verify the
     * intended horizontal step semantics against the kernel shader. */
    int dw = (pp_avs_context->src_w - 1) / 16 + 1;
    dw = MAX(dw, dst_rect->width);

    pp_static_parameter->grf1.pointer_to_inline_parameter = 7;
    pp_static_parameter->grf2.avs_wa_enable = 0; /* It is not required on GEN8+ */
    pp_static_parameter->grf2.alpha = 255;

    /* Normalized scaling steps and frame origins fed to the AVS kernel */
    pp_static_parameter->grf3.sampler_load_horizontal_scaling_step_ratio = (float) pp_avs_context->src_w / dw;
    pp_static_parameter->grf4.sampler_load_vertical_scaling_step = (float) src_rect->height / src_height / dst_rect->height;
    pp_static_parameter->grf5.sampler_load_vertical_frame_origin = (float) src_rect->y / src_height -
        (float) pp_avs_context->dest_y * pp_static_parameter->grf4.sampler_load_vertical_scaling_step;
    pp_static_parameter->grf6.sampler_load_horizontal_frame_origin = (float) src_rect->x / src_width -
        (float) pp_avs_context->dest_x * pp_avs_context->horiz_range / dw;

    gen7_update_src_surface_uv_offset(ctx, pp_context, dst_surface);

    dst_surface->flags = src_surface->flags;

    return VA_STATUS_SUCCESS;
}
  967.  
/*
 * Common gen8 PP setup: (re)allocate the surface-state/binding-table
 * buffer and the dynamic-state buffer, lay out the dynamic state
 * (CURBE, interface descriptor table, sampler state), clear the
 * parameter blocks, then dispatch to the selected pp module's
 * initialize() hook.
 *
 * Returns the module's status, or VA_STATUS_ERROR_UNIMPLEMENTED when
 * the module has no initialize() hook.
 */
static VAStatus
gen8_pp_initialize(
    VADriverContextP   ctx,
    struct i965_post_processing_context *pp_context,
    const struct i965_surface *src_surface,
    const VARectangle *src_rect,
    struct i965_surface *dst_surface,
    const VARectangle *dst_rect,
    int                pp_index,
    void * filter_param
)
{
    VAStatus va_status;
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    dri_bo *bo;
    int bo_size;
    unsigned int end_offset;
    struct pp_module *pp_module;
    int static_param_size, inline_param_size;

    /* One surface state plus one binding table entry per PP surface */
    dri_bo_unreference(pp_context->surface_state_binding_table.bo);
    bo = dri_bo_alloc(i965->intel.bufmgr,
                      "surface state & binding table",
                      (SURFACE_STATE_PADDED_SIZE + sizeof(unsigned int)) * MAX_PP_SURFACES,
                      4096);
    assert(bo);
    pp_context->surface_state_binding_table.bo = bo;

    pp_context->idrt.num_interface_descriptors = 0;

    pp_context->sampler_size = 2 * 4096;

    /* Dynamic state = 4KB slack + CURBE + sampler state + IDRT */
    bo_size = 4096 + pp_context->curbe_size + pp_context->sampler_size
                + pp_context->idrt_size;

    dri_bo_unreference(pp_context->dynamic_state.bo);
    bo = dri_bo_alloc(i965->intel.bufmgr,
                      "dynamic_state",
                      bo_size,
                      4096);

    assert(bo);
    pp_context->dynamic_state.bo = bo;
    pp_context->dynamic_state.bo_size = bo_size;

    end_offset = 0;
    pp_context->dynamic_state.end_offset = 0;

    /* Constant buffer offset (each region 64-byte aligned) */
    pp_context->curbe_offset = ALIGN(end_offset, 64);
    end_offset = pp_context->curbe_offset + pp_context->curbe_size;

    /* Interface descriptor offset */
    pp_context->idrt_offset = ALIGN(end_offset, 64);
    end_offset = pp_context->idrt_offset + pp_context->idrt_size;

    /* Sampler state offset */
    pp_context->sampler_offset = ALIGN(end_offset, 64);
    end_offset = pp_context->sampler_offset + pp_context->sampler_size;

    /* update the end offset of dynamic_state */
    pp_context->dynamic_state.end_offset = ALIGN(end_offset, 64);

    static_param_size = sizeof(struct gen7_pp_static_parameter);
    inline_param_size = sizeof(struct gen7_pp_inline_parameter);

    memset(pp_context->pp_static_parameter, 0, static_param_size);
    memset(pp_context->pp_inline_parameter, 0, inline_param_size);

    assert(pp_index >= PP_NULL && pp_index < NUM_PP_MODULES);
    pp_context->current_pp = pp_index;
    pp_module = &pp_context->pp_modules[pp_index];

    if (pp_module->initialize)
        va_status = pp_module->initialize(ctx, pp_context,
                                          src_surface,
                                          src_rect,
                                          dst_surface,
                                          dst_rect,
                                          filter_param);
    else
        va_status = VA_STATUS_ERROR_UNIMPLEMENTED;

    /* NOTE(review): runs even when initialize() failed; harmless because
     * the caller skips the pipeline on any non-success status. */
    calculate_boundary_block_mask(pp_context, dst_rect);

    return va_status;
}
  1055.  
/*
 * Append one gen8 interface descriptor (kernel entry point, sampler and
 * binding-table pointers) for the currently selected pp module to the
 * IDRT area inside the dynamic-state buffer.
 */
static void
gen8_pp_interface_descriptor_table(VADriverContextP   ctx,
                                   struct i965_post_processing_context *pp_context)
{
    struct gen8_interface_descriptor_data *desc;
    dri_bo *bo;
    int pp_index = pp_context->current_pp;
    unsigned char *cc_ptr;

    bo = pp_context->dynamic_state.bo;

    dri_bo_map(bo, 1);
    assert(bo->virtual);
    cc_ptr = (unsigned char *)bo->virtual + pp_context->idrt_offset;

    /* Slot for the next descriptor to be appended */
    desc = (struct gen8_interface_descriptor_data *) cc_ptr +
                pp_context->idrt.num_interface_descriptors;

    memset(desc, 0, sizeof(*desc));
    /* Kernel start pointer is expressed in 64-byte units */
    desc->desc0.kernel_start_pointer =
                pp_context->pp_modules[pp_index].kernel.kernel_offset >> 6; /* reloc */
    desc->desc2.single_program_flow = 1;
    desc->desc2.floating_point_mode = FLOATING_POINT_IEEE_754;
    desc->desc3.sampler_count = 0;      /* 1 - 4 samplers used */
    /* Sampler state pointer is expressed in 32-byte units */
    desc->desc3.sampler_state_pointer = pp_context->sampler_offset >> 5;
    desc->desc4.binding_table_entry_count = 0;
    desc->desc4.binding_table_pointer = (BINDING_TABLE_OFFSET >> 5);
    desc->desc5.constant_urb_entry_read_offset = 0;

    desc->desc5.constant_urb_entry_read_length = 6; /* grf 1-6 */

    dri_bo_unmap(bo);
    pp_context->idrt.num_interface_descriptors++;
}
  1090.  
  1091.  
  1092. static void
  1093. gen8_pp_upload_constants(VADriverContextP ctx,
  1094.                          struct i965_post_processing_context *pp_context)
  1095. {
  1096.     unsigned char *constant_buffer;
  1097.     int param_size;
  1098.  
  1099.     assert(sizeof(struct gen7_pp_static_parameter) == 192);
  1100.  
  1101.     param_size = sizeof(struct gen7_pp_static_parameter);
  1102.  
  1103.     dri_bo_map(pp_context->dynamic_state.bo, 1);
  1104.     assert(pp_context->dynamic_state.bo->virtual);
  1105.     constant_buffer = (unsigned char *) pp_context->dynamic_state.bo->virtual +
  1106.                         pp_context->curbe_offset;
  1107.  
  1108.     memcpy(constant_buffer, pp_context->pp_static_parameter, param_size);
  1109.     dri_bo_unmap(pp_context->dynamic_state.bo);
  1110.     return;
  1111. }
  1112.  
/*
 * Write the indirect states (interface descriptor table and CURBE data)
 * into the dynamic-state buffer before the pipeline commands that
 * reference them are emitted.
 */
static void
gen8_pp_states_setup(VADriverContextP ctx,
                     struct i965_post_processing_context *pp_context)
{
    gen8_pp_interface_descriptor_table(ctx, pp_context);
    gen8_pp_upload_constants(ctx, pp_context);
}
  1120.  
/* Switch the GPU to the media pipeline via PIPELINE_SELECT. */
static void
gen6_pp_pipeline_select(VADriverContextP ctx,
                        struct i965_post_processing_context *pp_context)
{
    struct intel_batchbuffer *batch = pp_context->batch;

    BEGIN_BATCH(batch, 1);
    OUT_BATCH(batch, CMD_PIPELINE_SELECT | PIPELINE_SELECT_MEDIA);
    ADVANCE_BATCH(batch);
}
  1131.  
/*
 * Emit gen8 STATE_BASE_ADDRESS (16 dwords): surface state base points at
 * the binding-table BO, dynamic state base at the dynamic-state BO, and
 * instruction base at the kernel-shader BO.  Offsets emitted elsewhere
 * (curbe_offset, idrt_offset, sampler_offset) are relative to these bases.
 */
static void
gen8_pp_state_base_address(VADriverContextP ctx,
                           struct i965_post_processing_context *pp_context)
{
    struct intel_batchbuffer *batch = pp_context->batch;

    BEGIN_BATCH(batch, 16);
    OUT_BATCH(batch, CMD_STATE_BASE_ADDRESS | (16 - 2));
        /* DW1 Generate state address */
    OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
        OUT_BATCH(batch, 0);
        OUT_BATCH(batch, 0);
        /* DW4. Surface state address */
    OUT_RELOC(batch, pp_context->surface_state_binding_table.bo, I915_GEM_DOMAIN_INSTRUCTION, 0, BASE_ADDRESS_MODIFY); /* Surface state base address */
        OUT_BATCH(batch, 0);
        /* DW6. Dynamic state address */
    OUT_RELOC(batch, pp_context->dynamic_state.bo, I915_GEM_DOMAIN_RENDER | I915_GEM_DOMAIN_SAMPLER,
                0, 0 | BASE_ADDRESS_MODIFY);
        OUT_BATCH(batch, 0);

        /* DW8. Indirect object address */
    OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
        OUT_BATCH(batch, 0);

        /* DW10. Instruction base address */
    OUT_RELOC(batch, pp_context->instruction_state.bo, I915_GEM_DOMAIN_INSTRUCTION, 0, BASE_ADDRESS_MODIFY);
        OUT_BATCH(batch, 0);

    /* DW12-15: buffer size limits (0xFFFF pages, i.e. effectively
     * unbounded) with the modify-enable bit set on each. */
    OUT_BATCH(batch, 0xFFFF0000 | BASE_ADDRESS_MODIFY);
    OUT_BATCH(batch, 0xFFFF0000 | BASE_ADDRESS_MODIFY);
    OUT_BATCH(batch, 0xFFFF0000 | BASE_ADDRESS_MODIFY);
    OUT_BATCH(batch, 0xFFFF0000 | BASE_ADDRESS_MODIFY);
    ADVANCE_BATCH(batch);
}
  1166.  
/*
 * Emit MEDIA_VFE_STATE: maximum thread count, URB entry count/size and
 * CURBE allocation size, taken from pp_context->vfe_gpu_state.
 */
static void
gen8_pp_vfe_state(VADriverContextP ctx,
                  struct i965_post_processing_context *pp_context)
{
    struct intel_batchbuffer *batch = pp_context->batch;

    BEGIN_BATCH(batch, 9);
    OUT_BATCH(batch, CMD_MEDIA_VFE_STATE | (9 - 2));
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, 0);
    /* DW3: (max threads - 1) and number of URB entries */
    OUT_BATCH(batch,
              (pp_context->vfe_gpu_state.max_num_threads - 1) << 16 |
              pp_context->vfe_gpu_state.num_urb_entries << 8);
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch,
              (pp_context->vfe_gpu_state.urb_entry_size) << 16 |
                /* URB Entry Allocation Size, in 256 bits unit */
              (pp_context->vfe_gpu_state.curbe_allocation_size));
                /* CURBE Allocation Size, in 256 bits unit */
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, 0);
    ADVANCE_BATCH(batch);
}
  1191.  
/*
 * Flush outstanding media state, then emit
 * MEDIA_INTERFACE_DESCRIPTOR_LOAD pointing at the IDRT inside the
 * dynamic-state buffer.
 */
static void
gen8_interface_descriptor_load(VADriverContextP ctx,
                               struct i965_post_processing_context *pp_context)
{
    struct intel_batchbuffer *batch = pp_context->batch;

    BEGIN_BATCH(batch, 6);

    OUT_BATCH(batch, CMD_MEDIA_STATE_FLUSH);
    OUT_BATCH(batch, 0);

    OUT_BATCH(batch, CMD_MEDIA_INTERFACE_DESCRIPTOR_LOAD | (4 - 2));
    OUT_BATCH(batch, 0);
    /* Total descriptor data length, in bytes */
    OUT_BATCH(batch,
              pp_context->idrt.num_interface_descriptors * sizeof(struct gen8_interface_descriptor_data));
    /* Offset of the IDRT relative to the dynamic state base address */
    OUT_BATCH(batch, pp_context->idrt_offset);
    ADVANCE_BATCH(batch);
}
  1210.  
  1211. static void
  1212. gen8_pp_curbe_load(VADriverContextP ctx,
  1213.                    struct i965_post_processing_context *pp_context)
  1214. {
  1215.     struct intel_batchbuffer *batch = pp_context->batch;
  1216.     struct i965_driver_data *i965 = i965_driver_data(ctx);
  1217.     int param_size = 64;
  1218.  
  1219.     param_size = sizeof(struct gen7_pp_static_parameter);
  1220.  
  1221.     BEGIN_BATCH(batch, 4);
  1222.     OUT_BATCH(batch, CMD_MEDIA_CURBE_LOAD | (4 - 2));
  1223.     OUT_BATCH(batch, 0);
  1224.     OUT_BATCH(batch,
  1225.               param_size);
  1226.     OUT_BATCH(batch, pp_context->curbe_offset);
  1227.     ADVANCE_BATCH(batch);
  1228. }
  1229.  
/*
 * Walk the destination in x_steps * y_steps blocks, building a secondary
 * batch buffer that holds one MEDIA_OBJECT (with the per-block inline
 * parameters) plus a MEDIA_STATE_FLUSH for each block, then chain to it
 * with MI_BATCH_BUFFER_START and flush the primary batch.
 */
static void
gen8_pp_object_walker(VADriverContextP ctx,
                      struct i965_post_processing_context *pp_context)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = pp_context->batch;
    int x, x_steps, y, y_steps;
    int param_size, command_length_in_dws, extra_cmd_in_dws;
    dri_bo *command_buffer;
    unsigned int *command_ptr;

    param_size = sizeof(struct gen7_pp_inline_parameter);

    x_steps = pp_context->pp_x_steps(pp_context->private_context);
    y_steps = pp_context->pp_y_steps(pp_context->private_context);
    /* MEDIA_OBJECT header (6 dws) + inline parameters, plus a 2-dw
     * MEDIA_STATE_FLUSH per block; +64 bytes slack for padding/BB_END */
    command_length_in_dws = 6 + (param_size >> 2);
    extra_cmd_in_dws = 2;
    command_buffer = dri_bo_alloc(i965->intel.bufmgr,
                                  "command objects buffer",
                                  (command_length_in_dws + extra_cmd_in_dws) * 4 * x_steps * y_steps + 64,
                                  4096);

    dri_bo_map(command_buffer, 1);
    command_ptr = command_buffer->virtual;

    for (y = 0; y < y_steps; y++) {
        for (x = 0; x < x_steps; x++) {
            /* pp_set_block_parameter() returns non-zero for blocks that
             * fall entirely outside the destination and can be skipped */
            if (!pp_context->pp_set_block_parameter(pp_context, x, y)) {

                *command_ptr++ = (CMD_MEDIA_OBJECT | (command_length_in_dws - 2));
                *command_ptr++ = 0;
                *command_ptr++ = 0;
                *command_ptr++ = 0;
                *command_ptr++ = 0;
                *command_ptr++ = 0;
                memcpy(command_ptr, pp_context->pp_inline_parameter, param_size);
                command_ptr += (param_size >> 2);

                *command_ptr++ = CMD_MEDIA_STATE_FLUSH;
                *command_ptr++ = 0;
            }
        }
    }

    /* NOTE(review): pad dword so the trailing MI_BATCH_BUFFER_END pair
     * lands QWORD-aligned; the condition uses the worst-case (unskipped)
     * command count, inherited from the gen7 code — verify. */
    if ((command_length_in_dws + extra_cmd_in_dws) * x_steps * y_steps % 2 == 0)
        *command_ptr++ = 0;

    *command_ptr++ = MI_BATCH_BUFFER_END;
    *command_ptr++ = 0;

    dri_bo_unmap(command_buffer);

    /* Chain into the second-level batch (bit 0 selects 2nd-level start) */
    BEGIN_BATCH(batch, 3);
    OUT_BATCH(batch, MI_BATCH_BUFFER_START | (1 << 8) | (1 << 0));
    OUT_RELOC(batch, command_buffer,
              I915_GEM_DOMAIN_COMMAND, 0, 0);
    OUT_BATCH(batch, 0);
    ADVANCE_BATCH(batch);

    dri_bo_unreference(command_buffer);

    /* Have to execute the batch buffer here because MI_BATCH_BUFFER_END
     * will cause control to pass back to ring buffer
     */
    intel_batchbuffer_end_atomic(batch);
    intel_batchbuffer_flush(batch);
    intel_batchbuffer_start_atomic(batch, 0x1000);
}
  1298.  
/*
 * Build and submit the full gen8 media pipeline for one PP pass:
 * pipeline select, base addresses, VFE state, CURBE, interface
 * descriptors, then the object walker (which also flushes the batch).
 *
 * NOTE(review): gen8_pp_vfe_state() is emitted twice — before the CURBE
 * load and again after the descriptor load.  This matches the original
 * code; confirm against the Broadwell PRM whether the second emit is
 * actually required.
 */
static void
gen8_pp_pipeline_setup(VADriverContextP ctx,
                       struct i965_post_processing_context *pp_context)
{
    struct intel_batchbuffer *batch = pp_context->batch;

    intel_batchbuffer_start_atomic(batch, 0x1000);
    intel_batchbuffer_emit_mi_flush(batch);
    gen6_pp_pipeline_select(ctx, pp_context);
    gen8_pp_state_base_address(ctx, pp_context);
    gen8_pp_vfe_state(ctx, pp_context);
    gen8_pp_curbe_load(ctx, pp_context);
    gen8_interface_descriptor_load(ctx, pp_context);
    gen8_pp_vfe_state(ctx, pp_context);
    gen8_pp_object_walker(ctx, pp_context);
    intel_batchbuffer_end_atomic(batch);
}
  1316.  
  1317. static VAStatus
  1318. gen8_post_processing(
  1319.     VADriverContextP   ctx,
  1320.     struct i965_post_processing_context *pp_context,
  1321.     const struct i965_surface *src_surface,
  1322.     const VARectangle *src_rect,
  1323.     struct i965_surface *dst_surface,
  1324.     const VARectangle *dst_rect,
  1325.     int                pp_index,
  1326.     void * filter_param
  1327. )
  1328. {
  1329.     VAStatus va_status;
  1330.  
  1331.     va_status = gen8_pp_initialize(ctx, pp_context,
  1332.                                    src_surface,
  1333.                                    src_rect,
  1334.                                    dst_surface,
  1335.                                    dst_rect,
  1336.                                    pp_index,
  1337.                                    filter_param);
  1338.  
  1339.     if (va_status == VA_STATUS_SUCCESS) {
  1340.         gen8_pp_states_setup(ctx, pp_context);
  1341.         gen8_pp_pipeline_setup(ctx, pp_context);
  1342.     }
  1343.  
  1344.     return va_status;
  1345. }
  1346.  
  1347. static void
  1348. gen8_post_processing_context_finalize(struct i965_post_processing_context *pp_context)
  1349. {
  1350.     dri_bo_unreference(pp_context->surface_state_binding_table.bo);
  1351.     pp_context->surface_state_binding_table.bo = NULL;
  1352.  
  1353.     dri_bo_unreference(pp_context->pp_dndi_context.stmm_bo);
  1354.     pp_context->pp_dndi_context.stmm_bo = NULL;
  1355.  
  1356.     dri_bo_unreference(pp_context->pp_dn_context.stmm_bo);
  1357.     pp_context->pp_dn_context.stmm_bo = NULL;
  1358.  
  1359.     if (pp_context->instruction_state.bo) {
  1360.         dri_bo_unreference(pp_context->instruction_state.bo);
  1361.         pp_context->instruction_state.bo = NULL;
  1362.     }
  1363.  
  1364.     if (pp_context->indirect_state.bo) {
  1365.         dri_bo_unreference(pp_context->indirect_state.bo);
  1366.         pp_context->indirect_state.bo = NULL;
  1367.     }
  1368.  
  1369.     if (pp_context->dynamic_state.bo) {
  1370.         dri_bo_unreference(pp_context->dynamic_state.bo);
  1371.         pp_context->dynamic_state.bo = NULL;
  1372.     }
  1373.  
  1374.     free(pp_context->pp_static_parameter);
  1375.     free(pp_context->pp_inline_parameter);
  1376.     pp_context->pp_static_parameter = NULL;
  1377.     pp_context->pp_inline_parameter = NULL;
  1378. }
  1379.  
  1380. #define VPP_CURBE_ALLOCATION_SIZE       32
  1381.  
/*
 * One-time initialization of the gen8 post-processing context (data):
 * set the VFE defaults and entry points, copy the module table, pack all
 * pp kernels into a single instruction-state BO (64-byte aligned), and
 * allocate the static/inline parameter blocks.
 *
 * On kernel-BO allocation failure a warning is logged and the context is
 * left partially initialized (finalize() tolerates this).
 */
void
gen8_post_processing_context_init(VADriverContextP ctx,
                                  void *data,
                                  struct intel_batchbuffer *batch)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    int i, kernel_size;
    unsigned int kernel_offset, end_offset;
    unsigned char *kernel_ptr;
    struct pp_module *pp_module;
    struct i965_post_processing_context *pp_context = data;

    /* Fixed VFE configuration used by every gen8 PP pass */
    {
        pp_context->vfe_gpu_state.max_num_threads = 60;
        pp_context->vfe_gpu_state.num_urb_entries = 59;
        pp_context->vfe_gpu_state.gpgpu_mode = 0;
        pp_context->vfe_gpu_state.urb_entry_size = 16 - 1;
        pp_context->vfe_gpu_state.curbe_allocation_size = VPP_CURBE_ALLOCATION_SIZE;
    }

    pp_context->intel_post_processing = gen8_post_processing;
    pp_context->finalize = gen8_post_processing_context_finalize;

    assert(NUM_PP_MODULES == ARRAY_ELEMS(pp_modules_gen8));

    memcpy(pp_context->pp_modules, pp_modules_gen8, sizeof(pp_context->pp_modules));

    /* 4KB base covers the per-kernel 64-byte alignment padding */
    kernel_size = 4096 ;

    for (i = 0; i < NUM_PP_MODULES; i++) {
        pp_module = &pp_context->pp_modules[i];

        if (pp_module->kernel.bin && pp_module->kernel.size) {
            kernel_size += pp_module->kernel.size;
        }
    }

    pp_context->instruction_state.bo = dri_bo_alloc(i965->intel.bufmgr,
                                  "kernel shader",
                                  kernel_size,
                                  0x1000);
    if (pp_context->instruction_state.bo == NULL) {
        WARN_ONCE("failure to allocate the buffer space for kernel shader in VPP\n");
        return;
    }

    assert(pp_context->instruction_state.bo);


    pp_context->instruction_state.bo_size = kernel_size;
    pp_context->instruction_state.end_offset = 0;
    end_offset = 0;

    dri_bo_map(pp_context->instruction_state.bo, 1);
    kernel_ptr = (unsigned char *)(pp_context->instruction_state.bo->virtual);

    /* Copy each kernel binary in at a 64-byte-aligned offset and record
     * the offset for interface-descriptor programming */
    for (i = 0; i < NUM_PP_MODULES; i++) {
        pp_module = &pp_context->pp_modules[i];

        kernel_offset = ALIGN(end_offset, 64);
        pp_module->kernel.kernel_offset = kernel_offset;

        if (pp_module->kernel.bin && pp_module->kernel.size) {

            memcpy(kernel_ptr + kernel_offset, pp_module->kernel.bin, pp_module->kernel.size);
            end_offset = kernel_offset + pp_module->kernel.size;
        }
    }

    pp_context->instruction_state.end_offset = ALIGN(end_offset, 64);

    dri_bo_unmap(pp_context->instruction_state.bo);

    /* static & inline parameters */
    /* NOTE(review): calloc results are not checked here; callers rely on
     * the later memset/memcpy not faulting — consider adding checks. */
    pp_context->pp_static_parameter = calloc(sizeof(struct gen7_pp_static_parameter), 1);
    pp_context->pp_inline_parameter = calloc(sizeof(struct gen7_pp_inline_parameter), 1);

    pp_context->pp_dndi_context.current_out_surface = VA_INVALID_SURFACE;
    pp_context->pp_dndi_context.current_out_obj_surface = NULL;
    pp_context->pp_dndi_context.frame_order = -1;
    pp_context->batch = batch;

    /* Room for five interface descriptors; 256-byte CURBE region */
    pp_context->idrt_size = 5 * sizeof(struct gen8_interface_descriptor_data);
    pp_context->curbe_size = 256;
}
  1467.