Subversion Repositories Kolibri OS

Rev

Blame | Last modification | View Log | RSS feed

  1. /*
  2.  * Copyright © 2014 Intel Corporation
  3.  *
  4.  * Permission is hereby granted, free of charge, to any person obtaining a
  5.  * copy of this software and associated documentation files (the
  6.  * "Software"), to deal in the Software without restriction, including
  7.  * without limitation the rights to use, copy, modify, merge, publish,
  8.  * distribute, sub license, and/or sell copies of the Software, and to
  9.  * permit persons to whom the Software is furnished to do so, subject to
  10.  * the following conditions:
  11.  *
  12.  * The above copyright notice and this permission notice (including the
  13.  * next paragraph) shall be included in all copies or substantial portions
  14.  * of the Software.
  15.  *
  16.  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
  17.  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  18.  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
  19.  * IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR
  20.  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
  21.  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
  22.  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  23.  *
  24.  * Authors:
  25.  *    Xiang Haihao <haihao.xiang@intel.com>
  26.  *    Zhao Yakui <yakui.zhao@intel.com>
  27.  *
  28.  */
  29.  
  30. #include <stdio.h>
  31. #include <stdlib.h>
  32. #include <string.h>
  33. #include <assert.h>
  34.  
  35. #include "intel_batchbuffer.h"
  36. #include "intel_driver.h"
  37. #include "i965_defines.h"
  38. #include "i965_structs.h"
  39. #include "i965_drv_video.h"
  40. #include "i965_post_processing.h"
  41. #include "i965_render.h"
  42. #include "i965_yuv_coefs.h"
  43. #include "intel_media.h"
  44.  
/* Gen8 uses the gen8 padded surface-state size for every slot in the
 * surface state / binding table BO. */
#define SURFACE_STATE_PADDED_SIZE               SURFACE_STATE_PADDED_SIZE_GEN8

/* Byte offset of the index-th SURFACE_STATE slot inside the state BO. */
#define SURFACE_STATE_OFFSET(index)             (SURFACE_STATE_PADDED_SIZE * index)
/* The binding table is laid out immediately after MAX_PP_SURFACES states. */
#define BINDING_TABLE_OFFSET                    SURFACE_STATE_OFFSET(MAX_PP_SURFACES)

/* Geometry of one GPU kernel dispatch block, in pixels, and the required
 * alignment of the destination x offset (see calculate_boundary_block_mask). */
#define GPU_ASM_BLOCK_WIDTH         16
#define GPU_ASM_BLOCK_HEIGHT        8
#define GPU_ASM_X_OFFSET_ALIGNMENT  4

/* NOTE(review): despite the "SUCCESS" name this expands to 0xFFFFFFFE,
 * which looks like an error-style sentinel rather than VA_STATUS_SUCCESS
 * (0) — confirm the intent before using it. */
#define VA_STATUS_SUCCESS_1                     0xFFFFFFFE
  55.  
/* Forward declarations for the module initializers referenced by the
 * pp_modules_gen8 table below. */

/* No-op initializer used by modules with no gen8 kernel (NULL, DNDI, DN). */
VAStatus pp_null_initialize(VADriverContextP ctx, struct i965_post_processing_context *pp_context,
                            const struct i965_surface *src_surface,
                            const VARectangle *src_rect,
                            struct i965_surface *dst_surface,
                            const VARectangle *dst_rect,
                            void *filter_param);

/* Shared initializer for the load/save, scaling and AVS modules. */
VAStatus gen8_pp_plx_avs_initialize(VADriverContextP ctx, struct i965_post_processing_context *pp_context,
                                    const struct i965_surface *src_surface,
                                    const VARectangle *src_rect,
                                    struct i965_surface *dst_surface,
                                    const VARectangle *dst_rect,
                                    void *filter_param);
  69.  
/* TODO: Modify the shader and then compile it again.
 * Currently it is derived from Haswell. */

/* Pre-compiled gen8 media kernels. The .g8b files contain raw instruction
 * words produced by the shader build and are spliced in via #include.
 * Empty arrays below mean the corresponding module has no gen8 kernel. */
static const uint32_t pp_null_gen8[][4] = {
};

static const uint32_t pp_nv12_load_save_nv12_gen8[][4] = {
#include "shaders/post_processing/gen8/pl2_to_pl2.g8b"
};

static const uint32_t pp_nv12_load_save_pl3_gen8[][4] = {
#include "shaders/post_processing/gen8/pl2_to_pl3.g8b"
};

static const uint32_t pp_pl3_load_save_nv12_gen8[][4] = {
#include "shaders/post_processing/gen8/pl3_to_pl2.g8b"
};

static const uint32_t pp_pl3_load_save_pl3_gen8[][4] = {
#include "shaders/post_processing/gen8/pl3_to_pl3.g8b"
};

/* Scaling and AVS currently reuse the plain PL2->PL2 copy kernel. */
static const uint32_t pp_nv12_scaling_gen8[][4] = {
#include "shaders/post_processing/gen8/pl2_to_pl2.g8b"
};

static const uint32_t pp_nv12_avs_gen8[][4] = {
#include "shaders/post_processing/gen8/pl2_to_pl2.g8b"
};

/* DNDI / DN kernels are not ported yet; the gen7 ones are kept here
 * commented out as a reference. */
static const uint32_t pp_nv12_dndi_gen8[][4] = {
// #include "shaders/post_processing/gen7/dndi.g75b"
};

static const uint32_t pp_nv12_dn_gen8[][4] = {
// #include "shaders/post_processing/gen7/nv12_dn_nv12.g75b"
};
static const uint32_t pp_nv12_load_save_pa_gen8[][4] = {
#include "shaders/post_processing/gen8/pl2_to_pa.g8b"
};
static const uint32_t pp_pl3_load_save_pa_gen8[][4] = {
#include "shaders/post_processing/gen8/pl3_to_pa.g8b"
};
static const uint32_t pp_pa_load_save_nv12_gen8[][4] = {
#include "shaders/post_processing/gen8/pa_to_pl2.g8b"
};
static const uint32_t pp_pa_load_save_pl3_gen8[][4] = {
#include "shaders/post_processing/gen8/pa_to_pl3.g8b"
};
static const uint32_t pp_pa_load_save_pa_gen8[][4] = {
#include "shaders/post_processing/gen8/pa_to_pa.g8b"
};
static const uint32_t pp_rgbx_load_save_nv12_gen8[][4] = {
#include "shaders/post_processing/gen8/rgbx_to_nv12.g8b"
};
static const uint32_t pp_nv12_load_save_rgbx_gen8[][4] = {
#include "shaders/post_processing/gen8/pl2_to_rgbx.g8b"
};
  127.  
/* Gen8 post-processing module table, indexed by the PP_* module id.
 * Each entry pairs a descriptive name, the module id, the kernel blob
 * (and its size), and the context initializer to run before dispatch.
 * Modules whose kernel array is empty (NULL, DNDI, DN) use the no-op
 * pp_null_initialize. */
static struct pp_module pp_modules_gen8[] = {
    {
        {
            "NULL module (for testing)",
            PP_NULL,
            pp_null_gen8,
            sizeof(pp_null_gen8),
            NULL,
        },

        pp_null_initialize,
    },

    {
        {
            "NV12_NV12",
            PP_NV12_LOAD_SAVE_N12,
            pp_nv12_load_save_nv12_gen8,
            sizeof(pp_nv12_load_save_nv12_gen8),
            NULL,
        },

        gen8_pp_plx_avs_initialize,
    },

    {
        {
            "NV12_PL3",
            PP_NV12_LOAD_SAVE_PL3,
            pp_nv12_load_save_pl3_gen8,
            sizeof(pp_nv12_load_save_pl3_gen8),
            NULL,
        },
        gen8_pp_plx_avs_initialize,
    },

    {
        {
            "PL3_NV12",
            PP_PL3_LOAD_SAVE_N12,
            pp_pl3_load_save_nv12_gen8,
            sizeof(pp_pl3_load_save_nv12_gen8),
            NULL,
        },

        gen8_pp_plx_avs_initialize,
    },

    {
        {
            "PL3_PL3",
            PP_PL3_LOAD_SAVE_PL3,
            pp_pl3_load_save_pl3_gen8,
            sizeof(pp_pl3_load_save_pl3_gen8),
            NULL,
        },

        gen8_pp_plx_avs_initialize,
    },

    {
        {
            "NV12 Scaling module",
            PP_NV12_SCALING,
            pp_nv12_scaling_gen8,
            sizeof(pp_nv12_scaling_gen8),
            NULL,
        },

        gen8_pp_plx_avs_initialize,
    },

    {
        {
            "NV12 AVS module",
            PP_NV12_AVS,
            pp_nv12_avs_gen8,
            sizeof(pp_nv12_avs_gen8),
            NULL,
        },

        gen8_pp_plx_avs_initialize,
    },

    {
        {
            "NV12 DNDI module",
            PP_NV12_DNDI,
            pp_nv12_dndi_gen8,
            sizeof(pp_nv12_dndi_gen8),
            NULL,
        },

        pp_null_initialize,
    },

    {
        {
            "NV12 DN module",
            PP_NV12_DN,
            pp_nv12_dn_gen8,
            sizeof(pp_nv12_dn_gen8),
            NULL,
        },

        pp_null_initialize,
    },
    {
        {
            "NV12_PA module",
            PP_NV12_LOAD_SAVE_PA,
            pp_nv12_load_save_pa_gen8,
            sizeof(pp_nv12_load_save_pa_gen8),
            NULL,
        },

        gen8_pp_plx_avs_initialize,
    },

    {
        {
            "PL3_PA module",
            PP_PL3_LOAD_SAVE_PA,
            pp_pl3_load_save_pa_gen8,
            sizeof(pp_pl3_load_save_pa_gen8),
            NULL,
        },

        gen8_pp_plx_avs_initialize,
    },

    {
        {
            "PA_NV12 module",
            PP_PA_LOAD_SAVE_NV12,
            pp_pa_load_save_nv12_gen8,
            sizeof(pp_pa_load_save_nv12_gen8),
            NULL,
        },

        gen8_pp_plx_avs_initialize,
    },

    {
        {
            "PA_PL3 module",
            PP_PA_LOAD_SAVE_PL3,
            pp_pa_load_save_pl3_gen8,
            sizeof(pp_pa_load_save_pl3_gen8),
            NULL,
        },

        gen8_pp_plx_avs_initialize,
    },

    {
        {
            "PA_PA module",
            PP_PA_LOAD_SAVE_PA,
            pp_pa_load_save_pa_gen8,
            sizeof(pp_pa_load_save_pa_gen8),
            NULL,
        },

        gen8_pp_plx_avs_initialize,
    },

    {
        {
            "RGBX_NV12 module",
            PP_RGBX_LOAD_SAVE_NV12,
            pp_rgbx_load_save_nv12_gen8,
            sizeof(pp_rgbx_load_save_nv12_gen8),
            NULL,
        },

        gen8_pp_plx_avs_initialize,
    },

    {
        {
            "NV12_RGBX module",
            PP_NV12_LOAD_SAVE_RGBX,
            pp_nv12_load_save_rgbx_gen8,
            sizeof(pp_nv12_load_save_rgbx_gen8),
            NULL,
        },

        gen8_pp_plx_avs_initialize,
    },
};
  319.  
  320. static int
  321. pp_get_surface_fourcc(VADriverContextP ctx, const struct i965_surface *surface)
  322. {
  323.     int fourcc;
  324.  
  325.     if (surface->type == I965_SURFACE_TYPE_IMAGE) {
  326.         struct object_image *obj_image = (struct object_image *)surface->base;
  327.         fourcc = obj_image->image.format.fourcc;
  328.     } else {
  329.         struct object_surface *obj_surface = (struct object_surface *)surface->base;
  330.         fourcc = obj_surface->fourcc;
  331.     }
  332.  
  333.     return fourcc;
  334. }
  335.  
  336. static void
  337. gen8_pp_set_surface_tiling(struct gen8_surface_state *ss, unsigned int tiling)
  338. {
  339.     switch (tiling) {
  340.     case I915_TILING_NONE:
  341.         ss->ss0.tiled_surface = 0;
  342.         ss->ss0.tile_walk = 0;
  343.         break;
  344.     case I915_TILING_X:
  345.         ss->ss0.tiled_surface = 1;
  346.         ss->ss0.tile_walk = I965_TILEWALK_XMAJOR;
  347.         break;
  348.     case I915_TILING_Y:
  349.         ss->ss0.tiled_surface = 1;
  350.         ss->ss0.tile_walk = I965_TILEWALK_YMAJOR;
  351.         break;
  352.     }
  353. }
  354.  
  355. static void
  356. gen8_pp_set_surface2_tiling(struct gen8_surface_state2 *ss, unsigned int tiling)
  357. {
  358.     switch (tiling) {
  359.     case I915_TILING_NONE:
  360.         ss->ss2.tiled_surface = 0;
  361.         ss->ss2.tile_walk = 0;
  362.         break;
  363.     case I915_TILING_X:
  364.         ss->ss2.tiled_surface = 1;
  365.         ss->ss2.tile_walk = I965_TILEWALK_XMAJOR;
  366.         break;
  367.     case I915_TILING_Y:
  368.         ss->ss2.tiled_surface = 1;
  369.         ss->ss2.tile_walk = I965_TILEWALK_YMAJOR;
  370.         break;
  371.     }
  372. }
  373.  
  374.  
/* Program one gen8 SURFACE_STATE slot and its binding-table entry for a
 * 2D plane used by the media read/write messages.
 *
 * surf_bo/surf_bo_offset: backing buffer and byte offset of the plane.
 * width/height/pitch:     plane geometry (in surface elements / bytes).
 * format:                 an I965_SURFACEFORMAT_* value.
 * index:                  surface-state / binding-table slot to fill.
 * is_target:              non-zero when the GPU writes this surface.
 */
static void
gen8_pp_set_surface_state(VADriverContextP ctx, struct i965_post_processing_context *pp_context,
                          dri_bo *surf_bo, unsigned long surf_bo_offset,
                          int width, int height, int pitch, int format,
                          int index, int is_target)
{
    struct gen8_surface_state *ss;
    dri_bo *ss_bo;
    unsigned int tiling;
    unsigned int swizzle;

    dri_bo_get_tiling(surf_bo, &tiling, &swizzle);
    ss_bo = pp_context->surface_state_binding_table.bo;
    assert(ss_bo);

    dri_bo_map(ss_bo, True);
    assert(ss_bo->virtual);
    ss = (struct gen8_surface_state *)((char *)ss_bo->virtual + SURFACE_STATE_OFFSET(index));
    memset(ss, 0, sizeof(*ss));
    ss->ss0.surface_type = I965_SURFACE_2D;
    ss->ss0.surface_format = format;
    ss->ss8.base_addr = surf_bo->offset + surf_bo_offset;
    /* Hardware fields are encoded as value minus one. */
    ss->ss2.width = width - 1;
    ss->ss2.height = height - 1;
    ss->ss3.pitch = pitch - 1;

    /* Always set 1(align 4 mode) per B-spec */
    ss->ss0.vertical_alignment = 1;
    ss->ss0.horizontal_alignment = 1;

    gen8_pp_set_surface_tiling(ss, tiling);
    gen8_render_set_surface_scs(ss);
    /* Emit a relocation for the base address (ss8) so it is fixed up when
     * the BO moves; write domain only when this surface is a target. */
    dri_bo_emit_reloc(ss_bo,
                      I915_GEM_DOMAIN_RENDER, is_target ? I915_GEM_DOMAIN_RENDER : 0,
                      surf_bo_offset,
                      SURFACE_STATE_OFFSET(index) + offsetof(struct gen8_surface_state, ss8),
                      surf_bo);
    /* Point binding-table slot `index` at the state we just wrote. */
    ((unsigned int *)((char *)ss_bo->virtual + BINDING_TABLE_OFFSET))[index] = SURFACE_STATE_OFFSET(index);
    dri_bo_unmap(ss_bo);
}
  415.  
  416.  
/* Program one gen8 SURFACE_STATE2 slot (the sampler-8x8/AVS input layout)
 * and its binding-table entry.
 *
 * width/height/wpitch:  plane geometry; encoded minus one in the state.
 * xoffset/yoffset:      Cb/Cr plane offsets within the surface.
 * format:               a SURFACE_FORMAT_* value.
 * interleave_chroma:    non-zero for interleaved CbCr (NV12-style) planes.
 * index:                surface-state / binding-table slot to fill.
 * Read-only from the GPU's point of view (no write domain in the reloc).
 */
static void
gen8_pp_set_surface2_state(VADriverContextP ctx, struct i965_post_processing_context *pp_context,
                           dri_bo *surf_bo, unsigned long surf_bo_offset,
                           int width, int height, int wpitch,
                           int xoffset, int yoffset,
                           int format, int interleave_chroma,
                           int index)
{
    struct gen8_surface_state2 *ss2;
    dri_bo *ss2_bo;
    unsigned int tiling;
    unsigned int swizzle;

    dri_bo_get_tiling(surf_bo, &tiling, &swizzle);
    ss2_bo = pp_context->surface_state_binding_table.bo;
    assert(ss2_bo);

    dri_bo_map(ss2_bo, True);
    assert(ss2_bo->virtual);
    ss2 = (struct gen8_surface_state2 *)((char *)ss2_bo->virtual + SURFACE_STATE_OFFSET(index));
    memset(ss2, 0, sizeof(*ss2));
    ss2->ss6.base_addr = surf_bo->offset + surf_bo_offset;
    ss2->ss1.cbcr_pixel_offset_v_direction = 0;
    ss2->ss1.width = width - 1;
    ss2->ss1.height = height - 1;
    ss2->ss2.pitch = wpitch - 1;
    ss2->ss2.interleave_chroma = interleave_chroma;
    ss2->ss2.surface_format = format;
    ss2->ss3.x_offset_for_cb = xoffset;
    ss2->ss3.y_offset_for_cb = yoffset;
    gen8_pp_set_surface2_tiling(ss2, tiling);
    /* Relocate the base address (ss6); read-only, so no write domain. */
    dri_bo_emit_reloc(ss2_bo,
                      I915_GEM_DOMAIN_RENDER, 0,
                      surf_bo_offset,
                      SURFACE_STATE_OFFSET(index) + offsetof(struct gen8_surface_state2, ss6),
                      surf_bo);
    /* Point binding-table slot `index` at the state we just wrote. */
    ((unsigned int *)((char *)ss2_bo->virtual + BINDING_TABLE_OFFSET))[index] = SURFACE_STATE_OFFSET(index);
    dri_bo_unmap(ss2_bo);
}
  456.  
/* Set up the binding-table surfaces for one source or destination surface
 * of the media read/write kernel.
 *
 * base_index: first binding-table slot; up to three consecutive slots are
 *             filled, one per plane.
 * is_target:  non-zero for the destination. Targets are programmed via
 *             SURFACE_STATE (data-port R8-style views); sources via
 *             SURFACE_STATE2 (sampler 8x8 views).
 * rect:       region of interest within the surface.
 * width/height/pitch/offset: out arrays with one entry per plane.
 *
 * NOTE(review): if get_fourcc_info() returns NULL this returns early and
 * leaves the out arrays uninitialized, yet callers read width[0]/height[0]
 * afterwards — confirm unsupported fourccs cannot reach this path.
 */
static void
gen8_pp_set_media_rw_message_surface(VADriverContextP ctx, struct i965_post_processing_context *pp_context,
                                     const struct i965_surface *surface,
                                     int base_index, int is_target,
                                     const VARectangle *rect,
                                     int *width, int *height, int *pitch, int *offset)
{
    struct object_surface *obj_surface;
    struct object_image *obj_image;
    dri_bo *bo;
    int fourcc = pp_get_surface_fourcc(ctx, surface);
    const i965_fourcc_info *fourcc_info = get_fourcc_info(fourcc);

    if (fourcc_info == NULL)
        return;

    if (surface->type == I965_SURFACE_TYPE_SURFACE) {
        /* Plane geometry taken from the object_surface layout. */
        obj_surface = (struct object_surface *)surface->base;
        bo = obj_surface->bo;
        width[0] = MIN(rect->x + rect->width, obj_surface->orig_width);
        height[0] = MIN(rect->y + rect->height, obj_surface->orig_height);
        pitch[0] = obj_surface->width;
        offset[0] = 0;

        if (fourcc_info->num_planes == 1 && is_target)
            width[0] = width[0] * (fourcc_info->bpp[0] / 8); /* surface format is R8 */

        /* Chroma planes are scaled down by the fourcc's h/v factors. */
        width[1] = MIN(rect->x / fourcc_info->hfactor + rect->width / fourcc_info->hfactor, obj_surface->cb_cr_width);
        height[1] = MIN(rect->y / fourcc_info->vfactor + rect->height / fourcc_info->vfactor, obj_surface->cb_cr_height);
        pitch[1] = obj_surface->cb_cr_pitch;
        offset[1] = obj_surface->y_cb_offset * obj_surface->width;

        width[2] = MIN(rect->x / fourcc_info->hfactor + rect->width / fourcc_info->hfactor, obj_surface->cb_cr_width);
        height[2] = MIN(rect->y / fourcc_info->vfactor + rect->height / fourcc_info->vfactor, obj_surface->cb_cr_height);
        pitch[2] = obj_surface->cb_cr_pitch;
        offset[2] = obj_surface->y_cr_offset * obj_surface->width;
    } else {
        /* U/V are plane indices into the VAImage's pitches/offsets. */
        int U = 0, V = 0;

        /* FIXME: add support for ARGB/ABGR image */
        obj_image = (struct object_image *)surface->base;
        bo = obj_image->bo;
        width[0] = MIN(rect->x + rect->width, obj_image->image.width);
        height[0] = MIN(rect->y + rect->height, obj_image->image.height);
        pitch[0] = obj_image->image.pitches[0];
        offset[0] = obj_image->image.offsets[0];

        if (fourcc_info->num_planes == 1) {
            if (is_target)
                width[0] = width[0] * (fourcc_info->bpp[0] / 8); /* surface format is R8 */
        } else if (fourcc_info->num_planes == 2) {
            U = 1, V = 1;
        } else {
            assert(fourcc_info->num_components == 3);

            U = fourcc_info->components[1].plane;
            V = fourcc_info->components[2].plane;
            assert((U == 1 && V == 2) ||
                   (U == 2 && V == 1));
        }

        /* Always set width/height although they aren't used for fourcc_info->num_planes == 1 */
        width[1] = MIN(rect->x / fourcc_info->hfactor + rect->width / fourcc_info->hfactor, obj_image->image.width / fourcc_info->hfactor);
        height[1] = MIN(rect->y / fourcc_info->vfactor + rect->height / fourcc_info->vfactor, obj_image->image.height / fourcc_info->vfactor);
        pitch[1] = obj_image->image.pitches[U];
        offset[1] = obj_image->image.offsets[U];

        width[2] = MIN(rect->x / fourcc_info->hfactor + rect->width / fourcc_info->hfactor, obj_image->image.width / fourcc_info->hfactor);
        height[2] = MIN(rect->y / fourcc_info->vfactor + rect->height / fourcc_info->vfactor, obj_image->image.height / fourcc_info->vfactor);
        pitch[2] = obj_image->image.pitches[V];
        offset[2] = obj_image->image.offsets[V];
    }

    if (is_target) {
        /* Destination: data-port views. The Y/primary plane is exposed as
         * R8_UINT, 4 pixels per element, hence the ALIGN(width,4)/4. */
        gen8_pp_set_surface_state(ctx, pp_context,
                                  bo, 0,
                                  ALIGN(width[0], 4) / 4, height[0], pitch[0],
                                  I965_SURFACEFORMAT_R8_UINT,
                                  base_index, 1);

        if (fourcc_info->num_planes == 2) {
            gen8_pp_set_surface_state(ctx, pp_context,
                                      bo, offset[1],
                                      ALIGN(width[1], 2) / 2, height[1], pitch[1],
                                      I965_SURFACEFORMAT_R8G8_SINT,
                                      base_index + 1, 1);
        } else if (fourcc_info->num_planes == 3) {
            gen8_pp_set_surface_state(ctx, pp_context,
                                      bo, offset[1],
                                      ALIGN(width[1], 4) / 4, height[1], pitch[1],
                                      I965_SURFACEFORMAT_R8_SINT,
                                      base_index + 1, 1);
            gen8_pp_set_surface_state(ctx, pp_context,
                                      bo, offset[2],
                                      ALIGN(width[2], 4) / 4, height[2], pitch[2],
                                      I965_SURFACEFORMAT_R8_SINT,
                                      base_index + 2, 1);
        }

        if (fourcc_info->format == I965_COLOR_RGB) {
            struct gen7_pp_static_parameter *pp_static_parameter = pp_context->pp_static_parameter;
            /* the format is MSB: X-B-G-R */
            pp_static_parameter->grf2.save_avs_rgb_swap = 0;
            if ((fourcc == VA_FOURCC_BGRA) ||
                (fourcc == VA_FOURCC_BGRX)) {
                /* It is stored as MSB: X-R-G-B */
                pp_static_parameter->grf2.save_avs_rgb_swap = 1;
            }
        }
    } else {
        /* Source: sampler views; pick the packed-YUV layout if needed. */
        int format0 = SURFACE_FORMAT_Y8_UNORM;

        switch (fourcc) {
        case VA_FOURCC_YUY2:
            format0 = SURFACE_FORMAT_YCRCB_NORMAL;
            break;

        case VA_FOURCC_UYVY:
            format0 = SURFACE_FORMAT_YCRCB_SWAPY;
            break;

        default:
            break;
        }

        if (fourcc_info->format == I965_COLOR_RGB) {
            struct gen7_pp_static_parameter *pp_static_parameter = pp_context->pp_static_parameter;
            /* Only R8G8B8A8_UNORM is supported for BGRX or RGBX */
            format0 = SURFACE_FORMAT_R8G8B8A8_UNORM;
            pp_static_parameter->grf2.src_avs_rgb_swap = 0;
            if ((fourcc == VA_FOURCC_BGRA) ||
                (fourcc == VA_FOURCC_BGRX)) {
                pp_static_parameter->grf2.src_avs_rgb_swap = 1;
            }
        }

        gen8_pp_set_surface2_state(ctx, pp_context,
                                   bo, offset[0],
                                   width[0], height[0], pitch[0],
                                   0, 0,
                                   format0, 0,
                                   base_index);

        if (fourcc_info->num_planes == 2) {
            gen8_pp_set_surface2_state(ctx, pp_context,
                                       bo, offset[1],
                                       width[1], height[1], pitch[1],
                                       0, 0,
                                       SURFACE_FORMAT_R8B8_UNORM, 0,
                                       base_index + 1);
        } else if (fourcc_info->num_planes == 3) {
            gen8_pp_set_surface2_state(ctx, pp_context,
                                       bo, offset[1],
                                       width[1], height[1], pitch[1],
                                       0, 0,
                                       SURFACE_FORMAT_R8_UNORM, 0,
                                       base_index + 1);
            gen8_pp_set_surface2_state(ctx, pp_context,
                                       bo, offset[2],
                                       width[2], height[2], pitch[2],
                                       0, 0,
                                       SURFACE_FORMAT_R8_UNORM, 0,
                                       base_index + 2);
        }
    }
}
  623.  
  624. static int
  625. pp_null_x_steps(void *private_context)
  626. {
  627.     return 1;
  628. }
  629.  
  630. static int
  631. pp_null_y_steps(void *private_context)
  632. {
  633.     return 1;
  634. }
  635.  
  636. static int
  637. pp_null_set_block_parameter(struct i965_post_processing_context *pp_context, int x, int y)
  638. {
  639.     return 0;
  640. }
  641.  
  642. VAStatus
  643. pp_null_initialize(VADriverContextP ctx, struct i965_post_processing_context *pp_context,
  644.                    const struct i965_surface *src_surface,
  645.                    const VARectangle *src_rect,
  646.                    struct i965_surface *dst_surface,
  647.                    const VARectangle *dst_rect,
  648.                    void *filter_param)
  649. {
  650.     /* private function & data */
  651.     pp_context->pp_x_steps = pp_null_x_steps;
  652.     pp_context->pp_y_steps = pp_null_y_steps;
  653.     pp_context->private_context = NULL;
  654.     pp_context->pp_set_block_parameter = pp_null_set_block_parameter;
  655.  
  656.     dst_surface->flags = src_surface->flags;
  657.  
  658.     return VA_STATUS_SUCCESS;
  659. }
  660.  
  661. static void calculate_boundary_block_mask(struct i965_post_processing_context *pp_context, const VARectangle *dst_rect)
  662. {
  663.     int i, dst_width_adjust;
  664.     /* x offset of dest surface must be dword aligned.
  665.      * so we have to extend dst surface on left edge, and mask out pixels not interested
  666.      */
  667.     if (dst_rect->x%GPU_ASM_X_OFFSET_ALIGNMENT) {
  668.         pp_context->block_horizontal_mask_left = 0;
  669.         for (i=dst_rect->x%GPU_ASM_X_OFFSET_ALIGNMENT; i<GPU_ASM_BLOCK_WIDTH; i++)
  670.         {
  671.             pp_context->block_horizontal_mask_left |= 1<<i;
  672.         }
  673.     }
  674.     else {
  675.         pp_context->block_horizontal_mask_left = 0xffff;
  676.     }
  677.  
  678.     dst_width_adjust = dst_rect->width + dst_rect->x%GPU_ASM_X_OFFSET_ALIGNMENT;
  679.     if (dst_width_adjust%GPU_ASM_BLOCK_WIDTH){
  680.         pp_context->block_horizontal_mask_right = (1 << (dst_width_adjust%GPU_ASM_BLOCK_WIDTH)) - 1;
  681.     }
  682.     else {
  683.         pp_context->block_horizontal_mask_right = 0xffff;
  684.     }
  685.  
  686.     if (dst_rect->height%GPU_ASM_BLOCK_HEIGHT){
  687.         pp_context->block_vertical_mask_bottom = (1 << (dst_rect->height%GPU_ASM_BLOCK_HEIGHT)) - 1;
  688.     }
  689.     else {
  690.         pp_context->block_vertical_mask_bottom = 0xff;
  691.     }
  692.  
  693. }
  694.  
  695. static int
  696. gen7_pp_avs_x_steps(void *private_context)
  697. {
  698.     struct pp_avs_context *pp_avs_context = private_context;
  699.  
  700.     return pp_avs_context->dest_w / 16;
  701. }
  702.  
  703. static int
  704. gen7_pp_avs_y_steps(void *private_context)
  705. {
  706.     struct pp_avs_context *pp_avs_context = private_context;
  707.  
  708.     return pp_avs_context->dest_h / 16;
  709. }
  710.  
  711. static int
  712. gen7_pp_avs_set_block_parameter(struct i965_post_processing_context *pp_context, int x, int y)
  713. {
  714.     struct pp_avs_context *pp_avs_context = (struct pp_avs_context *)pp_context->private_context;
  715.     struct gen7_pp_inline_parameter *pp_inline_parameter = pp_context->pp_inline_parameter;
  716.  
  717.     pp_inline_parameter->grf9.destination_block_horizontal_origin = x * 16 + pp_avs_context->dest_x;
  718.     pp_inline_parameter->grf9.destination_block_vertical_origin = y * 16 + pp_avs_context->dest_y;
  719.     pp_inline_parameter->grf9.constant_0 = 0xffffffff;
  720.     pp_inline_parameter->grf9.sampler_load_main_video_x_scaling_step = pp_avs_context->horiz_range / pp_avs_context->src_w;
  721.  
  722.     return 0;
  723. }
  724.  
  725. static void gen7_update_src_surface_uv_offset(VADriverContextP    ctx,
  726.                                               struct i965_post_processing_context *pp_context,
  727.                                               const struct i965_surface *surface)
  728. {
  729.     struct gen7_pp_static_parameter *pp_static_parameter = pp_context->pp_static_parameter;
  730.     int fourcc = pp_get_surface_fourcc(ctx, surface);
  731.  
  732.     if (fourcc == VA_FOURCC_YUY2) {
  733.         pp_static_parameter->grf2.di_destination_packed_y_component_offset = 0;
  734.         pp_static_parameter->grf2.di_destination_packed_u_component_offset = 1;
  735.         pp_static_parameter->grf2.di_destination_packed_v_component_offset = 3;
  736.     } else if (fourcc == VA_FOURCC_UYVY) {
  737.         pp_static_parameter->grf2.di_destination_packed_y_component_offset = 1;
  738.         pp_static_parameter->grf2.di_destination_packed_u_component_offset = 0;
  739.         pp_static_parameter->grf2.di_destination_packed_v_component_offset = 2;
  740.     }
  741. }
  742.  
/* Adaptive Video Scaler configuration for gen8: 16 filter phases,
 * 8-tap luma and 4-tap chroma kernels with 6 fractional coefficient
 * bits, plus the legal per-tap coefficient ranges. */
static const AVSConfig gen8_avs_config = {
    .coeff_frac_bits = 6,
    .coeff_epsilon = 1.0f / (1U << 6),  /* smallest representable step */
    .num_phases = 16,
    .num_luma_coeffs = 8,
    .num_chroma_coeffs = 4,

    .coeff_range = {
        .lower_bound = {
            .y_k_h = { -2, -2, -2, -2, -2, -2, -2, -2 },
            .y_k_v = { -2, -2, -2, -2, -2, -2, -2, -2 },
            .uv_k_h = { -1, -2, -2, -1 },
            .uv_k_v = { -1, -2, -2, -1 },
        },
        .upper_bound = {
            .y_k_h = { 2, 2, 2, 2, 2, 2, 2, 2 },
            .y_k_v = { 2, 2, 2, 2, 2, 2, 2, 2 },
            .uv_k_h = { 1, 2, 2, 1 },
            .uv_k_v = { 1, 2, 2, 1 },
        },
    },
};
  765.  
/*
 * Initialize a Gen8 AVS (scaling + CSC) post-processing pass from
 * src_surface/src_rect to dst_surface/dst_rect.
 *
 * Sets up the read/write message surfaces, writes the sampler_8x8
 * state (IEF constants plus per-phase scaler coefficient tables) into
 * the dynamic-state buffer at pp_context->sampler_offset, and fills
 * pp_context->pp_static_parameter with the scaling steps/origins and
 * YUV->RGB coefficients consumed by the AVS kernel.
 *
 * Always returns VA_STATUS_SUCCESS.
 */
VAStatus
gen8_pp_plx_avs_initialize(VADriverContextP ctx, struct i965_post_processing_context *pp_context,
                           const struct i965_surface *src_surface,
                           const VARectangle *src_rect,
                           struct i965_surface *dst_surface,
                           const VARectangle *dst_rect,
                           void *filter_param)
{
/* TODO: Add the sampler_8x8 state */
    struct pp_avs_context *pp_avs_context = (struct pp_avs_context *)&pp_context->pp_avs_context;
    struct gen7_pp_static_parameter *pp_static_parameter = pp_context->pp_static_parameter;
    struct gen8_sampler_8x8_avs *sampler_8x8;
    int i;
    int width[3], height[3], pitch[3], offset[3];
    int src_width, src_height;
    unsigned char *cc_ptr;
    AVSState * const avs = &pp_avs_context->state;
    float sx, sy;
    const float * yuv_to_rgb_coefs;
    size_t yuv_to_rgb_coefs_size;

    memset(pp_static_parameter, 0, sizeof(struct gen7_pp_static_parameter));

    /* source surface (surface index 0; last flag 0 presumably marks it
     * as the read surface — see gen8_pp_set_media_rw_message_surface) */
    gen8_pp_set_media_rw_message_surface(ctx, pp_context, src_surface, 0, 0,
                                         src_rect,
                                         width, height, pitch, offset);
    src_height = height[0];
    src_width  = width[0];

    /* destination surface (surface index 24; flag 1 presumably marks
     * it as the write surface) */
    gen8_pp_set_media_rw_message_surface(ctx, pp_context, dst_surface, 24, 1,
                                         dst_rect,
                                         width, height, pitch, offset);

    /* sampler 8x8 state — written directly into the mapped dynamic
     * state buffer at the sampler offset computed in gen8_pp_initialize */
    dri_bo_map(pp_context->dynamic_state.bo, True);
    assert(pp_context->dynamic_state.bo->virtual);

    cc_ptr = (unsigned char *) pp_context->dynamic_state.bo->virtual +
                        pp_context->sampler_offset;
    /* Currently only one gen8 sampler_8x8 is initialized */
    sampler_8x8 = (struct gen8_sampler_8x8_avs *) cc_ptr;
    memset(sampler_8x8, 0, sizeof(*sampler_8x8));

    /* IEF (image enhancement filter) constants.  The magic values
     * below are hardware tuning parameters; several deliberately
     * deviate from the documented defaults to match the Ivybridge
     * settings (see the inline notes). */
    sampler_8x8->dw0.gain_factor = 44;
    sampler_8x8->dw0.weak_edge_threshold = 1;
    sampler_8x8->dw0.strong_edge_threshold = 8;
    /* Use the value like that on Ivy instead of default
     * sampler_8x8->dw0.r3x_coefficient = 5;
     */
    sampler_8x8->dw0.r3x_coefficient = 27;
    sampler_8x8->dw0.r3c_coefficient = 5;

    sampler_8x8->dw2.global_noise_estimation = 255;
    sampler_8x8->dw2.non_edge_weight = 1;
    sampler_8x8->dw2.regular_weight = 2;
    sampler_8x8->dw2.strong_edge_weight = 7;
    /* Use the value like that on Ivy instead of default
     * sampler_8x8->dw2.r5x_coefficient = 7;
     * sampler_8x8->dw2.r5cx_coefficient = 7;
     * sampler_8x8->dw2.r5c_coefficient = 7;
     */
    sampler_8x8->dw2.r5x_coefficient = 9;
    sampler_8x8->dw2.r5cx_coefficient = 8;
    sampler_8x8->dw2.r5c_coefficient = 3;

    sampler_8x8->dw3.sin_alpha = 101; /* sin_alpha = 0 */
    sampler_8x8->dw3.cos_alpha = 79; /* cos_alpha = 0 */
    sampler_8x8->dw3.sat_max = 0x1f;
    sampler_8x8->dw3.hue_max = 14;
    /* The 8tap filter will determine whether the adaptive Filter is
     * applied for all channels(dw153).
     * If the 8tap filter is disabled, the adaptive filter should be disabled.
     * Only when 8tap filter is enabled, it can be enabled or not.
     */
    sampler_8x8->dw3.enable_8tap_filter = 3;
    sampler_8x8->dw3.ief4_smooth_enable = 0;

    sampler_8x8->dw4.s3u = 0;
    sampler_8x8->dw4.diamond_margin = 4;
    sampler_8x8->dw4.vy_std_enable = 0;
    sampler_8x8->dw4.umid = 110;
    sampler_8x8->dw4.vmid = 154;

    sampler_8x8->dw5.diamond_dv = 0;
    sampler_8x8->dw5.diamond_th = 35;
    sampler_8x8->dw5.diamond_alpha = 100; /* diamond_alpha = 0 */
    sampler_8x8->dw5.hs_margin = 3;
    sampler_8x8->dw5.diamond_du = 2;

    /* Skin-tone detection piecewise-linear curve points/slopes
     * (dw6..dw15).  Encoded values; the trailing comments give the
     * logical value where the original author recorded one. */
    sampler_8x8->dw6.y_point1 = 46;
    sampler_8x8->dw6.y_point2 = 47;
    sampler_8x8->dw6.y_point3 = 254;
    sampler_8x8->dw6.y_point4 = 255;

    sampler_8x8->dw7.inv_margin_vyl = 3300; /* inv_margin_vyl = 0 */

    sampler_8x8->dw8.inv_margin_vyu = 1600; /* inv_margin_vyu = 0 */
    sampler_8x8->dw8.p0l = 46;
    sampler_8x8->dw8.p1l = 216;

    sampler_8x8->dw9.p2l = 236;
    sampler_8x8->dw9.p3l = 236;
    sampler_8x8->dw9.b0l = 133;
    sampler_8x8->dw9.b1l = 130;

    sampler_8x8->dw10.b2l = 130;
    sampler_8x8->dw10.b3l = 130;
    /* s0l = -5 / 256. s2.8 */
    sampler_8x8->dw10.s0l = 1029;    /* s0l = 0 */
    sampler_8x8->dw10.y_slope2 = 31; /* y_slop2 = 0 */

    sampler_8x8->dw11.s1l = 0;
    sampler_8x8->dw11.s2l = 0;

    sampler_8x8->dw12.s3l = 0;
    sampler_8x8->dw12.p0u = 46;
    sampler_8x8->dw12.p1u = 66;
    sampler_8x8->dw12.y_slope1 = 31; /* y_slope1 = 0 */

    sampler_8x8->dw13.p2u = 130;
    sampler_8x8->dw13.p3u = 236;
    sampler_8x8->dw13.b0u = 143;
    sampler_8x8->dw13.b1u = 163;

    sampler_8x8->dw14.b2u = 200;
    sampler_8x8->dw14.b3u = 140;
    sampler_8x8->dw14.s0u = 256;  /* s0u = 0 */

    sampler_8x8->dw15.s1u = 113; /* s1u = 0 */
    sampler_8x8->dw15.s2u = 1203; /* s2u = 0 */

    /* Recompute the polyphase filter coefficients for the current
     * scaling factors (destination/source ratio per axis). */
    sx = (float)dst_rect->width / src_rect->width;
    sy = (float)dst_rect->height / src_rect->height;
    avs_update_coefficients(avs, sx, sy, pp_context->filter_flags);

    /* Phases 0..16 (17 entries) are packed into coefficients[]; each
     * coefficient is converted to signed fixed point with 1 integer
     * bit and 6 fraction bits (intel_format_convert(v, 1, 6, 1)). */
    assert(avs->config->num_phases >= 16);
    for (i = 0; i <= 16; i++) {
        struct gen8_sampler_8x8_avs_coefficients * const sampler_8x8_state =
            &sampler_8x8->coefficients[i];
        const AVSCoeffs * const coeffs = &avs->coeffs[i];

        sampler_8x8_state->dw0.table_0x_filter_c0 =
            intel_format_convert(coeffs->y_k_h[0], 1, 6, 1);
        sampler_8x8_state->dw0.table_0y_filter_c0 =
            intel_format_convert(coeffs->y_k_v[0], 1, 6, 1);
        sampler_8x8_state->dw0.table_0x_filter_c1 =
            intel_format_convert(coeffs->y_k_h[1], 1, 6, 1);
        sampler_8x8_state->dw0.table_0y_filter_c1 =
            intel_format_convert(coeffs->y_k_v[1], 1, 6, 1);

        sampler_8x8_state->dw1.table_0x_filter_c2 =
            intel_format_convert(coeffs->y_k_h[2], 1, 6, 1);
        sampler_8x8_state->dw1.table_0y_filter_c2 =
            intel_format_convert(coeffs->y_k_v[2], 1, 6, 1);
        sampler_8x8_state->dw1.table_0x_filter_c3 =
            intel_format_convert(coeffs->y_k_h[3], 1, 6, 1);
        sampler_8x8_state->dw1.table_0y_filter_c3 =
            intel_format_convert(coeffs->y_k_v[3], 1, 6, 1);

        sampler_8x8_state->dw2.table_0x_filter_c4 =
            intel_format_convert(coeffs->y_k_h[4], 1, 6, 1);
        sampler_8x8_state->dw2.table_0y_filter_c4 =
            intel_format_convert(coeffs->y_k_v[4], 1, 6, 1);
        sampler_8x8_state->dw2.table_0x_filter_c5 =
            intel_format_convert(coeffs->y_k_h[5], 1, 6, 1);
        sampler_8x8_state->dw2.table_0y_filter_c5 =
            intel_format_convert(coeffs->y_k_v[5], 1, 6, 1);

        sampler_8x8_state->dw3.table_0x_filter_c6 =
            intel_format_convert(coeffs->y_k_h[6], 1, 6, 1);
        sampler_8x8_state->dw3.table_0y_filter_c6 =
            intel_format_convert(coeffs->y_k_v[6], 1, 6, 1);
        sampler_8x8_state->dw3.table_0x_filter_c7 =
            intel_format_convert(coeffs->y_k_h[7], 1, 6, 1);
        sampler_8x8_state->dw3.table_0y_filter_c7 =
            intel_format_convert(coeffs->y_k_v[7], 1, 6, 1);

        /* Chroma (table 1) has only 4 taps, stored in slots c2..c5. */
        sampler_8x8_state->dw4.pad0 = 0;
        sampler_8x8_state->dw5.pad0 = 0;
        sampler_8x8_state->dw4.table_1x_filter_c2 =
            intel_format_convert(coeffs->uv_k_h[0], 1, 6, 1);
        sampler_8x8_state->dw4.table_1x_filter_c3 =
            intel_format_convert(coeffs->uv_k_h[1], 1, 6, 1);
        sampler_8x8_state->dw5.table_1x_filter_c4 =
            intel_format_convert(coeffs->uv_k_h[2], 1, 6, 1);
        sampler_8x8_state->dw5.table_1x_filter_c5 =
            intel_format_convert(coeffs->uv_k_h[3], 1, 6, 1);

        sampler_8x8_state->dw6.pad0 =
        sampler_8x8_state->dw7.pad0 =
        sampler_8x8_state->dw6.table_1y_filter_c2 =
            intel_format_convert(coeffs->uv_k_v[0], 1, 6, 1);
        sampler_8x8_state->dw6.table_1y_filter_c3 =
            intel_format_convert(coeffs->uv_k_v[1], 1, 6, 1);
        sampler_8x8_state->dw7.table_1y_filter_c4 =
            intel_format_convert(coeffs->uv_k_v[2], 1, 6, 1);
        sampler_8x8_state->dw7.table_1y_filter_c5 =
            intel_format_convert(coeffs->uv_k_v[3], 1, 6, 1);
    }

    /* avs_is_needed() result negated: 0 (off) or all-ones sharpness */
    sampler_8x8->dw152.default_sharpness_level =
        -avs_is_needed(pp_context->filter_flags);
    sampler_8x8->dw153.adaptive_filter_for_all_channel = 1;
    sampler_8x8->dw153.bypass_y_adaptive_filtering = 1;
    sampler_8x8->dw153.bypass_x_adaptive_filtering = 1;

    /* Remaining phases (17..num_phases) go into coefficients1[].
     * With gen8_avs_config (num_phases == 16) i is already 17 here and
     * this loop body never executes; it is kept for AVS configs that
     * expose more phases. */
    for ( ; i <= avs->config->num_phases; i++) {
        struct gen8_sampler_8x8_avs_coefficients * const sampler_8x8_state =
            &sampler_8x8->coefficients1[i - 17];
        const AVSCoeffs * const coeffs = &avs->coeffs[i];

        sampler_8x8_state->dw0.table_0x_filter_c0 =
            intel_format_convert(coeffs->y_k_h[0], 1, 6, 1);
        sampler_8x8_state->dw0.table_0y_filter_c0 =
            intel_format_convert(coeffs->y_k_v[0], 1, 6, 1);
        sampler_8x8_state->dw0.table_0x_filter_c1 =
            intel_format_convert(coeffs->y_k_h[1], 1, 6, 1);
        sampler_8x8_state->dw0.table_0y_filter_c1 =
            intel_format_convert(coeffs->y_k_v[1], 1, 6, 1);

        sampler_8x8_state->dw1.table_0x_filter_c2 =
            intel_format_convert(coeffs->y_k_h[2], 1, 6, 1);
        sampler_8x8_state->dw1.table_0y_filter_c2 =
            intel_format_convert(coeffs->y_k_v[2], 1, 6, 1);
        sampler_8x8_state->dw1.table_0x_filter_c3 =
            intel_format_convert(coeffs->y_k_h[3], 1, 6, 1);
        sampler_8x8_state->dw1.table_0y_filter_c3 =
            intel_format_convert(coeffs->y_k_v[3], 1, 6, 1);

        sampler_8x8_state->dw2.table_0x_filter_c4 =
            intel_format_convert(coeffs->y_k_h[4], 1, 6, 1);
        sampler_8x8_state->dw2.table_0y_filter_c4 =
            intel_format_convert(coeffs->y_k_v[4], 1, 6, 1);
        sampler_8x8_state->dw2.table_0x_filter_c5 =
            intel_format_convert(coeffs->y_k_h[5], 1, 6, 1);
        sampler_8x8_state->dw2.table_0y_filter_c5 =
            intel_format_convert(coeffs->y_k_v[5], 1, 6, 1);

        sampler_8x8_state->dw3.table_0x_filter_c6 =
            intel_format_convert(coeffs->y_k_h[6], 1, 6, 1);
        sampler_8x8_state->dw3.table_0y_filter_c6 =
            intel_format_convert(coeffs->y_k_v[6], 1, 6, 1);
        sampler_8x8_state->dw3.table_0x_filter_c7 =
            intel_format_convert(coeffs->y_k_h[7], 1, 6, 1);
        sampler_8x8_state->dw3.table_0y_filter_c7 =
            intel_format_convert(coeffs->y_k_v[7], 1, 6, 1);

        sampler_8x8_state->dw4.pad0 = 0;
        sampler_8x8_state->dw5.pad0 = 0;
        sampler_8x8_state->dw4.table_1x_filter_c2 =
            intel_format_convert(coeffs->uv_k_h[0], 1, 6, 1);
        sampler_8x8_state->dw4.table_1x_filter_c3 =
            intel_format_convert(coeffs->uv_k_h[1], 1, 6, 1);
        sampler_8x8_state->dw5.table_1x_filter_c4 =
            intel_format_convert(coeffs->uv_k_h[2], 1, 6, 1);
        sampler_8x8_state->dw5.table_1x_filter_c5 =
            intel_format_convert(coeffs->uv_k_h[3], 1, 6, 1);

        sampler_8x8_state->dw6.pad0 =
        sampler_8x8_state->dw7.pad0 =
        sampler_8x8_state->dw6.table_1y_filter_c2 =
            intel_format_convert(coeffs->uv_k_v[0], 1, 6, 1);
        sampler_8x8_state->dw6.table_1y_filter_c3 =
            intel_format_convert(coeffs->uv_k_v[1], 1, 6, 1);
        sampler_8x8_state->dw7.table_1y_filter_c4 =
            intel_format_convert(coeffs->uv_k_v[2], 1, 6, 1);
        sampler_8x8_state->dw7.table_1y_filter_c5 =
            intel_format_convert(coeffs->uv_k_v[3], 1, 6, 1);
    }

    dri_bo_unmap(pp_context->dynamic_state.bo);


    /* private function & data */
    pp_context->pp_x_steps = gen7_pp_avs_x_steps;
    pp_context->pp_y_steps = gen7_pp_avs_y_steps;
    pp_context->private_context = &pp_context->pp_avs_context;
    pp_context->pp_set_block_parameter = gen7_pp_avs_set_block_parameter;

    /* Align the destination x origin down to the GPU block alignment
     * and widen the destination rectangle to compensate. */
    int dst_left_edge_extend = dst_rect->x%GPU_ASM_X_OFFSET_ALIGNMENT;
    pp_avs_context->dest_x = dst_rect->x - dst_left_edge_extend;
    pp_avs_context->dest_y = dst_rect->y;
    pp_avs_context->dest_w = ALIGN(dst_rect->width + dst_left_edge_extend, 16);
    pp_avs_context->dest_h = ALIGN(dst_rect->height, 16);
    pp_avs_context->src_w = src_rect->width;
    pp_avs_context->src_h = src_rect->height;
    pp_avs_context->horiz_range = (float)src_rect->width / src_width;

    /* Normalization width used for the horizontal sampling step:
     * at least src_w/16 rounded up, and at least the extended
     * destination width. */
    int dw = (pp_avs_context->src_w - 1) / 16 + 1;
    dw = MAX(dw, dst_rect->width + dst_left_edge_extend);

    pp_static_parameter->grf1.pointer_to_inline_parameter = 7;
    pp_static_parameter->grf2.avs_wa_enable = 0; /* It is not required on GEN8+ */
    pp_static_parameter->grf2.alpha = 255;

    /* Normalized sampling steps and frame origins consumed by the AVS
     * kernel (fractions of the full source surface). */
    pp_static_parameter->grf3.sampler_load_horizontal_scaling_step_ratio = (float) pp_avs_context->src_w / dw;
    pp_static_parameter->grf4.sampler_load_vertical_scaling_step = (float) src_rect->height / src_height / dst_rect->height;
    pp_static_parameter->grf5.sampler_load_vertical_frame_origin = (float) src_rect->y / src_height -
        (float) pp_avs_context->dest_y * pp_static_parameter->grf4.sampler_load_vertical_scaling_step;
    pp_static_parameter->grf6.sampler_load_horizontal_frame_origin = (float) src_rect->x / src_width -
        (float) pp_avs_context->dest_x * pp_avs_context->horiz_range / dw;

    gen7_update_src_surface_uv_offset(ctx, pp_context, dst_surface);

    /* Select the YUV->RGB coefficient set matching the source color
     * standard and copy it into grf7 onward. */
    yuv_to_rgb_coefs = i915_color_standard_to_coefs (i915_filter_to_color_standard (src_surface->flags &
                                                                                    VA_SRC_COLOR_MASK),
                                                     &yuv_to_rgb_coefs_size);
    memcpy(&pp_static_parameter->grf7, yuv_to_rgb_coefs, yuv_to_rgb_coefs_size);

    dst_surface->flags = src_surface->flags;

    return VA_STATUS_SUCCESS;
}
  1081.  
/*
 * Common setup for one Gen8 post-processing pass: (re)allocate the
 * surface-state/binding-table BO and the dynamic-state BO, lay out the
 * CURBE, interface-descriptor and sampler regions inside the latter,
 * then dispatch to the selected pp module's initialize hook.
 *
 * Returns the module's status, or VA_STATUS_ERROR_UNIMPLEMENTED when
 * the module has no initialize hook.
 */
VAStatus
gen8_pp_initialize(
    VADriverContextP   ctx,
    struct i965_post_processing_context *pp_context,
    const struct i965_surface *src_surface,
    const VARectangle *src_rect,
    struct i965_surface *dst_surface,
    const VARectangle *dst_rect,
    int                pp_index,
    void * filter_param
)
{
    VAStatus va_status;
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    dri_bo *bo;
    int bo_size;
    unsigned int end_offset;
    struct pp_module *pp_module;
    int static_param_size, inline_param_size;

    /* Fresh surface state + binding table BO for this pass. */
    dri_bo_unreference(pp_context->surface_state_binding_table.bo);
    bo = dri_bo_alloc(i965->intel.bufmgr,
                      "surface state & binding table",
                      (SURFACE_STATE_PADDED_SIZE + sizeof(unsigned int)) * MAX_PP_SURFACES,
                      4096);
    assert(bo);
    pp_context->surface_state_binding_table.bo = bo;

    pp_context->idrt.num_interface_descriptors = 0;

    pp_context->sampler_size = 4 * 4096;

    /* Dynamic state BO: 4KB slack + CURBE + sampler + interface
     * descriptor regions, laid out below with 64-byte alignment. */
    bo_size = 4096 + pp_context->curbe_size + pp_context->sampler_size
                + pp_context->idrt_size;

    dri_bo_unreference(pp_context->dynamic_state.bo);
    bo = dri_bo_alloc(i965->intel.bufmgr,
                      "dynamic_state",
                      bo_size,
                      4096);

    assert(bo);
    pp_context->dynamic_state.bo = bo;
    pp_context->dynamic_state.bo_size = bo_size;

    end_offset = 0;
    pp_context->dynamic_state.end_offset = 0;

    /* Constant buffer offset */
    pp_context->curbe_offset = ALIGN(end_offset, 64);
    end_offset = pp_context->curbe_offset + pp_context->curbe_size;

    /* Interface descriptor offset */
    pp_context->idrt_offset = ALIGN(end_offset, 64);
    end_offset = pp_context->idrt_offset + pp_context->idrt_size;

    /* Sampler state offset */
    pp_context->sampler_offset = ALIGN(end_offset, 64);
    end_offset = pp_context->sampler_offset + pp_context->sampler_size;

    /* update the end offset of dynamic_state */
    pp_context->dynamic_state.end_offset = ALIGN(end_offset, 64);

    /* Clear both parameter blocks; the module's initialize hook fills
     * them in. */
    static_param_size = sizeof(struct gen7_pp_static_parameter);
    inline_param_size = sizeof(struct gen7_pp_inline_parameter);

    memset(pp_context->pp_static_parameter, 0, static_param_size);
    memset(pp_context->pp_inline_parameter, 0, inline_param_size);

    assert(pp_index >= PP_NULL && pp_index < NUM_PP_MODULES);
    pp_context->current_pp = pp_index;
    pp_module = &pp_context->pp_modules[pp_index];

    if (pp_module->initialize)
        va_status = pp_module->initialize(ctx, pp_context,
                                          src_surface,
                                          src_rect,
                                          dst_surface,
                                          dst_rect,
                                          filter_param);
    else
        va_status = VA_STATUS_ERROR_UNIMPLEMENTED;

    /* Runs regardless of the module's status (cheap, side-effect on
     * pp_context only). */
    calculate_boundary_block_mask(pp_context, dst_rect);

    return va_status;
}
  1169.  
/*
 * Append one interface descriptor for the current pp kernel into the
 * IDRT region of the dynamic-state BO and bump the descriptor count.
 */
static void
gen8_pp_interface_descriptor_table(VADriverContextP   ctx,
                                   struct i965_post_processing_context *pp_context)
{
    struct gen8_interface_descriptor_data *desc;
    dri_bo *bo;
    int pp_index = pp_context->current_pp;
    unsigned char *cc_ptr;

    bo = pp_context->dynamic_state.bo;

    dri_bo_map(bo, 1);
    assert(bo->virtual);
    cc_ptr = (unsigned char *)bo->virtual + pp_context->idrt_offset;

    /* Slot for the next descriptor (indexed by the running count). */
    desc = (struct gen8_interface_descriptor_data *) cc_ptr +
                pp_context->idrt.num_interface_descriptors;

    memset(desc, 0, sizeof(*desc));
    /* Kernel start pointer is in 64-byte units. */
    desc->desc0.kernel_start_pointer =
                pp_context->pp_modules[pp_index].kernel.kernel_offset >> 6; /* reloc */
    desc->desc2.single_program_flow = 1;
    desc->desc2.floating_point_mode = FLOATING_POINT_IEEE_754;
    desc->desc3.sampler_count = 0;      /* 1 - 4 samplers used */
    /* Sampler state and binding table pointers are in 32-byte units. */
    desc->desc3.sampler_state_pointer = pp_context->sampler_offset >> 5;
    desc->desc4.binding_table_entry_count = 0;
    desc->desc4.binding_table_pointer = (BINDING_TABLE_OFFSET >> 5);
    desc->desc5.constant_urb_entry_read_offset = 0;

    desc->desc5.constant_urb_entry_read_length = 8; /* grf 1-8 */

    dri_bo_unmap(bo);
    pp_context->idrt.num_interface_descriptors++;
}
  1204.  
  1205.  
  1206. static void
  1207. gen8_pp_upload_constants(VADriverContextP ctx,
  1208.                          struct i965_post_processing_context *pp_context)
  1209. {
  1210.     unsigned char *constant_buffer;
  1211.     int param_size;
  1212.  
  1213.     assert(sizeof(struct gen7_pp_static_parameter) == 256);
  1214.  
  1215.     param_size = sizeof(struct gen7_pp_static_parameter);
  1216.  
  1217.     dri_bo_map(pp_context->dynamic_state.bo, 1);
  1218.     assert(pp_context->dynamic_state.bo->virtual);
  1219.     constant_buffer = (unsigned char *) pp_context->dynamic_state.bo->virtual +
  1220.                         pp_context->curbe_offset;
  1221.  
  1222.     memcpy(constant_buffer, pp_context->pp_static_parameter, param_size);
  1223.     dri_bo_unmap(pp_context->dynamic_state.bo);
  1224.     return;
  1225. }
  1226.  
  1227. void
  1228. gen8_pp_states_setup(VADriverContextP ctx,
  1229.                      struct i965_post_processing_context *pp_context)
  1230. {
  1231.     gen8_pp_interface_descriptor_table(ctx, pp_context);
  1232.     gen8_pp_upload_constants(ctx, pp_context);
  1233. }
  1234.  
/*
 * Emit PIPELINE_SELECT to switch the GPU to the media pipeline.
 */
static void
gen6_pp_pipeline_select(VADriverContextP ctx,
                        struct i965_post_processing_context *pp_context)
{
    struct intel_batchbuffer *batch = pp_context->batch;

    BEGIN_BATCH(batch, 1);
    OUT_BATCH(batch, CMD_PIPELINE_SELECT | PIPELINE_SELECT_MEDIA);
    ADVANCE_BATCH(batch);
}
  1245.  
/*
 * Emit the 16-dword Gen8 STATE_BASE_ADDRESS command: surface state
 * points at the binding-table BO, dynamic state at the dynamic-state
 * BO, instructions at the instruction BO; the remaining bases are
 * zero.  Dword order is fixed by the command layout — do not reorder.
 */
static void
gen8_pp_state_base_address(VADriverContextP ctx,
                           struct i965_post_processing_context *pp_context)
{
    struct intel_batchbuffer *batch = pp_context->batch;

    BEGIN_BATCH(batch, 16);
    OUT_BATCH(batch, CMD_STATE_BASE_ADDRESS | (16 - 2));
        /* DW1 Generate state address */
    OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
        OUT_BATCH(batch, 0);
        OUT_BATCH(batch, 0);

        /* DW4-5. Surface state address */
    OUT_RELOC64(batch, pp_context->surface_state_binding_table.bo, I915_GEM_DOMAIN_INSTRUCTION, 0, BASE_ADDRESS_MODIFY); /* Surface state base address */

        /* DW6-7. Dynamic state address */
    OUT_RELOC64(batch, pp_context->dynamic_state.bo, I915_GEM_DOMAIN_RENDER | I915_GEM_DOMAIN_SAMPLER,
                0, 0 | BASE_ADDRESS_MODIFY);

        /* DW8. Indirect object address */
    OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
        OUT_BATCH(batch, 0);

        /* DW10-11. Instruction base address */
    OUT_RELOC64(batch, pp_context->instruction_state.bo, I915_GEM_DOMAIN_INSTRUCTION, 0, BASE_ADDRESS_MODIFY);

    /* DW12-15: buffer size limits (general/dynamic/indirect/instruction),
     * set to the maximum (0xFFFF0000 = bound disabled upper range). */
    OUT_BATCH(batch, 0xFFFF0000 | BASE_ADDRESS_MODIFY);
    OUT_BATCH(batch, 0xFFFF0000 | BASE_ADDRESS_MODIFY);
    OUT_BATCH(batch, 0xFFFF0000 | BASE_ADDRESS_MODIFY);
    OUT_BATCH(batch, 0xFFFF0000 | BASE_ADDRESS_MODIFY);
    ADVANCE_BATCH(batch);
}
  1279.  
/*
 * Emit MEDIA_VFE_STATE: configure the video front end with the thread
 * count, URB entry count and URB/CURBE allocation sizes held in
 * pp_context->vfe_gpu_state.
 */
void
gen8_pp_vfe_state(VADriverContextP ctx,
                  struct i965_post_processing_context *pp_context)
{
    struct intel_batchbuffer *batch = pp_context->batch;

    BEGIN_BATCH(batch, 9);
    OUT_BATCH(batch, CMD_MEDIA_VFE_STATE | (9 - 2));
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, 0);
    /* Maximum thread count is encoded as (threads - 1). */
    OUT_BATCH(batch,
              (pp_context->vfe_gpu_state.max_num_threads - 1) << 16 |
              pp_context->vfe_gpu_state.num_urb_entries << 8);
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch,
              (pp_context->vfe_gpu_state.urb_entry_size) << 16 |
                /* URB Entry Allocation Size, in 256 bits unit */
              (pp_context->vfe_gpu_state.curbe_allocation_size));
                /* CURBE Allocation Size, in 256 bits unit */
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, 0);
    ADVANCE_BATCH(batch);
}
  1304.  
/*
 * Emit MEDIA_INTERFACE_DESCRIPTOR_LOAD pointing at the IDRT region of
 * the dynamic-state buffer, preceded by a MEDIA_STATE_FLUSH so prior
 * media state lands before the descriptors are loaded.
 */
void
gen8_interface_descriptor_load(VADriverContextP ctx,
                               struct i965_post_processing_context *pp_context)
{
    struct intel_batchbuffer *batch = pp_context->batch;

    BEGIN_BATCH(batch, 6);

    OUT_BATCH(batch, CMD_MEDIA_STATE_FLUSH);
    OUT_BATCH(batch, 0);

    OUT_BATCH(batch, CMD_MEDIA_INTERFACE_DESCRIPTOR_LOAD | (4 - 2));
    OUT_BATCH(batch, 0);
    /* Total descriptor data length in bytes. */
    OUT_BATCH(batch,
              pp_context->idrt.num_interface_descriptors * sizeof(struct gen8_interface_descriptor_data));
    /* Offset relative to the dynamic state base address. */
    OUT_BATCH(batch, pp_context->idrt_offset);
    ADVANCE_BATCH(batch);
}
  1323.  
  1324. void
  1325. gen8_pp_curbe_load(VADriverContextP ctx,
  1326.                    struct i965_post_processing_context *pp_context)
  1327. {
  1328.     struct intel_batchbuffer *batch = pp_context->batch;
  1329.     int param_size = 64;
  1330.  
  1331.     param_size = sizeof(struct gen7_pp_static_parameter);
  1332.  
  1333.     BEGIN_BATCH(batch, 4);
  1334.     OUT_BATCH(batch, CMD_MEDIA_CURBE_LOAD | (4 - 2));
  1335.     OUT_BATCH(batch, 0);
  1336.     OUT_BATCH(batch,
  1337.               param_size);
  1338.     OUT_BATCH(batch, pp_context->curbe_offset);
  1339.     ADVANCE_BATCH(batch);
  1340. }
  1341.  
/*
 * Build a second-level batch buffer containing one MEDIA_OBJECT (plus
 * inline parameters and a MEDIA_STATE_FLUSH) per block of the walker
 * grid, chain to it with MI_BATCH_BUFFER_START, and flush the primary
 * batch so execution returns to the ring afterwards.
 */
void
gen8_pp_object_walker(VADriverContextP ctx,
                      struct i965_post_processing_context *pp_context)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = pp_context->batch;
    int x, x_steps, y, y_steps;
    int param_size, command_length_in_dws, extra_cmd_in_dws;
    dri_bo *command_buffer;
    unsigned int *command_ptr;

    param_size = sizeof(struct gen7_pp_inline_parameter);

    x_steps = pp_context->pp_x_steps(pp_context->private_context);
    y_steps = pp_context->pp_y_steps(pp_context->private_context);
    /* 6 fixed MEDIA_OBJECT dwords + inline params; each block also
     * carries a 2-dword MEDIA_STATE_FLUSH. */
    command_length_in_dws = 6 + (param_size >> 2);
    extra_cmd_in_dws = 2;
    /* +64 bytes of slack for padding and MI_BATCH_BUFFER_END. */
    command_buffer = dri_bo_alloc(i965->intel.bufmgr,
                                  "command objects buffer",
                                  (command_length_in_dws + extra_cmd_in_dws) * 4 * x_steps * y_steps + 64,
                                  4096);

    dri_bo_map(command_buffer, 1);
    command_ptr = command_buffer->virtual;

    for (y = 0; y < y_steps; y++) {
        for (x = 0; x < x_steps; x++) {
            /* pp_set_block_parameter() fills pp_inline_parameter for
             * this (x, y) block; nonzero return skips the block. */
            if (!pp_context->pp_set_block_parameter(pp_context, x, y)) {

                *command_ptr++ = (CMD_MEDIA_OBJECT | (command_length_in_dws - 2));
                *command_ptr++ = 0;
                *command_ptr++ = 0;
                *command_ptr++ = 0;
                *command_ptr++ = 0;
                *command_ptr++ = 0;
                memcpy(command_ptr, pp_context->pp_inline_parameter, param_size);
                command_ptr += (param_size >> 2);

                *command_ptr++ = CMD_MEDIA_STATE_FLUSH;
                *command_ptr++ = 0;
            }
        }
    }

    /* Pad so the buffer ends on an odd dword count before
     * MI_BATCH_BUFFER_END + trailing zero.
     * NOTE(review): the parity is computed from the full grid size
     * even when pp_set_block_parameter() skipped some blocks above —
     * confirm alignment still holds in that case. */
    if ((command_length_in_dws + extra_cmd_in_dws) * x_steps * y_steps % 2 == 0)
        *command_ptr++ = 0;

    *command_ptr++ = MI_BATCH_BUFFER_END;
    *command_ptr++ = 0;

    dri_bo_unmap(command_buffer);

    /* Chain to the command buffer; bit 8 presumably selects a
     * second-level batch — verify against the MI_BATCH_BUFFER_START
     * definition. */
    BEGIN_BATCH(batch, 3);
    OUT_BATCH(batch, MI_BATCH_BUFFER_START | (1 << 8) | (1 << 0));
    OUT_RELOC(batch, command_buffer,
              I915_GEM_DOMAIN_COMMAND, 0, 0);
    OUT_BATCH(batch, 0);
    ADVANCE_BATCH(batch);

    dri_bo_unreference(command_buffer);

    /* Have to execute the batch buffer here because MI_BATCH_BUFFER_END
     * will cause control to pass back to ring buffer
     */
    intel_batchbuffer_end_atomic(batch);
    intel_batchbuffer_flush(batch);
    intel_batchbuffer_start_atomic(batch, 0x1000);
}
  1410.  
/*
 * Emit the full media pipeline for one pp pass, in the required
 * order: pipeline select, base addresses, VFE state, CURBE load,
 * interface descriptor load, then the object walker.
 *
 * NOTE(review): MEDIA_VFE_STATE is emitted twice — before the CURBE
 * load and again after the descriptor load.  This matches the
 * existing ordering; confirm the second emission is intentional
 * before removing it.
 */
static void
gen8_pp_pipeline_setup(VADriverContextP ctx,
                       struct i965_post_processing_context *pp_context)
{
    struct intel_batchbuffer *batch = pp_context->batch;

    intel_batchbuffer_start_atomic(batch, 0x1000);
    intel_batchbuffer_emit_mi_flush(batch);
    gen6_pp_pipeline_select(ctx, pp_context);
    gen8_pp_state_base_address(ctx, pp_context);
    gen8_pp_vfe_state(ctx, pp_context);
    gen8_pp_curbe_load(ctx, pp_context);
    gen8_interface_descriptor_load(ctx, pp_context);
    gen8_pp_vfe_state(ctx, pp_context);
    gen8_pp_object_walker(ctx, pp_context);
    intel_batchbuffer_end_atomic(batch);
}
  1428.  
  1429. static VAStatus
  1430. gen8_post_processing(
  1431.     VADriverContextP   ctx,
  1432.     struct i965_post_processing_context *pp_context,
  1433.     const struct i965_surface *src_surface,
  1434.     const VARectangle *src_rect,
  1435.     struct i965_surface *dst_surface,
  1436.     const VARectangle *dst_rect,
  1437.     int                pp_index,
  1438.     void * filter_param
  1439. )
  1440. {
  1441.     VAStatus va_status;
  1442.  
  1443.     va_status = gen8_pp_initialize(ctx, pp_context,
  1444.                                    src_surface,
  1445.                                    src_rect,
  1446.                                    dst_surface,
  1447.                                    dst_rect,
  1448.                                    pp_index,
  1449.                                    filter_param);
  1450.  
  1451.     if (va_status == VA_STATUS_SUCCESS) {
  1452.         gen8_pp_states_setup(ctx, pp_context);
  1453.         gen8_pp_pipeline_setup(ctx, pp_context);
  1454.     }
  1455.  
  1456.     return va_status;
  1457. }
  1458.  
  1459. static void
  1460. gen8_post_processing_context_finalize(VADriverContextP ctx,
  1461.     struct i965_post_processing_context *pp_context)
  1462. {
  1463.     dri_bo_unreference(pp_context->surface_state_binding_table.bo);
  1464.     pp_context->surface_state_binding_table.bo = NULL;
  1465.  
  1466.     dri_bo_unreference(pp_context->pp_dn_context.stmm_bo);
  1467.     pp_context->pp_dn_context.stmm_bo = NULL;
  1468.  
  1469.     if (pp_context->instruction_state.bo) {
  1470.         dri_bo_unreference(pp_context->instruction_state.bo);
  1471.         pp_context->instruction_state.bo = NULL;
  1472.     }
  1473.  
  1474.     if (pp_context->indirect_state.bo) {
  1475.         dri_bo_unreference(pp_context->indirect_state.bo);
  1476.         pp_context->indirect_state.bo = NULL;
  1477.     }
  1478.  
  1479.     if (pp_context->dynamic_state.bo) {
  1480.         dri_bo_unreference(pp_context->dynamic_state.bo);
  1481.         pp_context->dynamic_state.bo = NULL;
  1482.     }
  1483.  
  1484.     free(pp_context->pp_static_parameter);
  1485.     free(pp_context->pp_inline_parameter);
  1486.     pp_context->pp_static_parameter = NULL;
  1487.     pp_context->pp_inline_parameter = NULL;
  1488. }
  1489.  
  1490. #define VPP_CURBE_ALLOCATION_SIZE       32
  1491.  
  1492. void
  1493. gen8_post_processing_context_common_init(VADriverContextP ctx,
  1494.                                          void *data,
  1495.                                          struct pp_module *pp_modules,
  1496.                                          int num_pp_modules,
  1497.                                          struct intel_batchbuffer *batch)
  1498. {
  1499.     struct i965_driver_data *i965 = i965_driver_data(ctx);
  1500.     int i, kernel_size;
  1501.     unsigned int kernel_offset, end_offset;
  1502.     unsigned char *kernel_ptr;
  1503.     struct pp_module *pp_module;
  1504.     struct i965_post_processing_context *pp_context = data;
  1505.  
  1506.     pp_context->vfe_gpu_state.max_num_threads = 60;
  1507.     pp_context->vfe_gpu_state.num_urb_entries = 59;
  1508.     pp_context->vfe_gpu_state.gpgpu_mode = 0;
  1509.     pp_context->vfe_gpu_state.urb_entry_size = 16 - 1;
  1510.     pp_context->vfe_gpu_state.curbe_allocation_size = VPP_CURBE_ALLOCATION_SIZE;
  1511.  
  1512.     pp_context->intel_post_processing = gen8_post_processing;
  1513.     pp_context->finalize = gen8_post_processing_context_finalize;
  1514.  
  1515.     assert(ARRAY_ELEMS(pp_context->pp_modules) == num_pp_modules);
  1516.  
  1517.     memcpy(pp_context->pp_modules, pp_modules, sizeof(pp_context->pp_modules));
  1518.  
  1519.     kernel_size = 4096 ;
  1520.  
  1521.     for (i = 0; i < NUM_PP_MODULES; i++) {
  1522.         pp_module = &pp_context->pp_modules[i];
  1523.  
  1524.         if (pp_module->kernel.bin && pp_module->kernel.size) {
  1525.             kernel_size += pp_module->kernel.size;
  1526.         }
  1527.     }
  1528.  
  1529.     pp_context->instruction_state.bo = dri_bo_alloc(i965->intel.bufmgr,
  1530.                                   "kernel shader",
  1531.                                   kernel_size,
  1532.                                   0x1000);
  1533.     if (pp_context->instruction_state.bo == NULL) {
  1534.         WARN_ONCE("failure to allocate the buffer space for kernel shader in VPP\n");
  1535.         return;
  1536.     }
  1537.  
  1538.     assert(pp_context->instruction_state.bo);
  1539.  
  1540.  
  1541.     pp_context->instruction_state.bo_size = kernel_size;
  1542.     pp_context->instruction_state.end_offset = 0;
  1543.     end_offset = 0;
  1544.  
  1545.     dri_bo_map(pp_context->instruction_state.bo, 1);
  1546.     kernel_ptr = (unsigned char *)(pp_context->instruction_state.bo->virtual);
  1547.  
  1548.     for (i = 0; i < NUM_PP_MODULES; i++) {
  1549.         pp_module = &pp_context->pp_modules[i];
  1550.  
  1551.         kernel_offset = ALIGN(end_offset, 64);
  1552.         pp_module->kernel.kernel_offset = kernel_offset;
  1553.  
  1554.         if (pp_module->kernel.bin && pp_module->kernel.size) {
  1555.  
  1556.             memcpy(kernel_ptr + kernel_offset, pp_module->kernel.bin, pp_module->kernel.size);
  1557.             end_offset = kernel_offset + pp_module->kernel.size;
  1558.         }
  1559.     }
  1560.  
  1561.     pp_context->instruction_state.end_offset = ALIGN(end_offset, 64);
  1562.  
  1563.     dri_bo_unmap(pp_context->instruction_state.bo);
  1564.  
  1565.     /* static & inline parameters */
  1566.     pp_context->pp_static_parameter = calloc(sizeof(struct gen7_pp_static_parameter), 1);
  1567.     pp_context->pp_inline_parameter = calloc(sizeof(struct gen7_pp_inline_parameter), 1);
  1568.  
  1569.     pp_context->batch = batch;
  1570.  
  1571.     pp_context->idrt_size = 5 * sizeof(struct gen8_interface_descriptor_data);
  1572.     pp_context->curbe_size = 256;
  1573.  
  1574. }
  1575.  
  1576. void
  1577. gen8_post_processing_context_init(VADriverContextP ctx,
  1578.                                   void *data,
  1579.                                   struct intel_batchbuffer *batch)
  1580. {
  1581.     struct i965_post_processing_context *pp_context = data;
  1582.  
  1583.     gen8_post_processing_context_common_init(ctx, data, pp_modules_gen8, ARRAY_ELEMS(pp_modules_gen8), batch);
  1584.     avs_init_state(&pp_context->pp_avs_context.state, &gen8_avs_config);
  1585. }
  1586.