Subversion Repositories Kolibri OS

Rev

Blame | Last modification | View Log | Download | RSS feed

  1. /**************************************************************************
  2.  *
  3.  * Copyright 2009 Younes Manton.
  4.  * All Rights Reserved.
  5.  *
  6.  * Permission is hereby granted, free of charge, to any person obtaining a
  7.  * copy of this software and associated documentation files (the
  8.  * "Software"), to deal in the Software without restriction, including
  9.  * without limitation the rights to use, copy, modify, merge, publish,
  10.  * distribute, sub license, and/or sell copies of the Software, and to
  11.  * permit persons to whom the Software is furnished to do so, subject to
  12.  * the following conditions:
  13.  *
  14.  * The above copyright notice and this permission notice (including the
  15.  * next paragraph) shall be included in all copies or substantial portions
  16.  * of the Software.
  17.  *
  18.  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
  19.  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  20.  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
  21.  * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
  22.  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
  23.  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
  24.  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  25.  *
  26.  **************************************************************************/
  27.  
  28. #include <assert.h>
  29.  
  30. #include "pipe/p_compiler.h"
  31. #include "pipe/p_context.h"
  32.  
  33. #include "util/u_memory.h"
  34. #include "util/u_draw.h"
  35. #include "util/u_surface.h"
  36.  
  37. #include "tgsi/tgsi_ureg.h"
  38.  
  39. #include "vl_csc.h"
  40. #include "vl_types.h"
  41. #include "vl_compositor.h"
  42.  
/* Sentinels for the dirty-area rectangle: a rect of
 * (MAX_DIRTY, MAX_DIRTY)-(MIN_DIRTY, MIN_DIRTY) is "empty" (min > max),
 * while (MIN_DIRTY, MIN_DIRTY)-(MAX_DIRTY, MAX_DIRTY) covers everything. */
#define MIN_DIRTY (0)
#define MAX_DIRTY (1 << 15)

/* Vertex shader output slots.  VPOS, COLOR and VTEX all use index 0 because
 * each belongs to a different TGSI semantic (POSITION, COLOR, GENERIC).
 * VTOP/VBOTTOM are the extra GENERIC outputs consumed by the weave
 * (deinterlacing) fragment shader. */
enum VS_OUTPUT
{
   VS_O_VPOS = 0,
   VS_O_COLOR = 0,
   VS_O_VTEX = 0,
   VS_O_VTOP,
   VS_O_VBOTTOM,
};
  54.  
  55. static void *
  56. create_vert_shader(struct vl_compositor *c)
  57. {
  58.    struct ureg_program *shader;
  59.    struct ureg_src vpos, vtex, color;
  60.    struct ureg_dst tmp;
  61.    struct ureg_dst o_vpos, o_vtex, o_color;
  62.    struct ureg_dst o_vtop, o_vbottom;
  63.  
  64.    shader = ureg_create(TGSI_PROCESSOR_VERTEX);
  65.    if (!shader)
  66.       return false;
  67.  
  68.    vpos = ureg_DECL_vs_input(shader, 0);
  69.    vtex = ureg_DECL_vs_input(shader, 1);
  70.    color = ureg_DECL_vs_input(shader, 2);
  71.    tmp = ureg_DECL_temporary(shader);
  72.    o_vpos = ureg_DECL_output(shader, TGSI_SEMANTIC_POSITION, VS_O_VPOS);
  73.    o_color = ureg_DECL_output(shader, TGSI_SEMANTIC_COLOR, VS_O_COLOR);
  74.    o_vtex = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_VTEX);
  75.    o_vtop = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_VTOP);
  76.    o_vbottom = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_VBOTTOM);
  77.  
  78.    /*
  79.     * o_vpos = vpos
  80.     * o_vtex = vtex
  81.     * o_color = color
  82.     */
  83.    ureg_MOV(shader, o_vpos, vpos);
  84.    ureg_MOV(shader, o_vtex, vtex);
  85.    ureg_MOV(shader, o_color, color);
  86.  
  87.    /*
  88.     * tmp.x = vtex.w / 2
  89.     * tmp.y = vtex.w / 4
  90.     *
  91.     * o_vtop.x = vtex.x
  92.     * o_vtop.y = vtex.y * tmp.x + 0.25f
  93.     * o_vtop.z = vtex.y * tmp.y + 0.25f
  94.     * o_vtop.w = 1 / tmp.x
  95.     *
  96.     * o_vbottom.x = vtex.x
  97.     * o_vbottom.y = vtex.y * tmp.x - 0.25f
  98.     * o_vbottom.z = vtex.y * tmp.y - 0.25f
  99.     * o_vbottom.w = 1 / tmp.y
  100.     */
  101.    ureg_MUL(shader, ureg_writemask(tmp, TGSI_WRITEMASK_X),
  102.             ureg_scalar(vtex, TGSI_SWIZZLE_W), ureg_imm1f(shader, 0.5f));
  103.    ureg_MUL(shader, ureg_writemask(tmp, TGSI_WRITEMASK_Y),
  104.             ureg_scalar(vtex, TGSI_SWIZZLE_W), ureg_imm1f(shader, 0.25f));
  105.  
  106.    ureg_MOV(shader, ureg_writemask(o_vtop, TGSI_WRITEMASK_X), vtex);
  107.    ureg_MAD(shader, ureg_writemask(o_vtop, TGSI_WRITEMASK_Y), ureg_scalar(vtex, TGSI_SWIZZLE_Y),
  108.             ureg_scalar(ureg_src(tmp), TGSI_SWIZZLE_X), ureg_imm1f(shader, 0.25f));
  109.    ureg_MAD(shader, ureg_writemask(o_vtop, TGSI_WRITEMASK_Z), ureg_scalar(vtex, TGSI_SWIZZLE_Y),
  110.             ureg_scalar(ureg_src(tmp), TGSI_SWIZZLE_Y), ureg_imm1f(shader, 0.25f));
  111.    ureg_RCP(shader, ureg_writemask(o_vtop, TGSI_WRITEMASK_W),
  112.             ureg_scalar(ureg_src(tmp), TGSI_SWIZZLE_X));
  113.  
  114.    ureg_MOV(shader, ureg_writemask(o_vbottom, TGSI_WRITEMASK_X), vtex);
  115.    ureg_MAD(shader, ureg_writemask(o_vbottom, TGSI_WRITEMASK_Y), ureg_scalar(vtex, TGSI_SWIZZLE_Y),
  116.             ureg_scalar(ureg_src(tmp), TGSI_SWIZZLE_X), ureg_imm1f(shader, -0.25f));
  117.    ureg_MAD(shader, ureg_writemask(o_vbottom, TGSI_WRITEMASK_Z), ureg_scalar(vtex, TGSI_SWIZZLE_Y),
  118.             ureg_scalar(ureg_src(tmp), TGSI_SWIZZLE_Y), ureg_imm1f(shader, -0.25f));
  119.    ureg_RCP(shader, ureg_writemask(o_vbottom, TGSI_WRITEMASK_W),
  120.             ureg_scalar(ureg_src(tmp), TGSI_SWIZZLE_Y));
  121.  
  122.    ureg_END(shader);
  123.  
  124.    return ureg_create_shader_and_destroy(shader, c->pipe);
  125. }
  126.  
  127. static void *
  128. create_frag_shader_video_buffer(struct vl_compositor *c)
  129. {
  130.    struct ureg_program *shader;
  131.    struct ureg_src tc;
  132.    struct ureg_src csc[3];
  133.    struct ureg_src sampler[3];
  134.    struct ureg_dst texel;
  135.    struct ureg_dst fragment;
  136.    unsigned i;
  137.  
  138.    shader = ureg_create(TGSI_PROCESSOR_FRAGMENT);
  139.    if (!shader)
  140.       return false;
  141.  
  142.    tc = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_VTEX, TGSI_INTERPOLATE_LINEAR);
  143.    for (i = 0; i < 3; ++i) {
  144.       csc[i] = ureg_DECL_constant(shader, i);
  145.       sampler[i] = ureg_DECL_sampler(shader, i);
  146.    }
  147.    texel = ureg_DECL_temporary(shader);
  148.    fragment = ureg_DECL_output(shader, TGSI_SEMANTIC_COLOR, 0);
  149.  
  150.    /*
  151.     * texel.xyz = tex(tc, sampler[i])
  152.     * fragment = csc * texel
  153.     */
  154.    for (i = 0; i < 3; ++i)
  155.       ureg_TEX(shader, ureg_writemask(texel, TGSI_WRITEMASK_X << i), TGSI_TEXTURE_2D_ARRAY, tc, sampler[i]);
  156.  
  157.    ureg_MOV(shader, ureg_writemask(texel, TGSI_WRITEMASK_W), ureg_imm1f(shader, 1.0f));
  158.  
  159.    for (i = 0; i < 3; ++i)
  160.       ureg_DP4(shader, ureg_writemask(fragment, TGSI_WRITEMASK_X << i), csc[i], ureg_src(texel));
  161.  
  162.    ureg_MOV(shader, ureg_writemask(fragment, TGSI_WRITEMASK_W), ureg_imm1f(shader, 1.0f));
  163.  
  164.    ureg_release_temporary(shader, texel);
  165.    ureg_END(shader);
  166.  
  167.    return ureg_create_shader_and_destroy(shader, c->pipe);
  168. }
  169.  
  170. static void *
  171. create_frag_shader_weave(struct vl_compositor *c)
  172. {
  173.    struct ureg_program *shader;
  174.    struct ureg_src i_tc[2];
  175.    struct ureg_src csc[3];
  176.    struct ureg_src sampler[3];
  177.    struct ureg_dst t_tc[2];
  178.    struct ureg_dst t_texel[2];
  179.    struct ureg_dst o_fragment;
  180.    unsigned i, j;
  181.  
  182.    shader = ureg_create(TGSI_PROCESSOR_FRAGMENT);
  183.    if (!shader)
  184.       return false;
  185.  
  186.    i_tc[0] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_VTOP, TGSI_INTERPOLATE_LINEAR);
  187.    i_tc[1] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_VBOTTOM, TGSI_INTERPOLATE_LINEAR);
  188.  
  189.    for (i = 0; i < 3; ++i) {
  190.       csc[i] = ureg_DECL_constant(shader, i);
  191.       sampler[i] = ureg_DECL_sampler(shader, i);
  192.    }
  193.  
  194.    for (i = 0; i < 2; ++i) {
  195.       t_tc[i] = ureg_DECL_temporary(shader);
  196.       t_texel[i] = ureg_DECL_temporary(shader);
  197.    }
  198.    o_fragment = ureg_DECL_output(shader, TGSI_SEMANTIC_COLOR, 0);
  199.  
  200.    /* calculate the texture offsets
  201.     * t_tc.x = i_tc.x
  202.     * t_tc.y = (round(i_tc.y - 0.5) + 0.5) / height * 2
  203.     */
  204.    for (i = 0; i < 2; ++i) {
  205.       ureg_MOV(shader, ureg_writemask(t_tc[i], TGSI_WRITEMASK_X), i_tc[i]);
  206.       ureg_SUB(shader, ureg_writemask(t_tc[i], TGSI_WRITEMASK_YZ),
  207.                i_tc[i], ureg_imm1f(shader, 0.5f));
  208.       ureg_ROUND(shader, ureg_writemask(t_tc[i], TGSI_WRITEMASK_YZ), ureg_src(t_tc[i]));
  209.       ureg_MOV(shader, ureg_writemask(t_tc[i], TGSI_WRITEMASK_W),
  210.                ureg_imm1f(shader, i ? 1.0f : 0.0f));
  211.       ureg_ADD(shader, ureg_writemask(t_tc[i], TGSI_WRITEMASK_YZ),
  212.                ureg_src(t_tc[i]), ureg_imm1f(shader, 0.5f));
  213.       ureg_MUL(shader, ureg_writemask(t_tc[i], TGSI_WRITEMASK_Y),
  214.                ureg_src(t_tc[i]), ureg_scalar(i_tc[0], TGSI_SWIZZLE_W));
  215.       ureg_MUL(shader, ureg_writemask(t_tc[i], TGSI_WRITEMASK_Z),
  216.                ureg_src(t_tc[i]), ureg_scalar(i_tc[1], TGSI_SWIZZLE_W));
  217.    }
  218.  
  219.    /* fetch the texels
  220.     * texel[0..1].x = tex(t_tc[0..1][0])
  221.     * texel[0..1].y = tex(t_tc[0..1][1])
  222.     * texel[0..1].z = tex(t_tc[0..1][2])
  223.     */
  224.    for (i = 0; i < 2; ++i)
  225.       for (j = 0; j < 3; ++j) {
  226.          struct ureg_src src = ureg_swizzle(ureg_src(t_tc[i]),
  227.             TGSI_SWIZZLE_X, j ? TGSI_SWIZZLE_Z : TGSI_SWIZZLE_Y, TGSI_SWIZZLE_W, TGSI_SWIZZLE_W);
  228.  
  229.          ureg_TEX(shader, ureg_writemask(t_texel[i], TGSI_WRITEMASK_X << j),
  230.                   TGSI_TEXTURE_2D_ARRAY, src, sampler[j]);
  231.       }
  232.  
  233.    /* calculate linear interpolation factor
  234.     * factor = |round(i_tc.y) - i_tc.y| * 2
  235.     */
  236.    ureg_ROUND(shader, ureg_writemask(t_tc[0], TGSI_WRITEMASK_YZ), i_tc[0]);
  237.    ureg_ADD(shader, ureg_writemask(t_tc[0], TGSI_WRITEMASK_YZ),
  238.             ureg_src(t_tc[0]), ureg_negate(i_tc[0]));
  239.    ureg_MUL(shader, ureg_writemask(t_tc[0], TGSI_WRITEMASK_YZ),
  240.             ureg_abs(ureg_src(t_tc[0])), ureg_imm1f(shader, 2.0f));
  241.    ureg_LRP(shader, t_texel[0], ureg_swizzle(ureg_src(t_tc[0]),
  242.             TGSI_SWIZZLE_Y, TGSI_SWIZZLE_Z, TGSI_SWIZZLE_Z, TGSI_SWIZZLE_Z),
  243.             ureg_src(t_texel[0]), ureg_src(t_texel[1]));
  244.  
  245.    /* and finally do colour space transformation
  246.     * fragment = csc * texel
  247.     */
  248.    ureg_MOV(shader, ureg_writemask(t_texel[0], TGSI_WRITEMASK_W), ureg_imm1f(shader, 1.0f));
  249.    for (i = 0; i < 3; ++i)
  250.       ureg_DP4(shader, ureg_writemask(o_fragment, TGSI_WRITEMASK_X << i), csc[i], ureg_src(t_texel[0]));
  251.  
  252.    ureg_MOV(shader, ureg_writemask(o_fragment, TGSI_WRITEMASK_W), ureg_imm1f(shader, 1.0f));
  253.  
  254.    for (i = 0; i < 2; ++i) {
  255.       ureg_release_temporary(shader, t_texel[i]);
  256.       ureg_release_temporary(shader, t_tc[i]);
  257.    }
  258.  
  259.    ureg_END(shader);
  260.  
  261.    return ureg_create_shader_and_destroy(shader, c->pipe);
  262. }
  263.  
  264. static void *
  265. create_frag_shader_palette(struct vl_compositor *c, bool include_cc)
  266. {
  267.    struct ureg_program *shader;
  268.    struct ureg_src csc[3];
  269.    struct ureg_src tc;
  270.    struct ureg_src sampler;
  271.    struct ureg_src palette;
  272.    struct ureg_dst texel;
  273.    struct ureg_dst fragment;
  274.    unsigned i;
  275.  
  276.    shader = ureg_create(TGSI_PROCESSOR_FRAGMENT);
  277.    if (!shader)
  278.       return false;
  279.  
  280.    for (i = 0; include_cc && i < 3; ++i)
  281.       csc[i] = ureg_DECL_constant(shader, i);
  282.  
  283.    tc = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_VTEX, TGSI_INTERPOLATE_LINEAR);
  284.    sampler = ureg_DECL_sampler(shader, 0);
  285.    palette = ureg_DECL_sampler(shader, 1);
  286.  
  287.    texel = ureg_DECL_temporary(shader);
  288.    fragment = ureg_DECL_output(shader, TGSI_SEMANTIC_COLOR, 0);
  289.  
  290.    /*
  291.     * texel = tex(tc, sampler)
  292.     * fragment.xyz = tex(texel, palette) * csc
  293.     * fragment.a = texel.a
  294.     */
  295.    ureg_TEX(shader, texel, TGSI_TEXTURE_2D, tc, sampler);
  296.    ureg_MOV(shader, ureg_writemask(fragment, TGSI_WRITEMASK_W), ureg_src(texel));
  297.  
  298.    if (include_cc) {
  299.       ureg_TEX(shader, texel, TGSI_TEXTURE_1D, ureg_src(texel), palette);
  300.       for (i = 0; i < 3; ++i)
  301.          ureg_DP4(shader, ureg_writemask(fragment, TGSI_WRITEMASK_X << i), csc[i], ureg_src(texel));
  302.    } else {
  303.       ureg_TEX(shader, ureg_writemask(fragment, TGSI_WRITEMASK_XYZ),
  304.                TGSI_TEXTURE_1D, ureg_src(texel), palette);
  305.    }
  306.  
  307.    ureg_release_temporary(shader, texel);
  308.    ureg_END(shader);
  309.  
  310.    return ureg_create_shader_and_destroy(shader, c->pipe);
  311. }
  312.  
  313. static void *
  314. create_frag_shader_rgba(struct vl_compositor *c)
  315. {
  316.    struct ureg_program *shader;
  317.    struct ureg_src tc, color, sampler;
  318.    struct ureg_dst texel, fragment;
  319.  
  320.    shader = ureg_create(TGSI_PROCESSOR_FRAGMENT);
  321.    if (!shader)
  322.       return false;
  323.  
  324.    tc = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_VTEX, TGSI_INTERPOLATE_LINEAR);
  325.    color = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_COLOR, VS_O_COLOR, TGSI_INTERPOLATE_LINEAR);
  326.    sampler = ureg_DECL_sampler(shader, 0);
  327.    texel = ureg_DECL_temporary(shader);
  328.    fragment = ureg_DECL_output(shader, TGSI_SEMANTIC_COLOR, 0);
  329.  
  330.    /*
  331.     * fragment = tex(tc, sampler)
  332.     */
  333.    ureg_TEX(shader, texel, TGSI_TEXTURE_2D, tc, sampler);
  334.    ureg_MUL(shader, fragment, ureg_src(texel), color);
  335.    ureg_END(shader);
  336.  
  337.    return ureg_create_shader_and_destroy(shader, c->pipe);
  338. }
  339.  
  340. static bool
  341. init_shaders(struct vl_compositor *c)
  342. {
  343.    assert(c);
  344.  
  345.    c->vs = create_vert_shader(c);
  346.    if (!c->vs) {
  347.       debug_printf("Unable to create vertex shader.\n");
  348.       return false;
  349.    }
  350.  
  351.    c->fs_video_buffer = create_frag_shader_video_buffer(c);
  352.    if (!c->fs_video_buffer) {
  353.       debug_printf("Unable to create YCbCr-to-RGB fragment shader.\n");
  354.       return false;
  355.    }
  356.  
  357.    c->fs_weave = create_frag_shader_weave(c);
  358.    if (!c->fs_weave) {
  359.       debug_printf("Unable to create YCbCr-to-RGB weave fragment shader.\n");
  360.       return false;
  361.    }
  362.  
  363.    c->fs_palette.yuv = create_frag_shader_palette(c, true);
  364.    if (!c->fs_palette.yuv) {
  365.       debug_printf("Unable to create YUV-Palette-to-RGB fragment shader.\n");
  366.       return false;
  367.    }
  368.  
  369.    c->fs_palette.rgb = create_frag_shader_palette(c, false);
  370.    if (!c->fs_palette.rgb) {
  371.       debug_printf("Unable to create RGB-Palette-to-RGB fragment shader.\n");
  372.       return false;
  373.    }
  374.  
  375.    c->fs_rgba = create_frag_shader_rgba(c);
  376.    if (!c->fs_rgba) {
  377.       debug_printf("Unable to create RGB-to-RGB fragment shader.\n");
  378.       return false;
  379.    }
  380.  
  381.    return true;
  382. }
  383.  
/* Destroy every shader CSO created by init_shaders().
 * Must only be called after a fully successful init_shaders(). */
static void cleanup_shaders(struct vl_compositor *c)
{
   assert(c);

   c->pipe->delete_vs_state(c->pipe, c->vs);
   c->pipe->delete_fs_state(c->pipe, c->fs_video_buffer);
   c->pipe->delete_fs_state(c->pipe, c->fs_weave);
   c->pipe->delete_fs_state(c->pipe, c->fs_palette.yuv);
   c->pipe->delete_fs_state(c->pipe, c->fs_palette.rgb);
   c->pipe->delete_fs_state(c->pipe, c->fs_rgba);
}
  395.  
/* Create the fixed-function pipe state objects the compositor uses:
 * two samplers (linear / nearest), two blend states (clear / add),
 * a rasterizer state and a pass-through depth-stencil-alpha state.
 * The DSA state is bound immediately; everything else is bound per draw.
 * NOTE(review): return value is always true — creation failures of the
 * individual CSOs are not checked here; verify callers tolerate NULL CSOs. */
static bool
init_pipe_state(struct vl_compositor *c)
{
   struct pipe_rasterizer_state rast;
   struct pipe_sampler_state sampler;
   struct pipe_blend_state blend;
   struct pipe_depth_stencil_alpha_state dsa;
   unsigned i;

   assert(c);

   /* single color buffer, no depth buffer */
   c->fb_state.nr_cbufs = 1;
   c->fb_state.zsbuf = NULL;

   memset(&sampler, 0, sizeof(sampler));
   sampler.wrap_s = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
   sampler.wrap_t = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
   sampler.wrap_r = PIPE_TEX_WRAP_REPEAT;
   sampler.min_img_filter = PIPE_TEX_FILTER_LINEAR;
   sampler.min_mip_filter = PIPE_TEX_MIPFILTER_NONE;
   sampler.mag_img_filter = PIPE_TEX_FILTER_LINEAR;
   sampler.compare_mode = PIPE_TEX_COMPARE_NONE;
   sampler.compare_func = PIPE_FUNC_ALWAYS;
   sampler.normalized_coords = 1;

   c->sampler_linear = c->pipe->create_sampler_state(c->pipe, &sampler);

   /* same sampler but with nearest filtering */
   sampler.min_img_filter = PIPE_TEX_FILTER_NEAREST;
   sampler.mag_img_filter = PIPE_TEX_FILTER_NEAREST;
   c->sampler_nearest = c->pipe->create_sampler_state(c->pipe, &sampler);

   /* "clear" blend state: straight overwrite, no blending */
   memset(&blend, 0, sizeof blend);
   blend.independent_blend_enable = 0;
   blend.rt[0].blend_enable = 0;
   blend.logicop_enable = 0;
   blend.logicop_func = PIPE_LOGICOP_CLEAR;
   blend.rt[0].colormask = PIPE_MASK_RGBA;
   blend.dither = 0;
   c->blend_clear = c->pipe->create_blend_state(c->pipe, &blend);

   /* "add" blend state: classic src-alpha over-blending for upper layers */
   blend.rt[0].blend_enable = 1;
   blend.rt[0].rgb_func = PIPE_BLEND_ADD;
   blend.rt[0].rgb_src_factor = PIPE_BLENDFACTOR_SRC_ALPHA;
   blend.rt[0].rgb_dst_factor = PIPE_BLENDFACTOR_INV_SRC_ALPHA;
   blend.rt[0].alpha_func = PIPE_BLEND_ADD;
   blend.rt[0].alpha_src_factor = PIPE_BLENDFACTOR_ONE;
   blend.rt[0].alpha_dst_factor = PIPE_BLENDFACTOR_ONE;
   c->blend_add = c->pipe->create_blend_state(c->pipe, &blend);

   /* simple filled-quad rasterizer with scissor enabled (used to clip
    * to the destination area) */
   memset(&rast, 0, sizeof rast);
   rast.flatshade = 0;
   rast.front_ccw = 1;
   rast.cull_face = PIPE_FACE_NONE;
   rast.fill_back = PIPE_POLYGON_MODE_FILL;
   rast.fill_front = PIPE_POLYGON_MODE_FILL;
   rast.scissor = 1;
   rast.line_width = 1;
   rast.point_size_per_vertex = 1;
   rast.offset_units = 1;
   rast.offset_scale = 1;
   rast.half_pixel_center = 1;
   rast.bottom_edge_rule = 1;
   rast.depth_clip = 1;

   c->rast = c->pipe->create_rasterizer_state(c->pipe, &rast);

   /* depth/stencil/alpha all disabled — compositing is pure 2D */
   memset(&dsa, 0, sizeof dsa);
   dsa.depth.enabled = 0;
   dsa.depth.writemask = 0;
   dsa.depth.func = PIPE_FUNC_ALWAYS;
   for (i = 0; i < 2; ++i) {
      dsa.stencil[i].enabled = 0;
      dsa.stencil[i].func = PIPE_FUNC_ALWAYS;
      dsa.stencil[i].fail_op = PIPE_STENCIL_OP_KEEP;
      dsa.stencil[i].zpass_op = PIPE_STENCIL_OP_KEEP;
      dsa.stencil[i].zfail_op = PIPE_STENCIL_OP_KEEP;
      dsa.stencil[i].valuemask = 0;
      dsa.stencil[i].writemask = 0;
   }
   dsa.alpha.enabled = 0;
   dsa.alpha.func = PIPE_FUNC_ALWAYS;
   dsa.alpha.ref_value = 0;
   c->dsa = c->pipe->create_depth_stencil_alpha_state(c->pipe, &dsa);
   c->pipe->bind_depth_stencil_alpha_state(c->pipe, c->dsa);

   return true;
}
  483.  
/* Destroy the pipe state objects created by init_pipe_state(). */
static void cleanup_pipe_state(struct vl_compositor *c)
{
   assert(c);

   /* Asserted in softpipe_delete_fs_state() for some reason */
   c->pipe->bind_vs_state(c->pipe, NULL);
   c->pipe->bind_fs_state(c->pipe, NULL);

   c->pipe->delete_depth_stencil_alpha_state(c->pipe, c->dsa);
   c->pipe->delete_sampler_state(c->pipe, c->sampler_linear);
   c->pipe->delete_sampler_state(c->pipe, c->sampler_nearest);
   c->pipe->delete_blend_state(c->pipe, c->blend_clear);
   c->pipe->delete_blend_state(c->pipe, c->blend_add);
   c->pipe->delete_rasterizer_state(c->pipe, c->rast);
}
  499.  
  500. static bool
  501. create_vertex_buffer(struct vl_compositor *c)
  502. {
  503.    assert(c);
  504.  
  505.    pipe_resource_reference(&c->vertex_buf.buffer, NULL);
  506.    c->vertex_buf.buffer = pipe_buffer_create
  507.    (
  508.       c->pipe->screen,
  509.       PIPE_BIND_VERTEX_BUFFER,
  510.       PIPE_USAGE_STREAM,
  511.       c->vertex_buf.stride * VL_COMPOSITOR_MAX_LAYERS * 4
  512.    );
  513.  
  514.    return c->vertex_buf.buffer != NULL;
  515. }
  516.  
  517. static bool
  518. init_buffers(struct vl_compositor *c)
  519. {
  520.    struct pipe_vertex_element vertex_elems[3];
  521.  
  522.    assert(c);
  523.  
  524.    /*
  525.     * Create our vertex buffer and vertex buffer elements
  526.     */
  527.    c->vertex_buf.stride = sizeof(struct vertex2f) + sizeof(struct vertex4f) * 2;
  528.    c->vertex_buf.buffer_offset = 0;
  529.    create_vertex_buffer(c);
  530.  
  531.    vertex_elems[0].src_offset = 0;
  532.    vertex_elems[0].instance_divisor = 0;
  533.    vertex_elems[0].vertex_buffer_index = 0;
  534.    vertex_elems[0].src_format = PIPE_FORMAT_R32G32_FLOAT;
  535.    vertex_elems[1].src_offset = sizeof(struct vertex2f);
  536.    vertex_elems[1].instance_divisor = 0;
  537.    vertex_elems[1].vertex_buffer_index = 0;
  538.    vertex_elems[1].src_format = PIPE_FORMAT_R32G32B32A32_FLOAT;
  539.    vertex_elems[2].src_offset = sizeof(struct vertex2f) + sizeof(struct vertex4f);
  540.    vertex_elems[2].instance_divisor = 0;
  541.    vertex_elems[2].vertex_buffer_index = 0;
  542.    vertex_elems[2].src_format = PIPE_FORMAT_R32G32B32A32_FLOAT;
  543.    c->vertex_elems_state = c->pipe->create_vertex_elements_state(c->pipe, 3, vertex_elems);
  544.  
  545.    return true;
  546. }
  547.  
/* Release the vertex element state and drop the vertex buffer reference. */
static void
cleanup_buffers(struct vl_compositor *c)
{
   assert(c);

   c->pipe->delete_vertex_elements_state(c->pipe, c->vertex_elems_state);
   pipe_resource_reference(&c->vertex_buf.buffer, NULL);
}
  556.  
  557. static INLINE struct u_rect
  558. default_rect(struct vl_compositor_layer *layer)
  559. {
  560.    struct pipe_resource *res = layer->sampler_views[0]->texture;
  561.    struct u_rect rect = { 0, res->width0, 0, res->height0 * res->array_size };
  562.    return rect;
  563. }
  564.  
  565. static INLINE struct vertex2f
  566. calc_topleft(struct vertex2f size, struct u_rect rect)
  567. {
  568.    struct vertex2f res = { rect.x0 / size.x, rect.y0 / size.y };
  569.    return res;
  570. }
  571.  
  572. static INLINE struct vertex2f
  573. calc_bottomright(struct vertex2f size, struct u_rect rect)
  574. {
  575.    struct vertex2f res = { rect.x1 / size.x, rect.y1 / size.y };
  576.    return res;
  577. }
  578.  
  579. static INLINE void
  580. calc_src_and_dst(struct vl_compositor_layer *layer, unsigned width, unsigned height,
  581.                  struct u_rect src, struct u_rect dst)
  582. {
  583.    struct vertex2f size =  { width, height };
  584.  
  585.    layer->src.tl = calc_topleft(size, src);
  586.    layer->src.br = calc_bottomright(size, src);
  587.    layer->dst.tl = calc_topleft(size, dst);
  588.    layer->dst.br = calc_bottomright(size, dst);
  589.    layer->zw.x = 0.0f;
  590.    layer->zw.y = size.y;
  591. }
  592.  
  593. static void
  594. gen_rect_verts(struct vertex2f *vb, struct vl_compositor_layer *layer)
  595. {
  596.    assert(vb && layer);
  597.  
  598.    vb[ 0].x = layer->dst.tl.x;
  599.    vb[ 0].y = layer->dst.tl.y;
  600.    vb[ 1].x = layer->src.tl.x;
  601.    vb[ 1].y = layer->src.tl.y;
  602.    vb[ 2] = layer->zw;
  603.    vb[ 3].x = layer->colors[0].x;
  604.    vb[ 3].y = layer->colors[0].y;
  605.    vb[ 4].x = layer->colors[0].z;
  606.    vb[ 4].y = layer->colors[0].w;
  607.  
  608.    vb[ 5].x = layer->dst.br.x;
  609.    vb[ 5].y = layer->dst.tl.y;
  610.    vb[ 6].x = layer->src.br.x;
  611.    vb[ 6].y = layer->src.tl.y;
  612.    vb[ 7] = layer->zw;
  613.    vb[ 8].x = layer->colors[1].x;
  614.    vb[ 8].y = layer->colors[1].y;
  615.    vb[ 9].x = layer->colors[1].z;
  616.    vb[ 9].y = layer->colors[1].w;
  617.  
  618.    vb[10].x = layer->dst.br.x;
  619.    vb[10].y = layer->dst.br.y;
  620.    vb[11].x = layer->src.br.x;
  621.    vb[11].y = layer->src.br.y;
  622.    vb[12] = layer->zw;
  623.    vb[13].x = layer->colors[2].x;
  624.    vb[13].y = layer->colors[2].y;
  625.    vb[14].x = layer->colors[2].z;
  626.    vb[14].y = layer->colors[2].w;
  627.  
  628.    vb[15].x = layer->dst.tl.x;
  629.    vb[15].y = layer->dst.br.y;
  630.    vb[16].x = layer->src.tl.x;
  631.    vb[16].y = layer->src.br.y;
  632.    vb[17] = layer->zw;
  633.    vb[18].x = layer->colors[3].x;
  634.    vb[18].y = layer->colors[3].y;
  635.    vb[19].x = layer->colors[3].z;
  636.    vb[19].y = layer->colors[3].w;
  637. }
  638.  
  639. static INLINE struct u_rect
  640. calc_drawn_area(struct vl_compositor_state *s, struct vl_compositor_layer *layer)
  641. {
  642.    struct u_rect result;
  643.  
  644.    // scale
  645.    result.x0 = layer->dst.tl.x * layer->viewport.scale[0] + layer->viewport.translate[0];
  646.    result.y0 = layer->dst.tl.y * layer->viewport.scale[1] + layer->viewport.translate[1];
  647.    result.x1 = layer->dst.br.x * layer->viewport.scale[0] + layer->viewport.translate[0];
  648.    result.y1 = layer->dst.br.y * layer->viewport.scale[1] + layer->viewport.translate[1];
  649.  
  650.    // and clip
  651.    result.x0 = MAX2(result.x0, s->scissor.minx);
  652.    result.y0 = MAX2(result.y0, s->scissor.miny);
  653.    result.x1 = MIN2(result.x1, s->scissor.maxx);
  654.    result.y1 = MIN2(result.y1, s->scissor.maxy);
  655.    return result;
  656. }
  657.  
  658. static void
  659. gen_vertex_data(struct vl_compositor *c, struct vl_compositor_state *s, struct u_rect *dirty)
  660. {
  661.    struct vertex2f *vb;
  662.    struct pipe_transfer *buf_transfer;
  663.    unsigned i;
  664.  
  665.    assert(c);
  666.  
  667.    vb = pipe_buffer_map(c->pipe, c->vertex_buf.buffer,
  668.                         PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD_RANGE | PIPE_TRANSFER_DONTBLOCK,
  669.                         &buf_transfer);
  670.  
  671.    if (!vb) {
  672.       // If buffer is still locked from last draw create a new one
  673.       create_vertex_buffer(c);
  674.       vb = pipe_buffer_map(c->pipe, c->vertex_buf.buffer,
  675.                            PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD_RANGE,
  676.                            &buf_transfer);
  677.    }
  678.  
  679.    for (i = 0; i < VL_COMPOSITOR_MAX_LAYERS; i++) {
  680.       if (s->used_layers & (1 << i)) {
  681.          struct vl_compositor_layer *layer = &s->layers[i];
  682.          gen_rect_verts(vb, layer);
  683.          vb += 20;
  684.  
  685.          if (!layer->viewport_valid) {
  686.             layer->viewport.scale[0] = c->fb_state.width;
  687.             layer->viewport.scale[1] = c->fb_state.height;
  688.             layer->viewport.translate[0] = 0;
  689.             layer->viewport.translate[1] = 0;
  690.          }
  691.  
  692.          if (dirty && layer->clearing) {
  693.             struct u_rect drawn = calc_drawn_area(s, layer);
  694.             if (
  695.              dirty->x0 >= drawn.x0 &&
  696.              dirty->y0 >= drawn.y0 &&
  697.              dirty->x1 <= drawn.x1 &&
  698.              dirty->y1 <= drawn.y1) {
  699.  
  700.                // We clear the dirty area anyway, no need for clear_render_target
  701.                dirty->x0 = dirty->y0 = MAX_DIRTY;
  702.                dirty->x1 = dirty->y1 = MIN_DIRTY;
  703.             }
  704.          }
  705.       }
  706.    }
  707.  
  708.    pipe_buffer_unmap(c->pipe, buf_transfer);
  709. }
  710.  
/* Draw every active layer as one quad.
 *
 * For each used layer: bind its blend state (layer 0 defaults to the
 * opaque "clear" blend, upper layers to alpha "add" blending), its
 * viewport, fragment shader and samplers, then draw the four vertices
 * generated by gen_vertex_data().  The drawn screen area is accumulated
 * into *dirty (if given) for the next frame's partial clear. */
static void
draw_layers(struct vl_compositor *c, struct vl_compositor_state *s, struct u_rect *dirty)
{
   unsigned vb_index, i;

   assert(c);

   for (i = 0, vb_index = 0; i < VL_COMPOSITOR_MAX_LAYERS; ++i) {
      if (s->used_layers & (1 << i)) {
         struct vl_compositor_layer *layer = &s->layers[i];
         struct pipe_sampler_view **samplers = &layer->sampler_views[0];
         /* count leading non-NULL sampler views (1..3) */
         unsigned num_sampler_views = !samplers[1] ? 1 : !samplers[2] ? 2 : 3;
         void *blend = layer->blend ? layer->blend : i ? c->blend_add : c->blend_clear;

         c->pipe->bind_blend_state(c->pipe, blend);
         c->pipe->set_viewport_states(c->pipe, 0, 1, &layer->viewport);
         c->pipe->bind_fs_state(c->pipe, layer->fs);
         c->pipe->bind_fragment_sampler_states(c->pipe, num_sampler_views, layer->samplers);
         c->pipe->set_fragment_sampler_views(c->pipe, num_sampler_views, samplers);
         util_draw_arrays(c->pipe, PIPE_PRIM_QUADS, vb_index * 4, 4);
         vb_index++;

         if (dirty) {
            // Remember the currently drawn area as dirty for the next draw command
            struct u_rect drawn = calc_drawn_area(s, layer);
            dirty->x0 = MIN2(drawn.x0, dirty->x0);
            dirty->y0 = MIN2(drawn.y0, dirty->y0);
            dirty->x1 = MAX2(drawn.x1, dirty->x1);
            dirty->y1 = MAX2(drawn.y1, dirty->y1);
         }
      }
   }
}
  744.  
  745. void
  746. vl_compositor_reset_dirty_area(struct u_rect *dirty)
  747. {
  748.    assert(dirty);
  749.  
  750.    dirty->x0 = dirty->y0 = MIN_DIRTY;
  751.    dirty->x1 = dirty->y1 = MAX_DIRTY;
  752. }
  753.  
  754. void
  755. vl_compositor_set_clear_color(struct vl_compositor_state *s, union pipe_color_union *color)
  756. {
  757.    assert(s);
  758.    assert(color);
  759.  
  760.    s->clear_color = *color;
  761. }
  762.  
  763. void
  764. vl_compositor_get_clear_color(struct vl_compositor_state *s, union pipe_color_union *color)
  765. {
  766.    assert(s);
  767.    assert(color);
  768.  
  769.    *color = s->clear_color;
  770. }
  771.  
  772. void
  773. vl_compositor_clear_layers(struct vl_compositor_state *s)
  774. {
  775.    unsigned i, j;
  776.  
  777.    assert(s);
  778.  
  779.    s->used_layers = 0;
  780.    for ( i = 0; i < VL_COMPOSITOR_MAX_LAYERS; ++i) {
  781.       struct vertex4f v_one = { 1.0f, 1.0f, 1.0f, 1.0f };
  782.       s->layers[i].clearing = i ? false : true;
  783.       s->layers[i].blend = NULL;
  784.       s->layers[i].fs = NULL;
  785.       s->layers[i].viewport.scale[2] = 1;
  786.       s->layers[i].viewport.scale[3] = 1;
  787.       s->layers[i].viewport.translate[2] = 0;
  788.       s->layers[i].viewport.translate[3] = 0;
  789.  
  790.       for ( j = 0; j < 3; j++)
  791.          pipe_sampler_view_reference(&s->layers[i].sampler_views[j], NULL);
  792.       for ( j = 0; j < 4; ++j)
  793.          s->layers[i].colors[j] = v_one;
  794.    }
  795. }
  796.  
/* Tear down the compositor: buffers first, then shaders, then the pipe
 * state (cleanup_pipe_state unbinds the shaders before deleting CSOs). */
void
vl_compositor_cleanup(struct vl_compositor *c)
{
   assert(c);

   cleanup_buffers(c);
   cleanup_shaders(c);
   cleanup_pipe_state(c);
}
  806.  
  807. void
  808. vl_compositor_set_csc_matrix(struct vl_compositor_state *s, vl_csc_matrix const *matrix)
  809. {
  810.    struct pipe_transfer *buf_transfer;
  811.  
  812.    assert(s);
  813.  
  814.    memcpy
  815.    (
  816.       pipe_buffer_map(s->pipe, s->csc_matrix,
  817.                       PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD_RANGE,
  818.                       &buf_transfer),
  819.       matrix,
  820.       sizeof(vl_csc_matrix)
  821.    );
  822.  
  823.    pipe_buffer_unmap(s->pipe, buf_transfer);
  824. }
  825.  
  826. void
  827. vl_compositor_set_dst_clip(struct vl_compositor_state *s, struct u_rect *dst_clip)
  828. {
  829.    assert(s);
  830.  
  831.    s->scissor_valid = dst_clip != NULL;
  832.    if (dst_clip) {
  833.       s->scissor.minx = dst_clip->x0;
  834.       s->scissor.miny = dst_clip->y0;
  835.       s->scissor.maxx = dst_clip->x1;
  836.       s->scissor.maxy = dst_clip->y1;
  837.    }
  838. }
  839.  
  840. void
  841. vl_compositor_set_layer_blend(struct vl_compositor_state *s,
  842.                               unsigned layer, void *blend,
  843.                               bool is_clearing)
  844. {
  845.    assert(s && blend);
  846.  
  847.    assert(layer < VL_COMPOSITOR_MAX_LAYERS);
  848.  
  849.    s->layers[layer].clearing = is_clearing;
  850.    s->layers[layer].blend = blend;
  851. }
  852.  
  853. void
  854. vl_compositor_set_layer_dst_area(struct vl_compositor_state *s,
  855.                                  unsigned layer, struct u_rect *dst_area)
  856. {
  857.    assert(s);
  858.  
  859.    assert(layer < VL_COMPOSITOR_MAX_LAYERS);
  860.  
  861.    s->layers[layer].viewport_valid = dst_area != NULL;
  862.    if (dst_area) {
  863.       s->layers[layer].viewport.scale[0] = dst_area->x1 - dst_area->x0;
  864.       s->layers[layer].viewport.scale[1] = dst_area->y1 - dst_area->y0;
  865.       s->layers[layer].viewport.translate[0] = dst_area->x0;
  866.       s->layers[layer].viewport.translate[1] = dst_area->y0;
  867.    }
  868. }
  869.  
  870. void
  871. vl_compositor_set_buffer_layer(struct vl_compositor_state *s,
  872.                                struct vl_compositor *c,
  873.                                unsigned layer,
  874.                                struct pipe_video_buffer *buffer,
  875.                                struct u_rect *src_rect,
  876.                                struct u_rect *dst_rect,
  877.                                enum vl_compositor_deinterlace deinterlace)
  878. {
  879.    struct pipe_sampler_view **sampler_views;
  880.    unsigned i;
  881.  
  882.    assert(s && c && buffer);
  883.  
  884.    assert(layer < VL_COMPOSITOR_MAX_LAYERS);
  885.  
  886.    s->used_layers |= 1 << layer;
  887.    sampler_views = buffer->get_sampler_view_components(buffer);
  888.    for (i = 0; i < 3; ++i) {
  889.       s->layers[layer].samplers[i] = c->sampler_linear;
  890.       pipe_sampler_view_reference(&s->layers[layer].sampler_views[i], sampler_views[i]);
  891.    }
  892.  
  893.    calc_src_and_dst(&s->layers[layer], buffer->width, buffer->height,
  894.                     src_rect ? *src_rect : default_rect(&s->layers[layer]),
  895.                     dst_rect ? *dst_rect : default_rect(&s->layers[layer]));
  896.  
  897.    if (buffer->interlaced) {
  898.       float half_a_line = 0.5f / s->layers[layer].zw.y;
  899.       switch(deinterlace) {
  900.       case VL_COMPOSITOR_WEAVE:
  901.          s->layers[layer].fs = c->fs_weave;
  902.          break;
  903.  
  904.       case VL_COMPOSITOR_BOB_TOP:
  905.          s->layers[layer].zw.x = 0.0f;
  906.          s->layers[layer].src.tl.y += half_a_line;
  907.          s->layers[layer].src.br.y += half_a_line;
  908.          s->layers[layer].fs = c->fs_video_buffer;
  909.          break;
  910.  
  911.       case VL_COMPOSITOR_BOB_BOTTOM:
  912.          s->layers[layer].zw.x = 1.0f;
  913.          s->layers[layer].src.tl.y -= half_a_line;
  914.          s->layers[layer].src.br.y -= half_a_line;
  915.          s->layers[layer].fs = c->fs_video_buffer;
  916.          break;
  917.       }
  918.  
  919.    } else
  920.       s->layers[layer].fs = c->fs_video_buffer;
  921. }
  922.  
  923. void
  924. vl_compositor_set_palette_layer(struct vl_compositor_state *s,
  925.                                 struct vl_compositor *c,
  926.                                 unsigned layer,
  927.                                 struct pipe_sampler_view *indexes,
  928.                                 struct pipe_sampler_view *palette,
  929.                                 struct u_rect *src_rect,
  930.                                 struct u_rect *dst_rect,
  931.                                 bool include_color_conversion)
  932. {
  933.    assert(s && c && indexes && palette);
  934.  
  935.    assert(layer < VL_COMPOSITOR_MAX_LAYERS);
  936.  
  937.    s->used_layers |= 1 << layer;
  938.  
  939.    s->layers[layer].fs = include_color_conversion ?
  940.       c->fs_palette.yuv : c->fs_palette.rgb;
  941.  
  942.    s->layers[layer].samplers[0] = c->sampler_linear;
  943.    s->layers[layer].samplers[1] = c->sampler_nearest;
  944.    s->layers[layer].samplers[2] = NULL;
  945.    pipe_sampler_view_reference(&s->layers[layer].sampler_views[0], indexes);
  946.    pipe_sampler_view_reference(&s->layers[layer].sampler_views[1], palette);
  947.    pipe_sampler_view_reference(&s->layers[layer].sampler_views[2], NULL);
  948.    calc_src_and_dst(&s->layers[layer], indexes->texture->width0, indexes->texture->height0,
  949.                     src_rect ? *src_rect : default_rect(&s->layers[layer]),
  950.                     dst_rect ? *dst_rect : default_rect(&s->layers[layer]));
  951. }
  952.  
  953. void
  954. vl_compositor_set_rgba_layer(struct vl_compositor_state *s,
  955.                              struct vl_compositor *c,
  956.                              unsigned layer,
  957.                              struct pipe_sampler_view *rgba,
  958.                              struct u_rect *src_rect,
  959.                              struct u_rect *dst_rect,
  960.                              struct vertex4f *colors)
  961. {
  962.    unsigned i;
  963.  
  964.    assert(s && c && rgba);
  965.  
  966.    assert(layer < VL_COMPOSITOR_MAX_LAYERS);
  967.  
  968.    s->used_layers |= 1 << layer;
  969.    s->layers[layer].fs = c->fs_rgba;
  970.    s->layers[layer].samplers[0] = c->sampler_linear;
  971.    s->layers[layer].samplers[1] = NULL;
  972.    s->layers[layer].samplers[2] = NULL;
  973.    pipe_sampler_view_reference(&s->layers[layer].sampler_views[0], rgba);
  974.    pipe_sampler_view_reference(&s->layers[layer].sampler_views[1], NULL);
  975.    pipe_sampler_view_reference(&s->layers[layer].sampler_views[2], NULL);
  976.    calc_src_and_dst(&s->layers[layer], rgba->texture->width0, rgba->texture->height0,
  977.                     src_rect ? *src_rect : default_rect(&s->layers[layer]),
  978.                     dst_rect ? *dst_rect : default_rect(&s->layers[layer]));
  979.  
  980.    if (colors)
  981.       for (i = 0; i < 4; ++i)
  982.          s->layers[layer].colors[i] = colors[i];
  983. }
  984.  
  985. void
  986. vl_compositor_render(struct vl_compositor_state *s,
  987.                      struct vl_compositor       *c,
  988.                      struct pipe_surface        *dst_surface,
  989.                      struct u_rect              *dirty_area,
  990.                      bool                        clear_dirty)
  991. {
  992.    assert(c);
  993.    assert(dst_surface);
  994.  
  995.    c->fb_state.width = dst_surface->width;
  996.    c->fb_state.height = dst_surface->height;
  997.    c->fb_state.cbufs[0] = dst_surface;
  998.    
  999.    if (!s->scissor_valid) {
  1000.       s->scissor.minx = 0;
  1001.       s->scissor.miny = 0;
  1002.       s->scissor.maxx = dst_surface->width;
  1003.       s->scissor.maxy = dst_surface->height;
  1004.    }
  1005.  
  1006.    gen_vertex_data(c, s, dirty_area);
  1007.  
  1008.    if (clear_dirty && dirty_area &&
  1009.        (dirty_area->x0 < dirty_area->x1 || dirty_area->y0 < dirty_area->y1)) {
  1010.  
  1011.       c->pipe->clear_render_target(c->pipe, dst_surface, &s->clear_color,
  1012.                                    0, 0, dst_surface->width, dst_surface->height);
  1013.       dirty_area->x0 = dirty_area->y0 = MAX_DIRTY;
  1014.       dirty_area->x1 = dirty_area->y1 = MIN_DIRTY;
  1015.    }
  1016.  
  1017.    c->pipe->set_scissor_states(c->pipe, 0, 1, &s->scissor);
  1018.    c->pipe->set_framebuffer_state(c->pipe, &c->fb_state);
  1019.    c->pipe->bind_vs_state(c->pipe, c->vs);
  1020.    c->pipe->set_vertex_buffers(c->pipe, 0, 1, &c->vertex_buf);
  1021.    c->pipe->bind_vertex_elements_state(c->pipe, c->vertex_elems_state);
  1022.    pipe_set_constant_buffer(c->pipe, PIPE_SHADER_FRAGMENT, 0, s->csc_matrix);
  1023.    c->pipe->bind_rasterizer_state(c->pipe, c->rast);
  1024.  
  1025.    draw_layers(c, s, dirty_area);
  1026. }
  1027.  
  1028. bool
  1029. vl_compositor_init(struct vl_compositor *c, struct pipe_context *pipe)
  1030. {
  1031.    assert(c);
  1032.  
  1033.    memset(c, 0, sizeof(*c));
  1034.  
  1035.    c->pipe = pipe;
  1036.  
  1037.    if (!init_pipe_state(c))
  1038.       return false;
  1039.  
  1040.    if (!init_shaders(c)) {
  1041.       cleanup_pipe_state(c);
  1042.       return false;
  1043.    }
  1044.  
  1045.    if (!init_buffers(c)) {
  1046.       cleanup_shaders(c);
  1047.       cleanup_pipe_state(c);
  1048.       return false;
  1049.    }
  1050.  
  1051.    return true;
  1052. }
  1053.  
  1054. bool
  1055. vl_compositor_init_state(struct vl_compositor_state *s, struct pipe_context *pipe)
  1056. {
  1057.    vl_csc_matrix csc_matrix;
  1058.  
  1059.    assert(s);
  1060.  
  1061.    memset(s, 0, sizeof(*s));
  1062.  
  1063.    s->pipe = pipe;
  1064.  
  1065.    s->clear_color.f[0] = s->clear_color.f[1] = 0.0f;
  1066.    s->clear_color.f[2] = s->clear_color.f[3] = 0.0f;
  1067.  
  1068.    /*
  1069.     * Create our fragment shader's constant buffer
  1070.     * Const buffer contains the color conversion matrix and bias vectors
  1071.     */
  1072.    /* XXX: Create with IMMUTABLE/STATIC... although it does change every once in a long while... */
  1073.    s->csc_matrix = pipe_buffer_create
  1074.    (
  1075.       pipe->screen,
  1076.       PIPE_BIND_CONSTANT_BUFFER,
  1077.       PIPE_USAGE_STATIC,
  1078.       sizeof(csc_matrix)
  1079.    );
  1080.  
  1081.    vl_compositor_clear_layers(s);
  1082.  
  1083.    vl_csc_get_matrix(VL_CSC_COLOR_STANDARD_IDENTITY, NULL, true, &csc_matrix);
  1084.    vl_compositor_set_csc_matrix(s, (const vl_csc_matrix *)&csc_matrix);
  1085.  
  1086.    return true;
  1087. }
  1088.  
void
vl_compositor_cleanup_state(struct vl_compositor_state *s)
{
   assert(s);

   /* Release every layer's sampler view references, then drop the
    * CSC constant buffer. */
   vl_compositor_clear_layers(s);
   pipe_resource_reference(&s->csc_matrix, NULL);
}
  1097.