/**************************************************************************
 *
 * Copyright 2009 Younes Manton.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <assert.h>

#include "pipe/p_compiler.h"
#include "pipe/p_context.h"

#include "util/u_memory.h"
#include "util/u_draw.h"
#include "util/u_surface.h"
#include "util/u_upload_mgr.h"

#include "tgsi/tgsi_ureg.h"

#include "vl_csc.h"
#include "vl_types.h"
#include "vl_compositor.h"

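/* Sentinel bounds for dirty-area tracking: a full reset spans
 * MIN_DIRTY..MAX_DIRTY (everything dirty), while the inverted rectangle
 * MAX_DIRTY..MIN_DIRTY marks the area as clean. */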
#define MIN_DIRTY (0)
#define MAX_DIRTY (1 << 15)

enum VS_OUTPUT
{
   VS_O_VPOS = 0,
   VS_O_COLOR = 0,
   VS_O_VTEX = 0,
   VS_O_VTOP,
   VS_O_VBOTTOM,
};

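/* Pass-through vertex shader; besides forwarding position, texture
 * coordinates and color, it precomputes the per-field texture coordinates
 * (VS_O_VTOP/VS_O_VBOTTOM) consumed by the weave fragment shader. */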
static void *
create_vert_shader(struct vl_compositor *c)
{
   struct ureg_program *shader;
   struct ureg_src vpos, vtex, color;
   struct ureg_dst tmp;
   struct ureg_dst o_vpos, o_vtex, o_color;
   struct ureg_dst o_vtop, o_vbottom;

   shader = ureg_create(TGSI_PROCESSOR_VERTEX);
   if (!shader)
      return NULL;

   vpos = ureg_DECL_vs_input(shader, 0);
   vtex = ureg_DECL_vs_input(shader, 1);
   color = ureg_DECL_vs_input(shader, 2);
   tmp = ureg_DECL_temporary(shader);
   o_vpos = ureg_DECL_output(shader, TGSI_SEMANTIC_POSITION, VS_O_VPOS);
   o_color = ureg_DECL_output(shader, TGSI_SEMANTIC_COLOR, VS_O_COLOR);
   o_vtex = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_VTEX);
   o_vtop = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_VTOP);
   o_vbottom = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_VBOTTOM);

   /*
    * o_vpos = vpos
    * o_vtex = vtex
    * o_color = color
    */
   ureg_MOV(shader, o_vpos, vpos);
   ureg_MOV(shader, o_vtex, vtex);
   ureg_MOV(shader, o_color, color);

   /*
    * tmp.x = vtex.w / 2
    * tmp.y = vtex.w / 4
    *
    * o_vtop.x = vtex.x
    * o_vtop.y = vtex.y * tmp.x + 0.25f
    * o_vtop.z = vtex.y * tmp.y + 0.25f
    * o_vtop.w = 1 / tmp.x
    *
    * o_vbottom.x = vtex.x
    * o_vbottom.y = vtex.y * tmp.x - 0.25f
    * o_vbottom.z = vtex.y * tmp.y - 0.25f
    * o_vbottom.w = 1 / tmp.y
    */
   ureg_MUL(shader, ureg_writemask(tmp, TGSI_WRITEMASK_X),
            ureg_scalar(vtex, TGSI_SWIZZLE_W), ureg_imm1f(shader, 0.5f));
   ureg_MUL(shader, ureg_writemask(tmp, TGSI_WRITEMASK_Y),
            ureg_scalar(vtex, TGSI_SWIZZLE_W), ureg_imm1f(shader, 0.25f));

   ureg_MOV(shader, ureg_writemask(o_vtop, TGSI_WRITEMASK_X), vtex);
   ureg_MAD(shader, ureg_writemask(o_vtop, TGSI_WRITEMASK_Y), ureg_scalar(vtex, TGSI_SWIZZLE_Y),
            ureg_scalar(ureg_src(tmp), TGSI_SWIZZLE_X), ureg_imm1f(shader, 0.25f));
   ureg_MAD(shader, ureg_writemask(o_vtop, TGSI_WRITEMASK_Z), ureg_scalar(vtex, TGSI_SWIZZLE_Y),
            ureg_scalar(ureg_src(tmp), TGSI_SWIZZLE_Y), ureg_imm1f(shader, 0.25f));
   ureg_RCP(shader, ureg_writemask(o_vtop, TGSI_WRITEMASK_W),
            ureg_scalar(ureg_src(tmp), TGSI_SWIZZLE_X));

   ureg_MOV(shader, ureg_writemask(o_vbottom, TGSI_WRITEMASK_X), vtex);
   ureg_MAD(shader, ureg_writemask(o_vbottom, TGSI_WRITEMASK_Y), ureg_scalar(vtex, TGSI_SWIZZLE_Y),
            ureg_scalar(ureg_src(tmp), TGSI_SWIZZLE_X), ureg_imm1f(shader, -0.25f));
   ureg_MAD(shader, ureg_writemask(o_vbottom, TGSI_WRITEMASK_Z), ureg_scalar(vtex, TGSI_SWIZZLE_Y),
            ureg_scalar(ureg_src(tmp), TGSI_SWIZZLE_Y), ureg_imm1f(shader, -0.25f));
   ureg_RCP(shader, ureg_writemask(o_vbottom, TGSI_WRITEMASK_W),
            ureg_scalar(ureg_src(tmp), TGSI_SWIZZLE_Y));

   ureg_END(shader);

   return ureg_create_shader_and_destroy(shader, c->pipe);
}

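/* Fragment shader for progressive video: samples the three video planes
 * and converts YCbCr to RGB with the CSC matrix from the constant buffer. */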
static void *
create_frag_shader_video_buffer(struct vl_compositor *c)
{
   struct ureg_program *shader;
   struct ureg_src tc;
   struct ureg_src csc[3];
   struct ureg_src sampler[3];
   struct ureg_dst texel;
   struct ureg_dst fragment;
   unsigned i;

   shader = ureg_create(TGSI_PROCESSOR_FRAGMENT);
   if (!shader)
      return NULL;

   tc = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_VTEX, TGSI_INTERPOLATE_LINEAR);
   for (i = 0; i < 3; ++i) {
      csc[i] = ureg_DECL_constant(shader, i);
      sampler[i] = ureg_DECL_sampler(shader, i);
   }
   texel = ureg_DECL_temporary(shader);
   fragment = ureg_DECL_output(shader, TGSI_SEMANTIC_COLOR, 0);

   /*
    * texel.xyz = tex(tc, sampler[i])
    * fragment = csc * texel
    */
   for (i = 0; i < 3; ++i)
      ureg_TEX(shader, ureg_writemask(texel, TGSI_WRITEMASK_X << i), TGSI_TEXTURE_2D_ARRAY, tc, sampler[i]);

   ureg_MOV(shader, ureg_writemask(texel, TGSI_WRITEMASK_W), ureg_imm1f(shader, 1.0f));

   for (i = 0; i < 3; ++i)
      ureg_DP4(shader, ureg_writemask(fragment, TGSI_WRITEMASK_X << i), csc[i], ureg_src(texel));

   ureg_MOV(shader, ureg_writemask(fragment, TGSI_WRITEMASK_W), ureg_imm1f(shader, 1.0f));

   ureg_release_temporary(shader, texel);
   ureg_END(shader);

   return ureg_create_shader_and_destroy(shader, c->pipe);
}

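/* Weave deinterlacing fragment shader: fetches the texel from both the top
 * and the bottom field and blends them with a factor derived from the
 * distance to the nearest field line, then applies the usual CSC. */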
static void *
create_frag_shader_weave(struct vl_compositor *c)
{
   struct ureg_program *shader;
   struct ureg_src i_tc[2];
   struct ureg_src csc[3];
   struct ureg_src sampler[3];
   struct ureg_dst t_tc[2];
   struct ureg_dst t_texel[2];
   struct ureg_dst o_fragment;
   unsigned i, j;

   shader = ureg_create(TGSI_PROCESSOR_FRAGMENT);
   if (!shader)
      return NULL;

   i_tc[0] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_VTOP, TGSI_INTERPOLATE_LINEAR);
   i_tc[1] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_VBOTTOM, TGSI_INTERPOLATE_LINEAR);

   for (i = 0; i < 3; ++i) {
      csc[i] = ureg_DECL_constant(shader, i);
      sampler[i] = ureg_DECL_sampler(shader, i);
   }

   for (i = 0; i < 2; ++i) {
      t_tc[i] = ureg_DECL_temporary(shader);
      t_texel[i] = ureg_DECL_temporary(shader);
   }
   o_fragment = ureg_DECL_output(shader, TGSI_SEMANTIC_COLOR, 0);

   /* calculate the texture offsets
    * t_tc.x = i_tc.x
    * t_tc.y = (round(i_tc.y - 0.5) + 0.5) / height * 2
    */
   for (i = 0; i < 2; ++i) {
      ureg_MOV(shader, ureg_writemask(t_tc[i], TGSI_WRITEMASK_X), i_tc[i]);
      ureg_SUB(shader, ureg_writemask(t_tc[i], TGSI_WRITEMASK_YZ),
               i_tc[i], ureg_imm1f(shader, 0.5f));
      ureg_ROUND(shader, ureg_writemask(t_tc[i], TGSI_WRITEMASK_YZ), ureg_src(t_tc[i]));
      ureg_MOV(shader, ureg_writemask(t_tc[i], TGSI_WRITEMASK_W),
               ureg_imm1f(shader, i ? 1.0f : 0.0f));
      ureg_ADD(shader, ureg_writemask(t_tc[i], TGSI_WRITEMASK_YZ),
               ureg_src(t_tc[i]), ureg_imm1f(shader, 0.5f));
      ureg_MUL(shader, ureg_writemask(t_tc[i], TGSI_WRITEMASK_Y),
               ureg_src(t_tc[i]), ureg_scalar(i_tc[0], TGSI_SWIZZLE_W));
      ureg_MUL(shader, ureg_writemask(t_tc[i], TGSI_WRITEMASK_Z),
               ureg_src(t_tc[i]), ureg_scalar(i_tc[1], TGSI_SWIZZLE_W));
   }

   /* fetch the texels
    * texel[0..1].x = tex(t_tc[0..1][0])
    * texel[0..1].y = tex(t_tc[0..1][1])
    * texel[0..1].z = tex(t_tc[0..1][2])
    */
   for (i = 0; i < 2; ++i)
      for (j = 0; j < 3; ++j) {
         struct ureg_src src = ureg_swizzle(ureg_src(t_tc[i]),
            TGSI_SWIZZLE_X, j ? TGSI_SWIZZLE_Z : TGSI_SWIZZLE_Y, TGSI_SWIZZLE_W, TGSI_SWIZZLE_W);

         ureg_TEX(shader, ureg_writemask(t_texel[i], TGSI_WRITEMASK_X << j),
                  TGSI_TEXTURE_2D_ARRAY, src, sampler[j]);
      }

   /* calculate linear interpolation factor
    * factor = |round(i_tc.y) - i_tc.y| * 2
    */
   ureg_ROUND(shader, ureg_writemask(t_tc[0], TGSI_WRITEMASK_YZ), i_tc[0]);
   ureg_ADD(shader, ureg_writemask(t_tc[0], TGSI_WRITEMASK_YZ),
            ureg_src(t_tc[0]), ureg_negate(i_tc[0]));
   ureg_MUL(shader, ureg_writemask(t_tc[0], TGSI_WRITEMASK_YZ),
            ureg_abs(ureg_src(t_tc[0])), ureg_imm1f(shader, 2.0f));
   ureg_LRP(shader, t_texel[0], ureg_swizzle(ureg_src(t_tc[0]),
            TGSI_SWIZZLE_Y, TGSI_SWIZZLE_Z, TGSI_SWIZZLE_Z, TGSI_SWIZZLE_Z),
            ureg_src(t_texel[0]), ureg_src(t_texel[1]));

   /* and finally do colour space transformation
    * fragment = csc * texel
    */
   ureg_MOV(shader, ureg_writemask(t_texel[0], TGSI_WRITEMASK_W), ureg_imm1f(shader, 1.0f));
   for (i = 0; i < 3; ++i)
      ureg_DP4(shader, ureg_writemask(o_fragment, TGSI_WRITEMASK_X << i), csc[i], ureg_src(t_texel[0]));

   ureg_MOV(shader, ureg_writemask(o_fragment, TGSI_WRITEMASK_W), ureg_imm1f(shader, 1.0f));

   for (i = 0; i < 2; ++i) {
      ureg_release_temporary(shader, t_texel[i]);
      ureg_release_temporary(shader, t_tc[i]);
   }

   ureg_END(shader);

   return ureg_create_shader_and_destroy(shader, c->pipe);
}

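/* Fragment shader for paletted layers: the first sampler yields an index
 * into the palette; with include_cc the palette entries are treated as
 * YCbCr and run through the CSC matrix, otherwise they are used as RGB. */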
static void *
create_frag_shader_palette(struct vl_compositor *c, bool include_cc)
{
   struct ureg_program *shader;
   struct ureg_src csc[3];
   struct ureg_src tc;
   struct ureg_src sampler;
   struct ureg_src palette;
   struct ureg_dst texel;
   struct ureg_dst fragment;
   unsigned i;

   shader = ureg_create(TGSI_PROCESSOR_FRAGMENT);
   if (!shader)
      return NULL;

   for (i = 0; include_cc && i < 3; ++i)
      csc[i] = ureg_DECL_constant(shader, i);

   tc = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_VTEX, TGSI_INTERPOLATE_LINEAR);
   sampler = ureg_DECL_sampler(shader, 0);
   palette = ureg_DECL_sampler(shader, 1);

   texel = ureg_DECL_temporary(shader);
   fragment = ureg_DECL_output(shader, TGSI_SEMANTIC_COLOR, 0);

   /*
    * texel = tex(tc, sampler)
    * fragment.xyz = tex(texel, palette) * csc
    * fragment.a = texel.a
    */
   ureg_TEX(shader, texel, TGSI_TEXTURE_2D, tc, sampler);
   ureg_MOV(shader, ureg_writemask(fragment, TGSI_WRITEMASK_W), ureg_src(texel));

   if (include_cc) {
      ureg_TEX(shader, texel, TGSI_TEXTURE_1D, ureg_src(texel), palette);
      for (i = 0; i < 3; ++i)
         ureg_DP4(shader, ureg_writemask(fragment, TGSI_WRITEMASK_X << i), csc[i], ureg_src(texel));
   } else {
      ureg_TEX(shader, ureg_writemask(fragment, TGSI_WRITEMASK_XYZ),
               TGSI_TEXTURE_1D, ureg_src(texel), palette);
   }

   ureg_release_temporary(shader, texel);
   ureg_END(shader);

   return ureg_create_shader_and_destroy(shader, c->pipe);
}

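/* Fragment shader for plain RGBA layers, modulated by the per-vertex color. */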
static void *
create_frag_shader_rgba(struct vl_compositor *c)
{
   struct ureg_program *shader;
   struct ureg_src tc, color, sampler;
   struct ureg_dst texel, fragment;

   shader = ureg_create(TGSI_PROCESSOR_FRAGMENT);
   if (!shader)
      return NULL;

   tc = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_VTEX, TGSI_INTERPOLATE_LINEAR);
   color = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_COLOR, VS_O_COLOR, TGSI_INTERPOLATE_LINEAR);
   sampler = ureg_DECL_sampler(shader, 0);
   texel = ureg_DECL_temporary(shader);
   fragment = ureg_DECL_output(shader, TGSI_SEMANTIC_COLOR, 0);

   /*
    * fragment = tex(tc, sampler) * color
    */
   ureg_TEX(shader, texel, TGSI_TEXTURE_2D, tc, sampler);
   ureg_MUL(shader, fragment, ureg_src(texel), color);
   ureg_END(shader);

   return ureg_create_shader_and_destroy(shader, c->pipe);
}

static bool
init_shaders(struct vl_compositor *c)
{
   assert(c);

   c->vs = create_vert_shader(c);
   if (!c->vs) {
      debug_printf("Unable to create vertex shader.\n");
      return false;
   }

   c->fs_video_buffer = create_frag_shader_video_buffer(c);
   if (!c->fs_video_buffer) {
      debug_printf("Unable to create YCbCr-to-RGB fragment shader.\n");
      return false;
   }

   c->fs_weave = create_frag_shader_weave(c);
   if (!c->fs_weave) {
      debug_printf("Unable to create YCbCr-to-RGB weave fragment shader.\n");
      return false;
   }

   c->fs_palette.yuv = create_frag_shader_palette(c, true);
   if (!c->fs_palette.yuv) {
      debug_printf("Unable to create YUV-Palette-to-RGB fragment shader.\n");
      return false;
   }

   c->fs_palette.rgb = create_frag_shader_palette(c, false);
   if (!c->fs_palette.rgb) {
      debug_printf("Unable to create RGB-Palette-to-RGB fragment shader.\n");
      return false;
   }

   c->fs_rgba = create_frag_shader_rgba(c);
   if (!c->fs_rgba) {
      debug_printf("Unable to create RGB-to-RGB fragment shader.\n");
      return false;
   }

   return true;
}

static void cleanup_shaders(struct vl_compositor *c)
{
   assert(c);

   c->pipe->delete_vs_state(c->pipe, c->vs);
   c->pipe->delete_fs_state(c->pipe, c->fs_video_buffer);
   c->pipe->delete_fs_state(c->pipe, c->fs_weave);
   c->pipe->delete_fs_state(c->pipe, c->fs_palette.yuv);
   c->pipe->delete_fs_state(c->pipe, c->fs_palette.rgb);
   c->pipe->delete_fs_state(c->pipe, c->fs_rgba);
}

static bool
init_pipe_state(struct vl_compositor *c)
{
   struct pipe_rasterizer_state rast;
   struct pipe_sampler_state sampler;
   struct pipe_blend_state blend;
   struct pipe_depth_stencil_alpha_state dsa;
   unsigned i;

   assert(c);

   c->fb_state.nr_cbufs = 1;
   c->fb_state.zsbuf = NULL;

   memset(&sampler, 0, sizeof(sampler));
   sampler.wrap_s = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
   sampler.wrap_t = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
   sampler.wrap_r = PIPE_TEX_WRAP_REPEAT;
   sampler.min_img_filter = PIPE_TEX_FILTER_LINEAR;
   sampler.min_mip_filter = PIPE_TEX_MIPFILTER_NONE;
   sampler.mag_img_filter = PIPE_TEX_FILTER_LINEAR;
   sampler.compare_mode = PIPE_TEX_COMPARE_NONE;
   sampler.compare_func = PIPE_FUNC_ALWAYS;
   sampler.normalized_coords = 1;

   c->sampler_linear = c->pipe->create_sampler_state(c->pipe, &sampler);

   sampler.min_img_filter = PIPE_TEX_FILTER_NEAREST;
   sampler.mag_img_filter = PIPE_TEX_FILTER_NEAREST;
   c->sampler_nearest = c->pipe->create_sampler_state(c->pipe, &sampler);

   memset(&blend, 0, sizeof blend);
   blend.independent_blend_enable = 0;
   blend.rt[0].blend_enable = 0;
   blend.logicop_enable = 0;
   blend.logicop_func = PIPE_LOGICOP_CLEAR;
   blend.rt[0].colormask = PIPE_MASK_RGBA;
   blend.dither = 0;
   c->blend_clear = c->pipe->create_blend_state(c->pipe, &blend);

   blend.rt[0].blend_enable = 1;
   blend.rt[0].rgb_func = PIPE_BLEND_ADD;
   blend.rt[0].rgb_src_factor = PIPE_BLENDFACTOR_SRC_ALPHA;
   blend.rt[0].rgb_dst_factor = PIPE_BLENDFACTOR_INV_SRC_ALPHA;
   blend.rt[0].alpha_func = PIPE_BLEND_ADD;
   blend.rt[0].alpha_src_factor = PIPE_BLENDFACTOR_ONE;
   blend.rt[0].alpha_dst_factor = PIPE_BLENDFACTOR_ONE;
   c->blend_add = c->pipe->create_blend_state(c->pipe, &blend);

   memset(&rast, 0, sizeof rast);
   rast.flatshade = 0;
   rast.front_ccw = 1;
   rast.cull_face = PIPE_FACE_NONE;
   rast.fill_back = PIPE_POLYGON_MODE_FILL;
   rast.fill_front = PIPE_POLYGON_MODE_FILL;
   rast.scissor = 1;
   rast.line_width = 1;
   rast.point_size_per_vertex = 1;
   rast.offset_units = 1;
   rast.offset_scale = 1;
   rast.half_pixel_center = 1;
   rast.bottom_edge_rule = 1;
   rast.depth_clip = 1;

   c->rast = c->pipe->create_rasterizer_state(c->pipe, &rast);

   memset(&dsa, 0, sizeof dsa);
   dsa.depth.enabled = 0;
   dsa.depth.writemask = 0;
   dsa.depth.func = PIPE_FUNC_ALWAYS;
   for (i = 0; i < 2; ++i) {
      dsa.stencil[i].enabled = 0;
      dsa.stencil[i].func = PIPE_FUNC_ALWAYS;
      dsa.stencil[i].fail_op = PIPE_STENCIL_OP_KEEP;
      dsa.stencil[i].zpass_op = PIPE_STENCIL_OP_KEEP;
      dsa.stencil[i].zfail_op = PIPE_STENCIL_OP_KEEP;
      dsa.stencil[i].valuemask = 0;
      dsa.stencil[i].writemask = 0;
   }
   dsa.alpha.enabled = 0;
   dsa.alpha.func = PIPE_FUNC_ALWAYS;
   dsa.alpha.ref_value = 0;
   c->dsa = c->pipe->create_depth_stencil_alpha_state(c->pipe, &dsa);
   c->pipe->bind_depth_stencil_alpha_state(c->pipe, c->dsa);

   return true;
}

static void cleanup_pipe_state(struct vl_compositor *c)
{
   assert(c);

   /* Asserted in softpipe_delete_fs_state() for some reason */
   c->pipe->bind_vs_state(c->pipe, NULL);
   c->pipe->bind_fs_state(c->pipe, NULL);

   c->pipe->delete_depth_stencil_alpha_state(c->pipe, c->dsa);
   c->pipe->delete_sampler_state(c->pipe, c->sampler_linear);
   c->pipe->delete_sampler_state(c->pipe, c->sampler_nearest);
   c->pipe->delete_blend_state(c->pipe, c->blend_clear);
   c->pipe->delete_blend_state(c->pipe, c->blend_add);
   c->pipe->delete_rasterizer_state(c->pipe, c->rast);
}

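/* The vertex buffer interleaves a vertex2f position with two vertex4f
 * attributes (texture coordinates and per-vertex color) for every vertex,
 * matching the three vertex elements declared below. */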
static bool
init_buffers(struct vl_compositor *c)
{
   struct pipe_vertex_element vertex_elems[3];

   assert(c);

   /*
    * Create our vertex buffer and vertex buffer elements
    */
   c->vertex_buf.stride = sizeof(struct vertex2f) + sizeof(struct vertex4f) * 2;
   c->vertex_buf.buffer_offset = 0;
   c->vertex_buf.buffer = NULL;

   vertex_elems[0].src_offset = 0;
   vertex_elems[0].instance_divisor = 0;
   vertex_elems[0].vertex_buffer_index = 0;
   vertex_elems[0].src_format = PIPE_FORMAT_R32G32_FLOAT;
   vertex_elems[1].src_offset = sizeof(struct vertex2f);
   vertex_elems[1].instance_divisor = 0;
   vertex_elems[1].vertex_buffer_index = 0;
   vertex_elems[1].src_format = PIPE_FORMAT_R32G32B32A32_FLOAT;
   vertex_elems[2].src_offset = sizeof(struct vertex2f) + sizeof(struct vertex4f);
   vertex_elems[2].instance_divisor = 0;
   vertex_elems[2].vertex_buffer_index = 0;
   vertex_elems[2].src_format = PIPE_FORMAT_R32G32B32A32_FLOAT;
   c->vertex_elems_state = c->pipe->create_vertex_elements_state(c->pipe, 3, vertex_elems);

   return true;
}

static void
cleanup_buffers(struct vl_compositor *c)
{
   assert(c);

   c->pipe->delete_vertex_elements_state(c->pipe, c->vertex_elems_state);
   pipe_resource_reference(&c->vertex_buf.buffer, NULL);
}

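/* Rectangle covering the whole first sampler view; for array textures every
 * layer counts towards the height. */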
static INLINE struct u_rect
default_rect(struct vl_compositor_layer *layer)
{
   struct pipe_resource *res = layer->sampler_views[0]->texture;
   struct u_rect rect = { 0, res->width0, 0, res->height0 * res->array_size };
   return rect;
}

static INLINE struct vertex2f
calc_topleft(struct vertex2f size, struct u_rect rect)
{
   struct vertex2f res = { rect.x0 / size.x, rect.y0 / size.y };
   return res;
}

static INLINE struct vertex2f
calc_bottomright(struct vertex2f size, struct u_rect rect)
{
   struct vertex2f res = { rect.x1 / size.x, rect.y1 / size.y };
   return res;
}

static INLINE void
calc_src_and_dst(struct vl_compositor_layer *layer, unsigned width, unsigned height,
                 struct u_rect src, struct u_rect dst)
{
   struct vertex2f size = { width, height };

   layer->src.tl = calc_topleft(size, src);
   layer->src.br = calc_bottomright(size, src);
   layer->dst.tl = calc_topleft(size, dst);
   layer->dst.br = calc_bottomright(size, dst);
   layer->zw.x = 0.0f;
   layer->zw.y = size.y;
}

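/* Emit one quad (four vertices) for the layer; every vertex is written as
 * five vertex2f slots: position, texcoord.xy, zw, color.xy and color.zw,
 * which is why vb advances by 20 slots per layer in gen_vertex_data(). */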
static void
gen_rect_verts(struct vertex2f *vb, struct vl_compositor_layer *layer)
{
   struct vertex2f tl, tr, br, bl;

   assert(vb && layer);

   switch (layer->rotate) {
   default:
   case VL_COMPOSITOR_ROTATE_0:
      tl = layer->dst.tl;
      tr.x = layer->dst.br.x;
      tr.y = layer->dst.tl.y;
      br = layer->dst.br;
      bl.x = layer->dst.tl.x;
      bl.y = layer->dst.br.y;
      break;
   case VL_COMPOSITOR_ROTATE_90:
      tl.x = layer->dst.br.x;
      tl.y = layer->dst.tl.y;
      tr = layer->dst.br;
      br.x = layer->dst.tl.x;
      br.y = layer->dst.br.y;
      bl = layer->dst.tl;
      break;
   case VL_COMPOSITOR_ROTATE_180:
      tl = layer->dst.br;
      tr.x = layer->dst.tl.x;
      tr.y = layer->dst.br.y;
      br = layer->dst.tl;
      bl.x = layer->dst.br.x;
      bl.y = layer->dst.tl.y;
      break;
   case VL_COMPOSITOR_ROTATE_270:
      tl.x = layer->dst.tl.x;
      tl.y = layer->dst.br.y;
      tr = layer->dst.tl;
      br.x = layer->dst.br.x;
      br.y = layer->dst.tl.y;
      bl = layer->dst.br;
      break;
   }

   vb[ 0].x = tl.x;
   vb[ 0].y = tl.y;
   vb[ 1].x = layer->src.tl.x;
   vb[ 1].y = layer->src.tl.y;
   vb[ 2] = layer->zw;
   vb[ 3].x = layer->colors[0].x;
   vb[ 3].y = layer->colors[0].y;
   vb[ 4].x = layer->colors[0].z;
   vb[ 4].y = layer->colors[0].w;

   vb[ 5].x = tr.x;
   vb[ 5].y = tr.y;
   vb[ 6].x = layer->src.br.x;
   vb[ 6].y = layer->src.tl.y;
   vb[ 7] = layer->zw;
   vb[ 8].x = layer->colors[1].x;
   vb[ 8].y = layer->colors[1].y;
   vb[ 9].x = layer->colors[1].z;
   vb[ 9].y = layer->colors[1].w;

   vb[10].x = br.x;
   vb[10].y = br.y;
   vb[11].x = layer->src.br.x;
   vb[11].y = layer->src.br.y;
   vb[12] = layer->zw;
   vb[13].x = layer->colors[2].x;
   vb[13].y = layer->colors[2].y;
   vb[14].x = layer->colors[2].z;
   vb[14].y = layer->colors[2].w;

   vb[15].x = bl.x;
   vb[15].y = bl.y;
   vb[16].x = layer->src.tl.x;
   vb[16].y = layer->src.br.y;
   vb[17] = layer->zw;
   vb[18].x = layer->colors[3].x;
   vb[18].y = layer->colors[3].y;
   vb[19].x = layer->colors[3].z;
   vb[19].y = layer->colors[3].w;
}

static INLINE struct u_rect
calc_drawn_area(struct vl_compositor_state *s, struct vl_compositor_layer *layer)
{
   struct vertex2f tl, br;
   struct u_rect result;

   assert(s && layer);

   // rotate
   switch (layer->rotate) {
   default:
   case VL_COMPOSITOR_ROTATE_0:
      tl = layer->dst.tl;
      br = layer->dst.br;
      break;
   case VL_COMPOSITOR_ROTATE_90:
      tl.x = layer->dst.br.x;
      tl.y = layer->dst.tl.y;
      br.x = layer->dst.tl.x;
      br.y = layer->dst.br.y;
      break;
   case VL_COMPOSITOR_ROTATE_180:
      tl = layer->dst.br;
      br = layer->dst.tl;
      break;
   case VL_COMPOSITOR_ROTATE_270:
      tl.x = layer->dst.tl.x;
      tl.y = layer->dst.br.y;
      br.x = layer->dst.br.x;
      br.y = layer->dst.tl.y;
      break;
   }

   // scale
   result.x0 = tl.x * layer->viewport.scale[0] + layer->viewport.translate[0];
   result.y0 = tl.y * layer->viewport.scale[1] + layer->viewport.translate[1];
   result.x1 = br.x * layer->viewport.scale[0] + layer->viewport.translate[0];
   result.y1 = br.y * layer->viewport.scale[1] + layer->viewport.translate[1];

   // and clip
   result.x0 = MAX2(result.x0, s->scissor.minx);
   result.y0 = MAX2(result.y0, s->scissor.miny);
   result.x1 = MIN2(result.x1, s->scissor.maxx);
   result.y1 = MIN2(result.y1, s->scissor.maxy);
   return result;
}

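/* Upload vertex data for all used layers; when a clearing layer completely
 * covers the dirty area, the area is marked clean since the layer will
 * overwrite it anyway. */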
static void
gen_vertex_data(struct vl_compositor *c, struct vl_compositor_state *s, struct u_rect *dirty)
{
   struct vertex2f *vb;
   unsigned i;

   assert(c);

   /* Allocate new memory for vertices. */
   u_upload_alloc(c->upload, 0,
                  c->vertex_buf.stride * VL_COMPOSITOR_MAX_LAYERS * 4, /* size */
                  &c->vertex_buf.buffer_offset, &c->vertex_buf.buffer,
                  (void**)&vb);

   for (i = 0; i < VL_COMPOSITOR_MAX_LAYERS; i++) {
      if (s->used_layers & (1 << i)) {
         struct vl_compositor_layer *layer = &s->layers[i];
         gen_rect_verts(vb, layer);
         vb += 20;

         if (!layer->viewport_valid) {
            layer->viewport.scale[0] = c->fb_state.width;
            layer->viewport.scale[1] = c->fb_state.height;
            layer->viewport.translate[0] = 0;
            layer->viewport.translate[1] = 0;
         }

         if (dirty && layer->clearing) {
            struct u_rect drawn = calc_drawn_area(s, layer);
            if (dirty->x0 >= drawn.x0 &&
                dirty->y0 >= drawn.y0 &&
                dirty->x1 <= drawn.x1 &&
                dirty->y1 <= drawn.y1) {

               // We clear the dirty area anyway, no need for clear_render_target
               dirty->x0 = dirty->y0 = MAX_DIRTY;
               dirty->x1 = dirty->y1 = MIN_DIRTY;
            }
         }
      }
   }

   u_upload_unmap(c->upload);
}

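/* Bind per-layer state and draw one quad per used layer; the drawn area is
 * accumulated into the dirty rectangle for the following frame. */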
static void
draw_layers(struct vl_compositor *c, struct vl_compositor_state *s, struct u_rect *dirty)
{
   unsigned vb_index, i;

   assert(c);

   for (i = 0, vb_index = 0; i < VL_COMPOSITOR_MAX_LAYERS; ++i) {
      if (s->used_layers & (1 << i)) {
         struct vl_compositor_layer *layer = &s->layers[i];
         struct pipe_sampler_view **samplers = &layer->sampler_views[0];
         unsigned num_sampler_views = !samplers[1] ? 1 : !samplers[2] ? 2 : 3;
         void *blend = layer->blend ? layer->blend : i ? c->blend_add : c->blend_clear;

         c->pipe->bind_blend_state(c->pipe, blend);
         c->pipe->set_viewport_states(c->pipe, 0, 1, &layer->viewport);
         c->pipe->bind_fs_state(c->pipe, layer->fs);
         c->pipe->bind_sampler_states(c->pipe, PIPE_SHADER_FRAGMENT, 0,
                                      num_sampler_views, layer->samplers);
         c->pipe->set_sampler_views(c->pipe, PIPE_SHADER_FRAGMENT, 0,
                                    num_sampler_views, samplers);

         util_draw_arrays(c->pipe, PIPE_PRIM_QUADS, vb_index * 4, 4);
         vb_index++;

         if (dirty) {
            // Remember the currently drawn area as dirty for the next draw command
            struct u_rect drawn = calc_drawn_area(s, layer);
            dirty->x0 = MIN2(drawn.x0, dirty->x0);
            dirty->y0 = MIN2(drawn.y0, dirty->y0);
            dirty->x1 = MAX2(drawn.x1, dirty->x1);
            dirty->y1 = MAX2(drawn.y1, dirty->y1);
         }
      }
   }
}

void
vl_compositor_reset_dirty_area(struct u_rect *dirty)
{
   assert(dirty);

   dirty->x0 = dirty->y0 = MIN_DIRTY;
   dirty->x1 = dirty->y1 = MAX_DIRTY;
}

void
vl_compositor_set_clear_color(struct vl_compositor_state *s, union pipe_color_union *color)
{
   assert(s);
   assert(color);

   s->clear_color = *color;
}

void
vl_compositor_get_clear_color(struct vl_compositor_state *s, union pipe_color_union *color)
{
   assert(s);
   assert(color);

   *color = s->clear_color;
}

void
vl_compositor_clear_layers(struct vl_compositor_state *s)
{
   unsigned i, j;

   assert(s);

   s->used_layers = 0;
   for (i = 0; i < VL_COMPOSITOR_MAX_LAYERS; ++i) {
      struct vertex4f v_one = { 1.0f, 1.0f, 1.0f, 1.0f };
      s->layers[i].clearing = i ? false : true;
      s->layers[i].blend = NULL;
      s->layers[i].fs = NULL;
      s->layers[i].viewport.scale[2] = 1;
      s->layers[i].viewport.translate[2] = 0;
      s->layers[i].rotate = VL_COMPOSITOR_ROTATE_0;

      for (j = 0; j < 3; j++)
         pipe_sampler_view_reference(&s->layers[i].sampler_views[j], NULL);
      for (j = 0; j < 4; ++j)
         s->layers[i].colors[j] = v_one;
   }
}

void
vl_compositor_cleanup(struct vl_compositor *c)
{
   assert(c);

   u_upload_destroy(c->upload);
   cleanup_buffers(c);
   cleanup_shaders(c);
   cleanup_pipe_state(c);
}

void
vl_compositor_set_csc_matrix(struct vl_compositor_state *s, vl_csc_matrix const *matrix)
{
   struct pipe_transfer *buf_transfer;

   assert(s);

   memcpy
   (
      pipe_buffer_map(s->pipe, s->csc_matrix,
                      PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD_RANGE,
                      &buf_transfer),
      matrix,
      sizeof(vl_csc_matrix)
   );

   pipe_buffer_unmap(s->pipe, buf_transfer);
}

void
vl_compositor_set_dst_clip(struct vl_compositor_state *s, struct u_rect *dst_clip)
{
   assert(s);

   s->scissor_valid = dst_clip != NULL;
   if (dst_clip) {
      s->scissor.minx = dst_clip->x0;
      s->scissor.miny = dst_clip->y0;
      s->scissor.maxx = dst_clip->x1;
      s->scissor.maxy = dst_clip->y1;
   }
}

void
vl_compositor_set_layer_blend(struct vl_compositor_state *s,
                              unsigned layer, void *blend,
                              bool is_clearing)
{
   assert(s && blend);

   assert(layer < VL_COMPOSITOR_MAX_LAYERS);

   s->layers[layer].clearing = is_clearing;
   s->layers[layer].blend = blend;
}

void
vl_compositor_set_layer_dst_area(struct vl_compositor_state *s,
                                 unsigned layer, struct u_rect *dst_area)
{
   assert(s);

   assert(layer < VL_COMPOSITOR_MAX_LAYERS);

   s->layers[layer].viewport_valid = dst_area != NULL;
   if (dst_area) {
      s->layers[layer].viewport.scale[0] = dst_area->x1 - dst_area->x0;
      s->layers[layer].viewport.scale[1] = dst_area->y1 - dst_area->y0;
      s->layers[layer].viewport.translate[0] = dst_area->x0;
      s->layers[layer].viewport.translate[1] = dst_area->y0;
   }
}

void
vl_compositor_set_buffer_layer(struct vl_compositor_state *s,
                               struct vl_compositor *c,
                               unsigned layer,
                               struct pipe_video_buffer *buffer,
                               struct u_rect *src_rect,
                               struct u_rect *dst_rect,
                               enum vl_compositor_deinterlace deinterlace)
{
   struct pipe_sampler_view **sampler_views;
   unsigned i;

   assert(s && c && buffer);

   assert(layer < VL_COMPOSITOR_MAX_LAYERS);

   s->used_layers |= 1 << layer;
   sampler_views = buffer->get_sampler_view_components(buffer);
   for (i = 0; i < 3; ++i) {
      s->layers[layer].samplers[i] = c->sampler_linear;
      pipe_sampler_view_reference(&s->layers[layer].sampler_views[i], sampler_views[i]);
   }

   calc_src_and_dst(&s->layers[layer], buffer->width, buffer->height,
                    src_rect ? *src_rect : default_rect(&s->layers[layer]),
                    dst_rect ? *dst_rect : default_rect(&s->layers[layer]));

   if (buffer->interlaced) {
      float half_a_line = 0.5f / s->layers[layer].zw.y;
      switch (deinterlace) {
      case VL_COMPOSITOR_WEAVE:
         s->layers[layer].fs = c->fs_weave;
         break;

      case VL_COMPOSITOR_BOB_TOP:
         s->layers[layer].zw.x = 0.0f;
         s->layers[layer].src.tl.y += half_a_line;
         s->layers[layer].src.br.y += half_a_line;
         s->layers[layer].fs = c->fs_video_buffer;
         break;

      case VL_COMPOSITOR_BOB_BOTTOM:
         s->layers[layer].zw.x = 1.0f;
         s->layers[layer].src.tl.y -= half_a_line;
         s->layers[layer].src.br.y -= half_a_line;
         s->layers[layer].fs = c->fs_video_buffer;
         break;
      }

   } else
      s->layers[layer].fs = c->fs_video_buffer;
}

void
vl_compositor_set_palette_layer(struct vl_compositor_state *s,
                                struct vl_compositor *c,
                                unsigned layer,
                                struct pipe_sampler_view *indexes,
                                struct pipe_sampler_view *palette,
                                struct u_rect *src_rect,
                                struct u_rect *dst_rect,
                                bool include_color_conversion)
{
   assert(s && c && indexes && palette);

   assert(layer < VL_COMPOSITOR_MAX_LAYERS);

   s->used_layers |= 1 << layer;

   s->layers[layer].fs = include_color_conversion ?
      c->fs_palette.yuv : c->fs_palette.rgb;

   s->layers[layer].samplers[0] = c->sampler_linear;
   s->layers[layer].samplers[1] = c->sampler_nearest;
   s->layers[layer].samplers[2] = NULL;
   pipe_sampler_view_reference(&s->layers[layer].sampler_views[0], indexes);
   pipe_sampler_view_reference(&s->layers[layer].sampler_views[1], palette);
   pipe_sampler_view_reference(&s->layers[layer].sampler_views[2], NULL);
   calc_src_and_dst(&s->layers[layer], indexes->texture->width0, indexes->texture->height0,
                    src_rect ? *src_rect : default_rect(&s->layers[layer]),
                    dst_rect ? *dst_rect : default_rect(&s->layers[layer]));
}

void
vl_compositor_set_rgba_layer(struct vl_compositor_state *s,
                             struct vl_compositor *c,
                             unsigned layer,
                             struct pipe_sampler_view *rgba,
                             struct u_rect *src_rect,
                             struct u_rect *dst_rect,
                             struct vertex4f *colors)
{
   unsigned i;

   assert(s && c && rgba);

   assert(layer < VL_COMPOSITOR_MAX_LAYERS);

   s->used_layers |= 1 << layer;
   s->layers[layer].fs = c->fs_rgba;
   s->layers[layer].samplers[0] = c->sampler_linear;
   s->layers[layer].samplers[1] = NULL;
   s->layers[layer].samplers[2] = NULL;
   pipe_sampler_view_reference(&s->layers[layer].sampler_views[0], rgba);
   pipe_sampler_view_reference(&s->layers[layer].sampler_views[1], NULL);
   pipe_sampler_view_reference(&s->layers[layer].sampler_views[2], NULL);
   calc_src_and_dst(&s->layers[layer], rgba->texture->width0, rgba->texture->height0,
                    src_rect ? *src_rect : default_rect(&s->layers[layer]),
                    dst_rect ? *dst_rect : default_rect(&s->layers[layer]));

   if (colors)
      for (i = 0; i < 4; ++i)
         s->layers[layer].colors[i] = colors[i];
}

void
vl_compositor_set_layer_rotation(struct vl_compositor_state *s,
                                 unsigned layer,
                                 enum vl_compositor_rotation rotate)
{
   assert(s);
   assert(layer < VL_COMPOSITOR_MAX_LAYERS);
   s->layers[layer].rotate = rotate;
}

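/* Typical call sequence (a sketch; the exact integration depends on the
 * state tracker driving the compositor, and video_buf/dst_surface stand
 * in for objects provided by the caller):
 *
 *    struct vl_compositor c;
 *    struct vl_compositor_state s;
 *    struct u_rect dirty;
 *
 *    vl_compositor_init(&c, pipe);
 *    vl_compositor_init_state(&s, pipe);
 *    vl_compositor_reset_dirty_area(&dirty);
 *    vl_compositor_set_buffer_layer(&s, &c, 0, video_buf, NULL, NULL,
 *                                   VL_COMPOSITOR_WEAVE);
 *    vl_compositor_render(&s, &c, dst_surface, &dirty, false);
 */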
void
vl_compositor_render(struct vl_compositor_state *s,
                     struct vl_compositor       *c,
                     struct pipe_surface        *dst_surface,
                     struct u_rect              *dirty_area,
                     bool                        clear_dirty)
{
   assert(c);
   assert(dst_surface);

   c->fb_state.width = dst_surface->width;
   c->fb_state.height = dst_surface->height;
   c->fb_state.cbufs[0] = dst_surface;

   if (!s->scissor_valid) {
      s->scissor.minx = 0;
      s->scissor.miny = 0;
      s->scissor.maxx = dst_surface->width;
      s->scissor.maxy = dst_surface->height;
   }
   c->pipe->set_scissor_states(c->pipe, 0, 1, &s->scissor);

   gen_vertex_data(c, s, dirty_area);

   if (clear_dirty && dirty_area &&
       (dirty_area->x0 < dirty_area->x1 || dirty_area->y0 < dirty_area->y1)) {

      c->pipe->clear_render_target(c->pipe, dst_surface, &s->clear_color,
                                   0, 0, dst_surface->width, dst_surface->height);
      dirty_area->x0 = dirty_area->y0 = MAX_DIRTY;
      dirty_area->x1 = dirty_area->y1 = MIN_DIRTY;
   }

   c->pipe->set_framebuffer_state(c->pipe, &c->fb_state);
   c->pipe->bind_vs_state(c->pipe, c->vs);
   c->pipe->set_vertex_buffers(c->pipe, 0, 1, &c->vertex_buf);
   c->pipe->bind_vertex_elements_state(c->pipe, c->vertex_elems_state);
   pipe_set_constant_buffer(c->pipe, PIPE_SHADER_FRAGMENT, 0, s->csc_matrix);
   c->pipe->bind_rasterizer_state(c->pipe, c->rast);

   draw_layers(c, s, dirty_area);
}

bool
vl_compositor_init(struct vl_compositor *c, struct pipe_context *pipe)
{
   assert(c);

   memset(c, 0, sizeof(*c));

   c->pipe = pipe;

   c->upload = u_upload_create(pipe, 128 * 1024, 4, PIPE_BIND_VERTEX_BUFFER);

   if (!c->upload)
      return false;

   if (!init_pipe_state(c)) {
      u_upload_destroy(c->upload);
      return false;
   }

   if (!init_shaders(c)) {
      u_upload_destroy(c->upload);
      cleanup_pipe_state(c);
      return false;
   }

   if (!init_buffers(c)) {
      u_upload_destroy(c->upload);
      cleanup_shaders(c);
      cleanup_pipe_state(c);
      return false;
   }

   return true;
}

bool
vl_compositor_init_state(struct vl_compositor_state *s, struct pipe_context *pipe)
{
   vl_csc_matrix csc_matrix;

   assert(s);

   memset(s, 0, sizeof(*s));

   s->pipe = pipe;

   s->clear_color.f[0] = s->clear_color.f[1] = 0.0f;
   s->clear_color.f[2] = s->clear_color.f[3] = 0.0f;

   /*
    * Create our fragment shader's constant buffer
    * Const buffer contains the color conversion matrix and bias vectors
    */
   /* XXX: Create with IMMUTABLE/STATIC... although it does change every once in a long while... */
   s->csc_matrix = pipe_buffer_create
   (
      pipe->screen,
      PIPE_BIND_CONSTANT_BUFFER,
      PIPE_USAGE_DEFAULT,
      sizeof(csc_matrix)
   );

   vl_compositor_clear_layers(s);

   vl_csc_get_matrix(VL_CSC_COLOR_STANDARD_IDENTITY, NULL, true, &csc_matrix);
   vl_compositor_set_csc_matrix(s, (const vl_csc_matrix *)&csc_matrix);

   return true;
}

void
vl_compositor_cleanup_state(struct vl_compositor_state *s)
{
   assert(s);

   vl_compositor_clear_layers(s);
   pipe_resource_reference(&s->csc_matrix, NULL);
}