Subversion Repositories Kolibri OS

Rev

Blame | Last modification | View Log | RSS feed

  1. /*
  2.  * ColorMatrix v2.2 for Avisynth 2.5.x
  3.  *
  4.  * Copyright (C) 2006-2007 Kevin Stone
  5.  *
  6.  * ColorMatrix 1.x is Copyright (C) Wilbert Dijkhof
  7.  *
  8.  * This program is free software; you can redistribute it and/or modify it
  9.  * under the terms of the GNU General Public License as published by the
  10.  * Free Software Foundation; either version 2 of the License, or (at your
  11.  * option) any later version.
  12.  *
  13.  * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
  15.  * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
  16.  * License for more details.
  17.  *
  18.  * You should have received a copy of the GNU General Public License
  19.  * along with this program; if not, write to the Free Software Foundation,
  20.  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  21.  */
  22.  
  23. /**
  24.  * @file
  25.  * ColorMatrix 2.0 is based on the original ColorMatrix filter by Wilbert
  26.  * Dijkhof.  It adds the ability to convert between any of: Rec.709, FCC,
  27.  * Rec.601, and SMPTE 240M. It also makes pre and post clipping optional,
  28.  * adds an option to use scaled or non-scaled coefficients, and more...
  29.  */
  30.  
  31. #include <float.h>
  32. #include "avfilter.h"
  33. #include "formats.h"
  34. #include "internal.h"
  35. #include "video.h"
  36. #include "libavutil/opt.h"
  37. #include "libavutil/pixdesc.h"
  38. #include "libavutil/avstring.h"
  39.  
/* NS: convert a double coefficient to 16.16 fixed point with round-to-nearest
 * (the DBL_EPSILON nudge keeps exact-half negative values from rounding the
 * wrong way when truncated toward zero). */
#define NS(n) ((n) < 0 ? (int)((n)*65536.0-0.5+DBL_EPSILON) : (int)((n)*65536.0+0.5))
/* CB: clip an int to the 0..255 byte range. */
#define CB(n) av_clip_uint8(n)
  42.  
/* RGB->YUV matrices for the four supported standards, indexed by enum
 * ColorMode.  Row order is Y, U, V; column order is G, B, R (note: NOT R,G,B —
 * see the Y rows, e.g. Rec.709 is 0.7152 G, 0.0722 B, 0.2126 R). */
static const double yuv_coeff[4][3][3] = {
    { { +0.7152, +0.0722, +0.2126 }, // Rec.709 (0)
      { -0.3850, +0.5000, -0.1150 },
      { -0.4540, -0.0460, +0.5000 } },
    { { +0.5900, +0.1100, +0.3000 }, // FCC (1)
      { -0.3310, +0.5000, -0.1690 },
      { -0.4210, -0.0790, +0.5000 } },
    { { +0.5870, +0.1140, +0.2990 }, // Rec.601 (ITU-R BT.470-2/SMPTE 170M) (2)
      { -0.3313, +0.5000, -0.1687 },
      { -0.4187, -0.0813, +0.5000 } },
    { { +0.7010, +0.0870, +0.2120 }, // SMPTE 240M (3)
      { -0.3840, +0.5000, -0.1160 },
      { -0.4450, -0.0550, +0.5000 } },
};
  57.  
/* Supported color matrices.  Values 0..3 index yuv_coeff[] and color_modes[];
 * COLOR_MODE_NONE (-1) means "unset" (for src: auto-detect from the frame). */
enum ColorMode {
    COLOR_MODE_NONE = -1,
    COLOR_MODE_BT709,
    COLOR_MODE_FCC,
    COLOR_MODE_BT601,
    COLOR_MODE_SMPTE240M,
    COLOR_MODE_COUNT
};
  66.  
typedef struct {
    const AVClass *class;
    int yuv_convert[16][3][3]; ///< 16.16 fixed-point matrices, one per (src,dst) pair; index = src*4 + dst
    int interlaced;
    int source, dest;        ///< ColorMode
    int mode;                ///< selected yuv_convert index (source*4 + dest), set per frame
    int hsub, vsub;          ///< chroma subsampling shifts of the input format
} ColorMatrixContext;
  75.  
/* Per-frame data shared by the slice workers.  c2..c7 are the six non-trivial
 * 16.16 fixed-point coefficients of the active conversion matrix (the first
 * column is the identity 65536/0/0 and is applied implicitly). */
typedef struct ThreadData {
    AVFrame *dst;
    const AVFrame *src;
    int c2;
    int c3;
    int c4;
    int c5;
    int c6;
    int c7;
} ThreadData;
  86.  
  87. #define OFFSET(x) offsetof(ColorMatrixContext, x)
  88. #define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
  89.  
  90. static const AVOption colormatrix_options[] = {
  91.     { "src", "set source color matrix",      OFFSET(source), AV_OPT_TYPE_INT, {.i64=COLOR_MODE_NONE}, COLOR_MODE_NONE, COLOR_MODE_COUNT-1, .flags=FLAGS, .unit="color_mode" },
  92.     { "dst", "set destination color matrix", OFFSET(dest),   AV_OPT_TYPE_INT, {.i64=COLOR_MODE_NONE}, COLOR_MODE_NONE, COLOR_MODE_COUNT-1, .flags=FLAGS, .unit="color_mode" },
  93.     { "bt709",     "set BT.709 colorspace",      0, AV_OPT_TYPE_CONST, {.i64=COLOR_MODE_BT709},       .flags=FLAGS, .unit="color_mode" },
  94.     { "fcc",       "set FCC colorspace   ",      0, AV_OPT_TYPE_CONST, {.i64=COLOR_MODE_FCC},         .flags=FLAGS, .unit="color_mode" },
  95.     { "bt601",     "set BT.601 colorspace",      0, AV_OPT_TYPE_CONST, {.i64=COLOR_MODE_BT601},       .flags=FLAGS, .unit="color_mode" },
  96.     { "bt470",     "set BT.470 colorspace",      0, AV_OPT_TYPE_CONST, {.i64=COLOR_MODE_BT601},       .flags=FLAGS, .unit="color_mode" },
  97.     { "bt470bg",   "set BT.470 colorspace",      0, AV_OPT_TYPE_CONST, {.i64=COLOR_MODE_BT601},       .flags=FLAGS, .unit="color_mode" },
  98.     { "smpte170m", "set SMTPE-170M colorspace",  0, AV_OPT_TYPE_CONST, {.i64=COLOR_MODE_BT601},       .flags=FLAGS, .unit="color_mode" },
  99.     { "smpte240m", "set SMPTE-240M colorspace",  0, AV_OPT_TYPE_CONST, {.i64=COLOR_MODE_SMPTE240M},   .flags=FLAGS, .unit="color_mode" },
  100.     { NULL }
  101. };
  102.  
  103. AVFILTER_DEFINE_CLASS(colormatrix);
  104.  
/* Shorthand element accessors for the 3x3 matrix parameters of inverse3x3():
 * ma..mi name the cells of the input matrix `m`, ima..imi the cells of the
 * output matrix `im`, laid out row-major (a b c / d e f / g h i). */
#define ma m[0][0]
#define mb m[0][1]
#define mc m[0][2]
#define md m[1][0]
#define me m[1][1]
#define mf m[1][2]
#define mg m[2][0]
#define mh m[2][1]
#define mi m[2][2]

#define ima im[0][0]
#define imb im[0][1]
#define imc im[0][2]
#define imd im[1][0]
#define ime im[1][1]
#define imf im[1][2]
#define img im[2][0]
#define imh im[2][1]
#define imi im[2][2]
  124.  
  125. static void inverse3x3(double im[3][3], const double m[3][3])
  126. {
  127.     double det = ma * (me * mi - mf * mh) - mb * (md * mi - mf * mg) + mc * (md * mh - me * mg);
  128.     det = 1.0 / det;
  129.     ima = det * (me * mi - mf * mh);
  130.     imb = det * (mc * mh - mb * mi);
  131.     imc = det * (mb * mf - mc * me);
  132.     imd = det * (mf * mg - md * mi);
  133.     ime = det * (ma * mi - mc * mg);
  134.     imf = det * (mc * md - ma * mf);
  135.     img = det * (md * mh - me * mg);
  136.     imh = det * (mb * mg - ma * mh);
  137.     imi = det * (ma * me - mb * md);
  138. }
  139.  
  140. static void solve_coefficients(double cm[3][3], double rgb[3][3], const double yuv[3][3])
  141. {
  142.     int i, j;
  143.     for (i = 0; i < 3; i++)
  144.         for (j = 0; j < 3; j++)
  145.             cm[i][j] = yuv[i][0] * rgb[0][j] + yuv[i][1] * rgb[1][j] + yuv[i][2] * rgb[2][j];
  146. }
  147.  
/**
 * Precompute all 16 source->destination conversion matrices in 16.16 fixed
 * point: yuv_convert[src*4 + dst] = yuv_coeff[dst] * inverse(yuv_coeff[src]).
 * Stored into color->yuv_convert; identical src/dst pairs yield the identity.
 */
static void calc_coefficients(AVFilterContext *ctx)
{
    ColorMatrixContext *color = ctx->priv;
    double rgb_coeffd[4][3][3];
    double yuv_convertd[16][3][3];
    int v = 0;
    int i, j, k;

    /* invert each standard's RGB->YUV matrix once */
    for (i = 0; i < 4; i++)
        inverse3x3(rgb_coeffd[i], yuv_coeff[i]);
    for (i = 0; i < 4; i++) {
        for (j = 0; j < 4; j++) {
            solve_coefficients(yuv_convertd[v], rgb_coeffd[i], yuv_coeff[j]);
            /* round each coefficient to 16.16 fixed point */
            for (k = 0; k < 3; k++) {
                color->yuv_convert[v][k][0] = NS(yuv_convertd[v][k][0]);
                color->yuv_convert[v][k][1] = NS(yuv_convertd[v][k][1]);
                color->yuv_convert[v][k][2] = NS(yuv_convertd[v][k][2]);
            }
            /* sanity check: the first column must be exactly (65536, 0, 0),
             * i.e. Y passes through unscaled — the slice code relies on this */
            if (color->yuv_convert[v][0][0] != 65536 || color->yuv_convert[v][1][0] != 0 ||
                color->yuv_convert[v][2][0] != 0) {
                av_log(ctx, AV_LOG_ERROR, "error calculating conversion coefficients\n");
            }
            v++;
        }
    }
}
  174.  
/* Human-readable names indexed by enum ColorMode (0..3). */
static const char * const color_modes[] = {"bt709", "fcc", "bt601", "smpte240m"};
  176.  
/**
 * Validate the option combination and precompute the conversion tables.
 * "dst" must be set; "src" may stay unset (auto-detected per frame later).
 *
 * @return 0 on success, AVERROR(EINVAL) on invalid option combinations
 */
static av_cold int init(AVFilterContext *ctx)
{
    ColorMatrixContext *color = ctx->priv;

    if (color->dest == COLOR_MODE_NONE) {
        av_log(ctx, AV_LOG_ERROR, "Unspecified destination color space\n");
        return AVERROR(EINVAL);
    }

    /* note: only triggers when src was explicitly set; an auto-detected
     * source equal to dest is not rejected here */
    if (color->source == color->dest) {
        av_log(ctx, AV_LOG_ERROR, "Source and destination color space must not be identical\n");
        return AVERROR(EINVAL);
    }

    calc_coefficients(ctx);

    return 0;
}
  195.  
  196. static int process_slice_uyvy422(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
  197. {
  198.     const ThreadData *td = arg;
  199.     const AVFrame *src = td->src;
  200.     AVFrame *dst = td->dst;
  201.     const int height = src->height;
  202.     const int width = src->width*2;
  203.     const int src_pitch = src->linesize[0];
  204.     const int dst_pitch = dst->linesize[0];
  205.     const int slice_start = (height *  jobnr   ) / nb_jobs;
  206.     const int slice_end   = (height * (jobnr+1)) / nb_jobs;
  207.     const unsigned char *srcp = src->data[0] + slice_start * src_pitch;
  208.     unsigned char *dstp = dst->data[0] + slice_start * dst_pitch;
  209.     const int c2 = td->c2;
  210.     const int c3 = td->c3;
  211.     const int c4 = td->c4;
  212.     const int c5 = td->c5;
  213.     const int c6 = td->c6;
  214.     const int c7 = td->c7;
  215.     int x, y;
  216.  
  217.     for (y = slice_start; y < slice_end; y++) {
  218.         for (x = 0; x < width; x += 4) {
  219.             const int u = srcp[x + 0] - 128;
  220.             const int v = srcp[x + 2] - 128;
  221.             const int uvval = c2 * u + c3 * v + 1081344;
  222.             dstp[x + 0] = CB((c4 * u + c5 * v + 8421376) >> 16);
  223.             dstp[x + 1] = CB((65536 * (srcp[x + 1] - 16) + uvval) >> 16);
  224.             dstp[x + 2] = CB((c6 * u + c7 * v + 8421376) >> 16);
  225.             dstp[x + 3] = CB((65536 * (srcp[x + 3] - 16) + uvval) >> 16);
  226.         }
  227.         srcp += src_pitch;
  228.         dstp += dst_pitch;
  229.     }
  230.  
  231.     return 0;
  232. }
  233.  
  234. static int process_slice_yuv444p(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
  235. {
  236.     const ThreadData *td = arg;
  237.     const AVFrame *src = td->src;
  238.     AVFrame *dst = td->dst;
  239.     const int height = src->height;
  240.     const int width = src->width;
  241.     const int slice_start = (height *  jobnr   ) / nb_jobs;
  242.     const int slice_end   = (height * (jobnr+1)) / nb_jobs;
  243.     const int src_pitchY  = src->linesize[0];
  244.     const int src_pitchUV = src->linesize[1];
  245.     const unsigned char *srcpU = src->data[1] + slice_start * src_pitchUV;
  246.     const unsigned char *srcpV = src->data[2] + slice_start * src_pitchUV;
  247.     const unsigned char *srcpY = src->data[0] + slice_start * src_pitchY;
  248.     const int dst_pitchY  = dst->linesize[0];
  249.     const int dst_pitchUV = dst->linesize[1];
  250.     unsigned char *dstpU = dst->data[1] + slice_start * dst_pitchUV;
  251.     unsigned char *dstpV = dst->data[2] + slice_start * dst_pitchUV;
  252.     unsigned char *dstpY = dst->data[0] + slice_start * dst_pitchY;
  253.     const int c2 = td->c2;
  254.     const int c3 = td->c3;
  255.     const int c4 = td->c4;
  256.     const int c5 = td->c5;
  257.     const int c6 = td->c6;
  258.     const int c7 = td->c7;
  259.     int x, y;
  260.  
  261.     for (y = slice_start; y < slice_end; y++) {
  262.         for (x = 0; x < width; x++) {
  263.             const int u = srcpU[x] - 128;
  264.             const int v = srcpV[x] - 128;
  265.             const int uvval = c2 * u + c3 * v + 1081344;
  266.             dstpY[x] = CB((65536 * (srcpY[x] - 16) + uvval) >> 16);
  267.             dstpU[x] = CB((c4 * u + c5 * v + 8421376) >> 16);
  268.             dstpV[x] = CB((c6 * u + c7 * v + 8421376) >> 16);
  269.         }
  270.         srcpY += src_pitchY;
  271.         dstpY += dst_pitchY;
  272.         srcpU += src_pitchUV;
  273.         srcpV += src_pitchUV;
  274.         dstpU += dst_pitchUV;
  275.         dstpV += dst_pitchUV;
  276.     }
  277.  
  278.     return 0;
  279. }
  280.  
  281. static int process_slice_yuv422p(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
  282. {
  283.     const ThreadData *td = arg;
  284.     const AVFrame *src = td->src;
  285.     AVFrame *dst = td->dst;
  286.     const int height = src->height;
  287.     const int width = src->width;
  288.     const int slice_start = (height *  jobnr   ) / nb_jobs;
  289.     const int slice_end   = (height * (jobnr+1)) / nb_jobs;
  290.     const int src_pitchY  = src->linesize[0];
  291.     const int src_pitchUV = src->linesize[1];
  292.     const unsigned char *srcpU = src->data[1] + slice_start * src_pitchUV;
  293.     const unsigned char *srcpV = src->data[2] + slice_start * src_pitchUV;
  294.     const unsigned char *srcpY = src->data[0] + slice_start * src_pitchY;
  295.     const int dst_pitchY  = dst->linesize[0];
  296.     const int dst_pitchUV = dst->linesize[1];
  297.     unsigned char *dstpU = dst->data[1] + slice_start * dst_pitchUV;
  298.     unsigned char *dstpV = dst->data[2] + slice_start * dst_pitchUV;
  299.     unsigned char *dstpY = dst->data[0] + slice_start * dst_pitchY;
  300.     const int c2 = td->c2;
  301.     const int c3 = td->c3;
  302.     const int c4 = td->c4;
  303.     const int c5 = td->c5;
  304.     const int c6 = td->c6;
  305.     const int c7 = td->c7;
  306.     int x, y;
  307.  
  308.     for (y = slice_start; y < slice_end; y++) {
  309.         for (x = 0; x < width; x += 2) {
  310.             const int u = srcpU[x >> 1] - 128;
  311.             const int v = srcpV[x >> 1] - 128;
  312.             const int uvval = c2 * u + c3 * v + 1081344;
  313.             dstpY[x + 0] = CB((65536 * (srcpY[x + 0] - 16) + uvval) >> 16);
  314.             dstpY[x + 1] = CB((65536 * (srcpY[x + 1] - 16) + uvval) >> 16);
  315.             dstpU[x >> 1] = CB((c4 * u + c5 * v + 8421376) >> 16);
  316.             dstpV[x >> 1] = CB((c6 * u + c7 * v + 8421376) >> 16);
  317.         }
  318.         srcpY += src_pitchY;
  319.         dstpY += dst_pitchY;
  320.         srcpU += src_pitchUV;
  321.         srcpV += src_pitchUV;
  322.         dstpU += dst_pitchUV;
  323.         dstpV += dst_pitchUV;
  324.     }
  325.  
  326.     return 0;
  327. }
  328.  
/**
 * Slice worker for planar YUV420P frames: chroma subsampled 2:1 both
 * horizontally and vertically, so each U/V sample is shared by a 2x2 luma
 * block.  Slices are assigned in units of chroma rows (pairs of luma rows);
 * srcpY/dstpY walk the even luma rows and srcpN/dstpN the odd ones.
 * NOTE(review): with an odd frame height the last iteration reads/writes row
 * height via srcpN/dstpN — presumably safe due to frame padding; confirm.
 */
static int process_slice_yuv420p(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    const ThreadData *td = arg;
    const AVFrame *src = td->src;
    AVFrame *dst = td->dst;
    const int height = FFALIGN(src->height, 2) >> 1; // number of chroma rows
    const int width = src->width;
    const int slice_start = ((height *  jobnr   ) / nb_jobs) << 1; // luma-row units
    const int slice_end   = ((height * (jobnr+1)) / nb_jobs) << 1;
    const int src_pitchY  = src->linesize[0];
    const int src_pitchUV = src->linesize[1];
    const int dst_pitchY  = dst->linesize[0];
    const int dst_pitchUV = dst->linesize[1];
    const unsigned char *srcpY = src->data[0] + src_pitchY * slice_start;
    const unsigned char *srcpU = src->data[1] + src_pitchUV * (slice_start >> 1);
    const unsigned char *srcpV = src->data[2] + src_pitchUV * (slice_start >> 1);
    const unsigned char *srcpN = src->data[0] + src_pitchY * (slice_start + 1); // next (odd) luma row
    unsigned char *dstpU = dst->data[1] + dst_pitchUV * (slice_start >> 1);
    unsigned char *dstpV = dst->data[2] + dst_pitchUV * (slice_start >> 1);
    unsigned char *dstpY = dst->data[0] + dst_pitchY * slice_start;
    unsigned char *dstpN = dst->data[0] + dst_pitchY * (slice_start + 1);
    const int c2 = td->c2;
    const int c3 = td->c3;
    const int c4 = td->c4;
    const int c5 = td->c5;
    const int c6 = td->c6;
    const int c7 = td->c7;
    int x, y;

    for (y = slice_start; y < slice_end; y += 2) {
        for (x = 0; x < width; x += 2) {
            const int u = srcpU[x >> 1] - 128;
            const int v = srcpV[x >> 1] - 128;
            // chroma contribution to luma plus combined rounding/offset bias
            const int uvval = c2 * u + c3 * v + 1081344;
            // one chroma pair drives the whole 2x2 luma block
            dstpY[x + 0] = CB((65536 * (srcpY[x + 0] - 16) + uvval) >> 16);
            dstpY[x + 1] = CB((65536 * (srcpY[x + 1] - 16) + uvval) >> 16);
            dstpN[x + 0] = CB((65536 * (srcpN[x + 0] - 16) + uvval) >> 16);
            dstpN[x + 1] = CB((65536 * (srcpN[x + 1] - 16) + uvval) >> 16);
            dstpU[x >> 1] = CB((c4 * u + c5 * v + 8421376) >> 16);
            dstpV[x >> 1] = CB((c6 * u + c7 * v + 8421376) >> 16);
        }
        srcpY += src_pitchY << 1;
        dstpY += dst_pitchY << 1;
        srcpN += src_pitchY << 1;
        dstpN += dst_pitchY << 1;
        srcpU += src_pitchUV;
        srcpV += src_pitchUV;
        dstpU += dst_pitchUV;
        dstpV += dst_pitchUV;
    }

    return 0;
}
  382.  
  383. static int config_input(AVFilterLink *inlink)
  384. {
  385.     AVFilterContext *ctx = inlink->dst;
  386.     ColorMatrixContext *color = ctx->priv;
  387.     const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(inlink->format);
  388.  
  389.     color->hsub = pix_desc->log2_chroma_w;
  390.     color->vsub = pix_desc->log2_chroma_h;
  391.  
  392.     av_log(ctx, AV_LOG_VERBOSE, "%s -> %s\n",
  393.            color_modes[color->source], color_modes[color->dest]);
  394.  
  395.     return 0;
  396. }
  397.  
  398. static int query_formats(AVFilterContext *ctx)
  399. {
  400.     static const enum AVPixelFormat pix_fmts[] = {
  401.         AV_PIX_FMT_YUV444P,
  402.         AV_PIX_FMT_YUV422P,
  403.         AV_PIX_FMT_YUV420P,
  404.         AV_PIX_FMT_UYVY422,
  405.         AV_PIX_FMT_NONE
  406.     };
  407.     AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
  408.     if (!fmts_list)
  409.         return AVERROR(ENOMEM);
  410.     return ff_set_common_formats(ctx, fmts_list);
  411. }
  412.  
/**
 * Per-frame entry point: pick the conversion matrix (auto-detecting the
 * source space from frame metadata when "src" was not set), tag the output
 * frame with the destination colorspace, and run the format-specific slice
 * worker across the filter graph's threads.
 */
static int filter_frame(AVFilterLink *link, AVFrame *in)
{
    AVFilterContext *ctx = link->dst;
    ColorMatrixContext *color = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    AVFrame *out;
    ThreadData td = {0};

    out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
    if (!out) {
        av_frame_free(&in);
        return AVERROR(ENOMEM);
    }
    av_frame_copy_props(out, in);

    if (color->source == COLOR_MODE_NONE) {
        // auto-detect the source matrix from the frame's colorspace tag
        enum AVColorSpace cs = av_frame_get_colorspace(in);
        enum ColorMode source;

        switch(cs) {
        case AVCOL_SPC_BT709     : source = COLOR_MODE_BT709     ; break;
        case AVCOL_SPC_FCC       : source = COLOR_MODE_FCC       ; break;
        case AVCOL_SPC_SMPTE240M : source = COLOR_MODE_SMPTE240M ; break;
        case AVCOL_SPC_BT470BG   : source = COLOR_MODE_BT601     ; break;
        case AVCOL_SPC_SMPTE170M : source = COLOR_MODE_BT601     ; break;
        default :
            av_log(ctx, AV_LOG_ERROR, "Input frame does not specify a supported colorspace, and none has been specified as source either\n");
            av_frame_free(&out);
            return AVERROR(EINVAL);
        }
        // NOTE(review): `in` is not freed on this error path — possible leak; confirm
        color->mode = source * 4 + color->dest;
    } else
        color->mode = color->source * 4 + color->dest;

    // tag the output with the destination colorspace
    switch(color->dest) {
    case COLOR_MODE_BT709    : av_frame_set_colorspace(out, AVCOL_SPC_BT709)    ; break;
    case COLOR_MODE_FCC      : av_frame_set_colorspace(out, AVCOL_SPC_FCC)      ; break;
    case COLOR_MODE_SMPTE240M: av_frame_set_colorspace(out, AVCOL_SPC_SMPTE240M); break;
    case COLOR_MODE_BT601    : av_frame_set_colorspace(out, AVCOL_SPC_BT470BG)  ; break;
    }

    td.src = in;
    td.dst = out;
    // pick the six non-trivial coefficients of the selected matrix
    td.c2 = color->yuv_convert[color->mode][0][1];
    td.c3 = color->yuv_convert[color->mode][0][2];
    td.c4 = color->yuv_convert[color->mode][1][1];
    td.c5 = color->yuv_convert[color->mode][1][2];
    td.c6 = color->yuv_convert[color->mode][2][1];
    td.c7 = color->yuv_convert[color->mode][2][2];

    // 420p slices are two luma rows tall, hence the height/2 job cap
    if (in->format == AV_PIX_FMT_YUV444P)
        ctx->internal->execute(ctx, process_slice_yuv444p, &td, NULL,
                               FFMIN(in->height, ctx->graph->nb_threads));
    else if (in->format == AV_PIX_FMT_YUV422P)
        ctx->internal->execute(ctx, process_slice_yuv422p, &td, NULL,
                               FFMIN(in->height, ctx->graph->nb_threads));
    else if (in->format == AV_PIX_FMT_YUV420P)
        ctx->internal->execute(ctx, process_slice_yuv420p, &td, NULL,
                               FFMIN(in->height / 2, ctx->graph->nb_threads));
    else
        ctx->internal->execute(ctx, process_slice_uyvy422, &td, NULL,
                               FFMIN(in->height, ctx->graph->nb_threads));

    av_frame_free(&in);
    return ff_filter_frame(outlink, out);
}
  479.  
/* Single video input: configure chroma subsampling, then process frames. */
static const AVFilterPad colormatrix_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_input,
        .filter_frame = filter_frame,
    },
    { NULL }
};

/* Single pass-through video output. */
static const AVFilterPad colormatrix_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
    },
    { NULL }
};
  497.  
/* Filter definition: slice-threaded colormatrix conversion filter. */
AVFilter ff_vf_colormatrix = {
    .name          = "colormatrix",
    .description   = NULL_IF_CONFIG_SMALL("Convert color matrix."),
    .priv_size     = sizeof(ColorMatrixContext),
    .init          = init,
    .query_formats = query_formats,
    .inputs        = colormatrix_inputs,
    .outputs       = colormatrix_outputs,
    .priv_class    = &colormatrix_class,
    .flags         = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC | AVFILTER_FLAG_SLICE_THREADS,
};
  509.