/*
 * Copyright (c) 2007 Benoit Fouet
 * Copyright (c) 2010 Stefano Sabatini
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * horizontal flip filter
 */

#include <string.h>

#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
#include "libavutil/pixdesc.h"
#include "libavutil/internal.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/imgutils.h"

typedef struct FlipContext {
    int max_step[4];    ///< max pixel step for each plane, expressed as a number of bytes
    int planewidth[4];  ///< width of each plane
    int planeheight[4]; ///< height of each plane
} FlipContext;

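/*
 * Format negotiation: accept every pixel format known to libavutil except
 * hardware-accelerated formats, bitstream formats, and formats whose first
 * two components share a plane while the horizontal and vertical chroma
 * subsampling factors differ.
 */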
static int query_formats(AVFilterContext *ctx)
{
    AVFilterFormats *pix_fmts = NULL;
    int fmt;

    for (fmt = 0; av_pix_fmt_desc_get(fmt); fmt++) {
        const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(fmt);
        if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL ||
              desc->flags & AV_PIX_FMT_FLAG_BITSTREAM ||
              (desc->log2_chroma_w != desc->log2_chroma_h &&
               desc->comp[0].plane == desc->comp[1].plane)))
            ff_add_format(&pix_fmts, fmt);
    }

    return ff_set_common_formats(ctx, pix_fmts);
}

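/*
 * Cache the per-plane pixel step (in bytes) and the per-plane dimensions,
 * taking chroma subsampling into account, so filter_slice() does not have
 * to recompute them for every frame.
 */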
static int config_props(AVFilterLink *inlink)
{
    FlipContext *s = inlink->dst->priv;
    const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(inlink->format);
    const int hsub = pix_desc->log2_chroma_w;
    const int vsub = pix_desc->log2_chroma_h;

    av_image_fill_max_pixsteps(s->max_step, NULL, pix_desc);
    s->planewidth[0]  = s->planewidth[3]  = inlink->w;
    s->planewidth[1]  = s->planewidth[2]  = FF_CEIL_RSHIFT(inlink->w, hsub);
    s->planeheight[0] = s->planeheight[3] = inlink->h;
    s->planeheight[1] = s->planeheight[2] = FF_CEIL_RSHIFT(inlink->h, vsub);

    return 0;
}

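/* Frame pair handed to each slice-threading job via the opaque argument. */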
typedef struct ThreadData {
    AVFrame *in, *out;
} ThreadData;

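/*
 * Slice worker: job 'job' of 'nb_jobs' mirrors rows [start, end) of every
 * plane. The input row pointer is positioned on the last pixel of the row
 * and walked backwards while the output row is written left to right.
 * Common pixel steps (1, 2, 3 and 4 bytes) get dedicated loops; any other
 * step falls back to a per-pixel memcpy().
 */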
static int filter_slice(AVFilterContext *ctx, void *arg, int job, int nb_jobs)
{
    FlipContext *s = ctx->priv;
    ThreadData *td = arg;
    AVFrame *in = td->in;
    AVFrame *out = td->out;
    uint8_t *inrow, *outrow;
    int i, j, plane, step;

    for (plane = 0; plane < 4 && in->data[plane] && in->linesize[plane]; plane++) {
        const int width  = s->planewidth[plane];
        const int height = s->planeheight[plane];
        const int start = (height *  job   ) / nb_jobs;
        const int end   = (height * (job+1)) / nb_jobs;

        step = s->max_step[plane];

        outrow = out->data[plane] + start * out->linesize[plane];
        inrow  = in ->data[plane] + start * in->linesize[plane] + (width - 1) * step;
        for (i = start; i < end; i++) {
            switch (step) {
            case 1:
                for (j = 0; j < width; j++)
                    outrow[j] = inrow[-j];
            break;

            case 2:
            {
                uint16_t *outrow16 = (uint16_t *)outrow;
                uint16_t * inrow16 = (uint16_t *) inrow;
                for (j = 0; j < width; j++)
                    outrow16[j] = inrow16[-j];
            }
            break;

            case 3:
            {
                uint8_t *in  =  inrow;
                uint8_t *out = outrow;
                for (j = 0; j < width; j++, out += 3, in -= 3) {
                    int32_t v = AV_RB24(in);
                    AV_WB24(out, v);
                }
            }
            break;

            case 4:
            {
                uint32_t *outrow32 = (uint32_t *)outrow;
                uint32_t * inrow32 = (uint32_t *) inrow;
                for (j = 0; j < width; j++)
                    outrow32[j] = inrow32[-j];
            }
            break;

            default:
                for (j = 0; j < width; j++)
                    memcpy(outrow + j*step, inrow - j*step, step);
            }

            inrow  += in ->linesize[plane];
            outrow += out->linesize[plane];
        }
    }

    return 0;
}

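/*
 * Per-frame entry point: allocate a writable output frame, copy the frame
 * properties (and the palette for paletted formats), then run filter_slice()
 * across at most min(output height, number of graph threads) jobs before
 * handing the flipped frame downstream.
 */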
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx  = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    ThreadData td;
    AVFrame *out;

    out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
    if (!out) {
        av_frame_free(&in);
        return AVERROR(ENOMEM);
    }
    av_frame_copy_props(out, in);

    /* copy palette if required */
    if (av_pix_fmt_desc_get(inlink->format)->flags & AV_PIX_FMT_FLAG_PAL)
        memcpy(out->data[1], in->data[1], AVPALETTE_SIZE);

    td.in = in, td.out = out;
    ctx->internal->execute(ctx, filter_slice, &td, NULL, FFMIN(outlink->h, ctx->graph->nb_threads));

    av_frame_free(&in);
    return ff_filter_frame(outlink, out);
}

static const AVFilterPad avfilter_vf_hflip_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = filter_frame,
        .config_props = config_props,
    },
    { NULL }
};

static const AVFilterPad avfilter_vf_hflip_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
    },
    { NULL }
};

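/*
 * Filter definition: registers the "hflip" video filter with slice threading
 * enabled, so filter_slice() can process several row bands in parallel.
 */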
AVFilter ff_vf_hflip = {
    .name          = "hflip",
    .description   = NULL_IF_CONFIG_SMALL("Horizontally flip the input video."),
    .priv_size     = sizeof(FlipContext),
    .query_formats = query_formats,
    .inputs        = avfilter_vf_hflip_inputs,
    .outputs       = avfilter_vf_hflip_outputs,
    .flags         = AVFILTER_FLAG_SLICE_THREADS,
};