/*
 * Copyright (c) 2007 Bobby Bingham
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * FIFO buffering filter
 */

#include "libavutil/avassert.h"
#include "libavutil/channel_layout.h"
#include "libavutil/common.h"
#include "libavutil/mathematics.h"
#include "libavutil/samplefmt.h"

#include "audio.h"
#include "avfilter.h"
#include "internal.h"
#include "video.h"

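/**
 * A singly linked list node holding one queued frame.
 */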
typedef struct Buf {
    AVFrame *frame;
    struct Buf        *next;
} Buf;

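/**
 * Filter private context: the queue of buffered frames. root is a dummy
 * list head; the queue is empty when root.next is NULL.
 */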
typedef struct {
    Buf  root;
    Buf *last;   ///< last buffered frame

    /**
     * When a specific number of output samples is requested, the partial
     * buffer is stored here
     */
    AVFrame *out;
    int allocated_samples;      ///< number of samples out was allocated for
} FifoContext;

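/**
 * Initialize an empty queue: last points at the dummy root node.
 */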
static av_cold int init(AVFilterContext *ctx)
{
    FifoContext *fifo = ctx->priv;
    fifo->last = &fifo->root;

    return 0;
}

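/**
 * Free all queued frames and any partially filled output buffer.
 */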
static av_cold void uninit(AVFilterContext *ctx)
{
    FifoContext *fifo = ctx->priv;
    Buf *buf, *tmp;

    for (buf = fifo->root.next; buf; buf = tmp) {
        tmp = buf->next;
        av_frame_free(&buf->frame);
        av_free(buf);
    }

    av_frame_free(&fifo->out);
}

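/**
 * Input filter_frame callback: append the incoming frame to the tail of
 * the queue. Takes ownership of the frame and frees it on allocation
 * failure.
 */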
static int add_to_queue(AVFilterLink *inlink, AVFrame *frame)
{
    FifoContext *fifo = inlink->dst->priv;

    fifo->last->next = av_mallocz(sizeof(Buf));
    if (!fifo->last->next) {
        av_frame_free(&frame);
        return AVERROR(ENOMEM);
    }

    fifo->last = fifo->last->next;
    fifo->last->frame = frame;

    return 0;
}

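/**
 * Unlink and free the head node of the queue. The frame it held is not
 * freed here; the caller has already passed it on or freed it.
 */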
static void queue_pop(FifoContext *s)
{
    Buf *tmp = s->root.next->next;
    if (s->last == s->root.next)
        s->last = &s->root;
    av_freep(&s->root.next);
    s->root.next = tmp;
}

/**
 * Move data pointers and pts offset samples forward.
 */
static void buffer_offset(AVFilterLink *link, AVFrame *frame,
                          int offset)
{
    int nb_channels = av_get_channel_layout_nb_channels(link->channel_layout);
    int planar = av_sample_fmt_is_planar(link->format);
    int planes = planar ? nb_channels : 1;
    int block_align = av_get_bytes_per_sample(link->format) * (planar ? 1 : nb_channels);
    int i;

    av_assert0(frame->nb_samples > offset);

    for (i = 0; i < planes; i++)
        frame->extended_data[i] += block_align * offset;
    if (frame->data != frame->extended_data)
        memcpy(frame->data, frame->extended_data,
               FFMIN(planes, FF_ARRAY_ELEMS(frame->data)) * sizeof(*frame->data));
    frame->linesize[0] -= block_align*offset;
    frame->nb_samples -= offset;

    if (frame->pts != AV_NOPTS_VALUE) {
        frame->pts += av_rescale_q(offset, (AVRational){1, link->sample_rate},
                                   link->time_base);
    }
}

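/**
 * Return the largest power-of-two alignment (up to 128 bytes) satisfied by
 * all plane pointers of the frame.
 */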
static int calc_ptr_alignment(AVFrame *frame)
{
    int planes = av_sample_fmt_is_planar(frame->format) ?
                 av_get_channel_layout_nb_channels(frame->channel_layout) : 1;
    int min_align = 128;
    int p;

    for (p = 0; p < planes; p++) {
        int cur_align = 128;
        while ((intptr_t)frame->extended_data[p] % cur_align)
            cur_align >>= 1;
        if (cur_align < min_align)
            min_align = cur_align;
    }
    return min_align;
}

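/**
 * Send exactly link->request_samples samples downstream, either by passing
 * a queued frame (or an offset clone of it) through directly, or by
 * accumulating samples from several queued frames into s->out, padding
 * with silence at EOF.
 */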
static int return_audio_frame(AVFilterContext *ctx)
{
    AVFilterLink *link = ctx->outputs[0];
    FifoContext *s = ctx->priv;
    AVFrame *head = s->root.next ? s->root.next->frame : NULL;
    AVFrame *out;
    int ret;

    /* if head is NULL then we're flushing the remaining samples in out */
    if (!head && !s->out)
        return AVERROR_EOF;

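    /* Fast path: no partial buffer is in progress, the head frame alone can
     * satisfy the request and its planes are sufficiently aligned, so pass
     * it (or an offset clone of it) through without copying samples. */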
    if (!s->out &&
        head->nb_samples >= link->request_samples &&
        calc_ptr_alignment(head) >= 32) {
        if (head->nb_samples == link->request_samples) {
            out = head;
            queue_pop(s);
        } else {
            out = av_frame_clone(head);
            if (!out)
                return AVERROR(ENOMEM);

            out->nb_samples = link->request_samples;
            buffer_offset(link, head, link->request_samples);
        }
    } else {
        int nb_channels = av_get_channel_layout_nb_channels(link->channel_layout);

        if (!s->out) {
            s->out = ff_get_audio_buffer(link, link->request_samples);
            if (!s->out)
                return AVERROR(ENOMEM);

            s->out->nb_samples = 0;
            s->out->pts                   = head->pts;
            s->allocated_samples          = link->request_samples;
        } else if (link->request_samples != s->allocated_samples) {
            av_log(ctx, AV_LOG_ERROR, "request_samples changed before the "
                   "buffer was returned.\n");
            return AVERROR(EINVAL);
        }

        while (s->out->nb_samples < s->allocated_samples) {
            int len;

            if (!s->root.next) {
                ret = ff_request_frame(ctx->inputs[0]);
                if (ret == AVERROR_EOF) {
                    av_samples_set_silence(s->out->extended_data,
                                           s->out->nb_samples,
                                           s->allocated_samples -
                                           s->out->nb_samples,
                                           nb_channels, link->format);
                    s->out->nb_samples = s->allocated_samples;
                    break;
                } else if (ret < 0)
                    return ret;
                av_assert0(s->root.next); // If ff_request_frame() succeeded then we should have a frame
            }
            head = s->root.next->frame;

            len = FFMIN(s->allocated_samples - s->out->nb_samples,
                        head->nb_samples);

            av_samples_copy(s->out->extended_data, head->extended_data,
                            s->out->nb_samples, 0, len, nb_channels,
                            link->format);
            s->out->nb_samples += len;

            if (len == head->nb_samples) {
                av_frame_free(&head);
                queue_pop(s);
            } else {
                buffer_offset(link, head, len);
            }
        }
        out = s->out;
        s->out = NULL;
    }
    return ff_filter_frame(link, out);
}

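/**
 * Output request_frame callback: pull a frame from the input if the queue
 * is empty, then either send the head frame downstream or, when the output
 * link requests a fixed number of samples, delegate to return_audio_frame().
 */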
static int request_frame(AVFilterLink *outlink)
{
    FifoContext *fifo = outlink->src->priv;
    int ret = 0;

    if (!fifo->root.next) {
        if ((ret = ff_request_frame(outlink->src->inputs[0])) < 0) {
            if (ret == AVERROR_EOF && outlink->request_samples)
                return return_audio_frame(outlink->src);
            return ret;
        }
        av_assert0(fifo->root.next);
    }

    if (outlink->request_samples) {
        return return_audio_frame(outlink->src);
    } else {
        ret = ff_filter_frame(outlink, fifo->root.next->frame);
        queue_pop(fifo);
    }

    return ret;
}

static const AVFilterPad avfilter_vf_fifo_inputs[] = {
    {
        .name             = "default",
        .type             = AVMEDIA_TYPE_VIDEO,
        .filter_frame     = add_to_queue,
    },
    { NULL }
};

static const AVFilterPad avfilter_vf_fifo_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .request_frame = request_frame,
    },
    { NULL }
};

AVFilter avfilter_vf_fifo = {
    .name      = "fifo",
    .description = NULL_IF_CONFIG_SMALL("Buffer input images and send them when they are requested."),

    .init      = init,
    .uninit    = uninit,

    .priv_size = sizeof(FifoContext),

    .inputs    = avfilter_vf_fifo_inputs,
    .outputs   = avfilter_vf_fifo_outputs,
};

static const AVFilterPad avfilter_af_afifo_inputs[] = {
    {
        .name             = "default",
        .type             = AVMEDIA_TYPE_AUDIO,
        .filter_frame     = add_to_queue,
    },
    { NULL }
};

static const AVFilterPad avfilter_af_afifo_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_AUDIO,
        .request_frame = request_frame,
    },
    { NULL }
};

AVFilter avfilter_af_afifo = {
    .name        = "afifo",
    .description = NULL_IF_CONFIG_SMALL("Buffer input frames and send them when they are requested."),

    .init      = init,
    .uninit    = uninit,

    .priv_size = sizeof(FifoContext),

    .inputs    = avfilter_af_afifo_inputs,
    .outputs   = avfilter_af_afifo_outputs,
};