/*
 * WebM DASH Manifest XML muxer
 * Copyright (c) 2014 Vignesh Venkatasubramanian
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/*
 * WebM DASH Specification:
 * https://sites.google.com/a/webmproject.org/wiki/adaptive-streaming/webm-dash-specification
 * ISO DASH Specification:
 * http://standards.iso.org/ittf/PubliclyAvailableStandards/c065274_ISO_IEC_23009-1_2014.zip
 */
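
/*
 * Illustrative usage (adapted from FFmpeg's muxer documentation; the file
 * names and stream layout are hypothetical): this muxer is normally fed WebM
 * files that were indexed by the webm_dash_manifest demuxer, which provides
 * the metadata (cues, bandwidth, filename) consumed below, e.g.
 *
 *   ffmpeg -f webm_dash_manifest -i video1.webm \
 *          -f webm_dash_manifest -i video2.webm \
 *          -f webm_dash_manifest -i audio.webm \
 *          -c copy -map 0 -map 1 -map 2 \
 *          -f webm_dash_manifest \
 *          -adaptation_sets "id=0,streams=0,1 id=1,streams=2" \
 *          manifest.xml
 */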

#include <float.h>
#include <stdint.h>
#include <string.h>

#include "avformat.h"
#include "avio_internal.h"
#include "matroska.h"

#include "libavutil/avstring.h"
#include "libavutil/dict.h"
#include "libavutil/opt.h"
#include "libavutil/time_internal.h"

typedef struct AdaptationSet {
    char id[10];
    int *streams;
    int nb_streams;
} AdaptationSet;

typedef struct WebMDashMuxContext {
    const AVClass  *class;
    char *adaptation_sets;
    AdaptationSet *as;
    int nb_as;
    int representation_id;
    int is_live;
    int chunk_start_index;
    int chunk_duration;
    char *utc_timing_url;
    double time_shift_buffer_depth;
    int minimum_update_period;
    int debug_mode;
} WebMDashMuxContext;

static const char *get_codec_name(int codec_id)
{
    switch (codec_id) {
        case AV_CODEC_ID_VP8:
            return "vp8";
        case AV_CODEC_ID_VP9:
            return "vp9";
        case AV_CODEC_ID_VORBIS:
            return "vorbis";
        case AV_CODEC_ID_OPUS:
            return "opus";
    }
    return NULL;
}

static double get_duration(AVFormatContext *s)
{
    int i = 0;
    double max = 0.0;
    for (i = 0; i < s->nb_streams; i++) {
        AVDictionaryEntry *duration = av_dict_get(s->streams[i]->metadata,
                                                  DURATION, NULL, 0);
        if (!duration || atof(duration->value) < 0) continue;
        if (atof(duration->value) > max) max = atof(duration->value);
    }
    return max / 1000;
}

static int write_header(AVFormatContext *s)
{
    WebMDashMuxContext *w = s->priv_data;
    double min_buffer_time = 1.0;
    avio_printf(s->pb, "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n");
    avio_printf(s->pb, "<MPD\n");
    avio_printf(s->pb, "  xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"\n");
    avio_printf(s->pb, "  xmlns=\"urn:mpeg:DASH:schema:MPD:2011\"\n");
    avio_printf(s->pb, "  xsi:schemaLocation=\"urn:mpeg:DASH:schema:MPD:2011\"\n");
    avio_printf(s->pb, "  type=\"%s\"\n", w->is_live ? "dynamic" : "static");
    if (!w->is_live) {
        avio_printf(s->pb, "  mediaPresentationDuration=\"PT%gS\"\n",
                    get_duration(s));
    }
    avio_printf(s->pb, "  minBufferTime=\"PT%gS\"\n", min_buffer_time);
    avio_printf(s->pb, "  profiles=\"%s\"%s",
                w->is_live ? "urn:mpeg:dash:profile:isoff-live:2011" : "urn:webm:dash:profile:webm-on-demand:2012",
                w->is_live ? "\n" : ">\n");
    if (w->is_live) {
        time_t local_time = time(NULL);
        struct tm gmt_buffer;
        struct tm *gmt = gmtime_r(&local_time, &gmt_buffer);
        char gmt_iso[21];
        if (!strftime(gmt_iso, 21, "%Y-%m-%dT%H:%M:%SZ", gmt)) {
            return AVERROR_UNKNOWN;
        }
        if (w->debug_mode) {
            av_strlcpy(gmt_iso, "", 1);
        }
        avio_printf(s->pb, "  availabilityStartTime=\"%s\"\n", gmt_iso);
        avio_printf(s->pb, "  timeShiftBufferDepth=\"PT%gS\"\n", w->time_shift_buffer_depth);
        avio_printf(s->pb, "  minimumUpdatePeriod=\"PT%dS\"", w->minimum_update_period);
        avio_printf(s->pb, ">\n");
        if (w->utc_timing_url) {
            avio_printf(s->pb, "<UTCTiming\n");
            avio_printf(s->pb, "  schemeIdUri=\"urn:mpeg:dash:utc:http-iso:2014\"\n");
            avio_printf(s->pb, "  value=\"%s\"/>\n", w->utc_timing_url);
        }
    }
    return 0;
}
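
/*
 * For reference (illustrative, not literal source output): with the default
 * options and a hypothetical 9.5 second non-live presentation, write_header()
 * opens the manifest roughly as follows:
 *
 *   <?xml version="1.0" encoding="UTF-8"?>
 *   <MPD
 *     xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
 *     xmlns="urn:mpeg:DASH:schema:MPD:2011"
 *     xsi:schemaLocation="urn:mpeg:DASH:schema:MPD:2011"
 *     type="static"
 *     mediaPresentationDuration="PT9.5S"
 *     minBufferTime="PT1S"
 *     profiles="urn:webm:dash:profile:webm-on-demand:2012">
 */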

static void write_footer(AVFormatContext *s)
{
    avio_printf(s->pb, "</MPD>\n");
}

static int subsegment_alignment(AVFormatContext *s, AdaptationSet *as) {
    int i;
    AVDictionaryEntry *gold = av_dict_get(s->streams[as->streams[0]]->metadata,
                                          CUE_TIMESTAMPS, NULL, 0);
    if (!gold) return 0;
    for (i = 1; i < as->nb_streams; i++) {
        AVDictionaryEntry *ts = av_dict_get(s->streams[as->streams[i]]->metadata,
                                            CUE_TIMESTAMPS, NULL, 0);
        if (!ts || strncmp(gold->value, ts->value, strlen(gold->value))) return 0;
    }
    return 1;
}

static int bitstream_switching(AVFormatContext *s, AdaptationSet *as) {
    int i;
    AVDictionaryEntry *gold_track_num = av_dict_get(s->streams[as->streams[0]]->metadata,
                                                    TRACK_NUMBER, NULL, 0);
    AVCodecContext *gold_codec = s->streams[as->streams[0]]->codec;
    if (!gold_track_num) return 0;
    for (i = 1; i < as->nb_streams; i++) {
        AVDictionaryEntry *track_num = av_dict_get(s->streams[as->streams[i]]->metadata,
                                                   TRACK_NUMBER, NULL, 0);
        AVCodecContext *codec = s->streams[as->streams[i]]->codec;
        if (!track_num ||
            strncmp(gold_track_num->value, track_num->value, strlen(gold_track_num->value)) ||
            gold_codec->codec_id != codec->codec_id ||
            gold_codec->extradata_size != codec->extradata_size ||
            memcmp(gold_codec->extradata, codec->extradata, codec->extradata_size)) {
            return 0;
        }
    }
    return 1;
}

/*
 * Writes a Representation within an Adaptation Set. Returns 0 on success and
 * < 0 on failure.
 */
static int write_representation(AVFormatContext *s, AVStream *stream, char *id,
                                int output_width, int output_height,
                                int output_sample_rate) {
    WebMDashMuxContext *w = s->priv_data;
    AVDictionaryEntry *irange = av_dict_get(stream->metadata, INITIALIZATION_RANGE, NULL, 0);
    AVDictionaryEntry *cues_start = av_dict_get(stream->metadata, CUES_START, NULL, 0);
    AVDictionaryEntry *cues_end = av_dict_get(stream->metadata, CUES_END, NULL, 0);
    AVDictionaryEntry *filename = av_dict_get(stream->metadata, FILENAME, NULL, 0);
    AVDictionaryEntry *bandwidth = av_dict_get(stream->metadata, BANDWIDTH, NULL, 0);
    if ((w->is_live && (!filename)) ||
        (!w->is_live && (!irange || !cues_start || !cues_end || !filename || !bandwidth))) {
        return AVERROR_INVALIDDATA;
    }
    avio_printf(s->pb, "<Representation id=\"%s\"", id);
    // FIXME: For live, this should be obtained from the input file or as an AVOption.
    avio_printf(s->pb, " bandwidth=\"%s\"",
                w->is_live ? (stream->codec->codec_type == AVMEDIA_TYPE_AUDIO ? "128000" : "1000000") : bandwidth->value);
    if (stream->codec->codec_type == AVMEDIA_TYPE_VIDEO && output_width)
        avio_printf(s->pb, " width=\"%d\"", stream->codec->width);
    if (stream->codec->codec_type == AVMEDIA_TYPE_VIDEO && output_height)
        avio_printf(s->pb, " height=\"%d\"", stream->codec->height);
    if (stream->codec->codec_type == AVMEDIA_TYPE_AUDIO && output_sample_rate)
        avio_printf(s->pb, " audioSamplingRate=\"%d\"", stream->codec->sample_rate);
    if (w->is_live) {
        // For live streams, codec and MIME type always go in the Representation tag.
        avio_printf(s->pb, " codecs=\"%s\"", get_codec_name(stream->codec->codec_id));
        avio_printf(s->pb, " mimeType=\"%s/webm\"",
                    stream->codec->codec_type == AVMEDIA_TYPE_VIDEO ? "video" : "audio");
        // For live streams, subsegments always start with key frames, so this
        // is always 1.
        avio_printf(s->pb, " startsWithSAP=\"1\"");
        avio_printf(s->pb, ">");
    } else {
        avio_printf(s->pb, ">\n");
        avio_printf(s->pb, "<BaseURL>%s</BaseURL>\n", filename->value);
        avio_printf(s->pb, "<SegmentBase\n");
        avio_printf(s->pb, "  indexRange=\"%s-%s\">\n", cues_start->value, cues_end->value);
        avio_printf(s->pb, "<Initialization\n");
        avio_printf(s->pb, "  range=\"0-%s\" />\n", irange->value);
        avio_printf(s->pb, "</SegmentBase>\n");
    }
    avio_printf(s->pb, "</Representation>\n");
    return 0;
}
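
/*
 * For reference (hypothetical metadata values): for a non-live video stream
 * whose demuxer metadata carries filename "video_360.webm", bandwidth
 * "500000", initialization range 437 and Cues at bytes 7000-7500, and whose
 * width/height are not hoisted into the AdaptationSet tag, the Representation
 * above comes out roughly as:
 *
 *   <Representation id="0" bandwidth="500000" width="640" height="360">
 *   <BaseURL>video_360.webm</BaseURL>
 *   <SegmentBase
 *     indexRange="7000-7500">
 *   <Initialization
 *     range="0-437" />
 *   </SegmentBase>
 *   </Representation>
 */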

/*
 * Checks if the width of all streams in an Adaptation Set is the same.
 * Returns 1 if true, 0 otherwise.
 */
static int check_matching_width(AVFormatContext *s, AdaptationSet *as) {
    int first_width, i;
    if (as->nb_streams < 2) return 1;
    first_width = s->streams[as->streams[0]]->codec->width;
    for (i = 1; i < as->nb_streams; i++)
        if (first_width != s->streams[as->streams[i]]->codec->width)
            return 0;
    return 1;
}

/*
 * Checks if the height of all streams in an Adaptation Set is the same.
 * Returns 1 if true, 0 otherwise.
 */
static int check_matching_height(AVFormatContext *s, AdaptationSet *as) {
    int first_height, i;
    if (as->nb_streams < 2) return 1;
    first_height = s->streams[as->streams[0]]->codec->height;
    for (i = 1; i < as->nb_streams; i++)
        if (first_height != s->streams[as->streams[i]]->codec->height)
            return 0;
    return 1;
}

/*
 * Checks if the sample rate of all streams in an Adaptation Set is the same.
 * Returns 1 if true, 0 otherwise.
 */
static int check_matching_sample_rate(AVFormatContext *s, AdaptationSet *as) {
    int first_sample_rate, i;
    if (as->nb_streams < 2) return 1;
    first_sample_rate = s->streams[as->streams[0]]->codec->sample_rate;
    for (i = 1; i < as->nb_streams; i++)
        if (first_sample_rate != s->streams[as->streams[i]]->codec->sample_rate)
            return 0;
    return 1;
}

static void free_adaptation_sets(AVFormatContext *s) {
    WebMDashMuxContext *w = s->priv_data;
    int i;
    for (i = 0; i < w->nb_as; i++) {
        av_freep(&w->as[i].streams);
    }
    av_freep(&w->as);
    w->nb_as = 0;
}

/*
 * Parses a live header filename and computes the representation id,
 * initialization pattern and the media pattern. Pass NULL if you don't want to
 * compute any of those 3. Returns 0 on success and non-zero on failure.
 *
 * Name of the header file should conform to the following pattern:
 * <file_description>_<representation_id>.hdr where <file_description> can be
 * anything. The chunks should be named according to the following pattern:
 * <file_description>_<representation_id>_<chunk_number>.chk
 */
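/*
 * For example (hypothetical filenames): a header named "glass_360.hdr" yields
 * representation_id "360", initialization pattern
 * "glass_$RepresentationID$.hdr" and media pattern
 * "glass_$RepresentationID$_$Number$.chk"; the corresponding chunks would be
 * named glass_360_1.chk, glass_360_2.chk, and so on.
 */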
static int parse_filename(char *filename, char **representation_id,
                          char **initialization_pattern, char **media_pattern) {
    char *underscore_pos = NULL;
    char *period_pos = NULL;
    char *temp_pos = NULL;
    char *filename_str = av_strdup(filename);
    if (!filename_str) return AVERROR(ENOMEM);
    temp_pos = av_stristr(filename_str, "_");
    while (temp_pos) {
        underscore_pos = temp_pos + 1;
        temp_pos = av_stristr(temp_pos + 1, "_");
    }
    if (!underscore_pos) return AVERROR_INVALIDDATA;
    period_pos = av_stristr(underscore_pos, ".");
    if (!period_pos) return AVERROR_INVALIDDATA;
    *(underscore_pos - 1) = 0;
    if (representation_id) {
        *representation_id = av_malloc(period_pos - underscore_pos + 1);
        if (!(*representation_id)) return AVERROR(ENOMEM);
        av_strlcpy(*representation_id, underscore_pos, period_pos - underscore_pos + 1);
    }
    if (initialization_pattern) {
        *initialization_pattern = av_asprintf("%s_$RepresentationID$.hdr",
                                              filename_str);
        if (!(*initialization_pattern)) return AVERROR(ENOMEM);
    }
    if (media_pattern) {
        *media_pattern = av_asprintf("%s_$RepresentationID$_$Number$.chk",
                                     filename_str);
        if (!(*media_pattern)) return AVERROR(ENOMEM);
    }
    av_free(filename_str);
    return 0;
}

/*
 * Writes an Adaptation Set. Returns 0 on success and < 0 on failure.
 */
static int write_adaptation_set(AVFormatContext *s, int as_index)
{
    WebMDashMuxContext *w = s->priv_data;
    AdaptationSet *as = &w->as[as_index];
    AVCodecContext *codec = s->streams[as->streams[0]]->codec;
    AVDictionaryEntry *lang;
    int i;
    static const char boolean[2][6] = { "false", "true" };
    int subsegmentStartsWithSAP = 1;

    // Width, Height and Sample Rate will go in the AdaptationSet tag if they
    // are the same for all contained Representations. Otherwise, they will go
    // in their respective Representation tag. For live streams, they always go
    // in the Representation tag.
    int width_in_as = 1, height_in_as = 1, sample_rate_in_as = 1;
    if (codec->codec_type == AVMEDIA_TYPE_VIDEO) {
      width_in_as = !w->is_live && check_matching_width(s, as);
      height_in_as = !w->is_live && check_matching_height(s, as);
    } else {
      sample_rate_in_as = !w->is_live && check_matching_sample_rate(s, as);
    }

    avio_printf(s->pb, "<AdaptationSet id=\"%s\"", as->id);
    avio_printf(s->pb, " mimeType=\"%s/webm\"",
                codec->codec_type == AVMEDIA_TYPE_VIDEO ? "video" : "audio");
    avio_printf(s->pb, " codecs=\"%s\"", get_codec_name(codec->codec_id));

    lang = av_dict_get(s->streams[as->streams[0]]->metadata, "language", NULL, 0);
    if (lang) avio_printf(s->pb, " lang=\"%s\"", lang->value);

    if (codec->codec_type == AVMEDIA_TYPE_VIDEO && width_in_as)
        avio_printf(s->pb, " width=\"%d\"", codec->width);
    if (codec->codec_type == AVMEDIA_TYPE_VIDEO && height_in_as)
        avio_printf(s->pb, " height=\"%d\"", codec->height);
    if (codec->codec_type == AVMEDIA_TYPE_AUDIO && sample_rate_in_as)
        avio_printf(s->pb, " audioSamplingRate=\"%d\"", codec->sample_rate);

    avio_printf(s->pb, " bitstreamSwitching=\"%s\"",
                boolean[bitstream_switching(s, as)]);
    avio_printf(s->pb, " subsegmentAlignment=\"%s\"",
                boolean[w->is_live || subsegment_alignment(s, as)]);

    for (i = 0; i < as->nb_streams; i++) {
        AVDictionaryEntry *kf = av_dict_get(s->streams[as->streams[i]]->metadata,
                                            CLUSTER_KEYFRAME, NULL, 0);
        if (!w->is_live && (!kf || !strncmp(kf->value, "0", 1))) subsegmentStartsWithSAP = 0;
    }
    avio_printf(s->pb, " subsegmentStartsWithSAP=\"%d\"", subsegmentStartsWithSAP);
    avio_printf(s->pb, ">\n");

    if (w->is_live) {
        AVDictionaryEntry *filename =
            av_dict_get(s->streams[as->streams[0]]->metadata, FILENAME, NULL, 0);
        char *initialization_pattern = NULL;
        char *media_pattern = NULL;
        int ret = parse_filename(filename->value, NULL, &initialization_pattern,
                                 &media_pattern);
        if (ret) return ret;
        avio_printf(s->pb, "<ContentComponent id=\"1\" type=\"%s\"/>\n",
                    codec->codec_type == AVMEDIA_TYPE_VIDEO ? "video" : "audio");
        avio_printf(s->pb, "<SegmentTemplate");
        avio_printf(s->pb, " timescale=\"1000\"");
        avio_printf(s->pb, " duration=\"%d\"", w->chunk_duration);
        avio_printf(s->pb, " media=\"%s\"", media_pattern);
        avio_printf(s->pb, " startNumber=\"%d\"", w->chunk_start_index);
        avio_printf(s->pb, " initialization=\"%s\"", initialization_pattern);
        avio_printf(s->pb, "/>\n");
        av_free(initialization_pattern);
        av_free(media_pattern);
    }

    for (i = 0; i < as->nb_streams; i++) {
        char *representation_id = NULL;
        int ret;
        if (w->is_live) {
            AVDictionaryEntry *filename =
                av_dict_get(s->streams[as->streams[i]]->metadata, FILENAME, NULL, 0);
            if (!filename)
                return AVERROR(EINVAL);
            if ((ret = parse_filename(filename->value, &representation_id, NULL, NULL)))
                return ret;
        } else {
            representation_id = av_asprintf("%d", w->representation_id++);
            if (!representation_id) return AVERROR(ENOMEM);
        }
        ret = write_representation(s, s->streams[as->streams[i]],
                                   representation_id, !width_in_as,
                                   !height_in_as, !sample_rate_in_as);
        av_free(representation_id);
        if (ret) return ret;
    }
    avio_printf(s->pb, "</AdaptationSet>\n");
    return 0;
}

static int to_integer(char *p, int len)
{
    int ret;
    char *q = av_malloc(sizeof(char) * len);
    if (!q)
        return AVERROR(ENOMEM);
    av_strlcpy(q, p, len);
    ret = atoi(q);
    av_free(q);
    return ret;
}

static int parse_adaptation_sets(AVFormatContext *s)
{
    WebMDashMuxContext *w = s->priv_data;
    char *p = w->adaptation_sets;
    char *q;
    enum { new_set, parsed_id, parsing_streams } state;
    // syntax id=0,streams=0,1,2 id=1,streams=3,4 and so on
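    // For example (illustrative): "id=0,streams=0,1 id=1,streams=2" produces
    // two AdaptationSets: as[0] = { id "0", streams {0, 1} } and
    // as[1] = { id "1", streams {2} }.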
    state = new_set;
    while (p < w->adaptation_sets + strlen(w->adaptation_sets)) {
        if (*p == ' ') {
            p++;
            continue;
        } else if (state == new_set && !strncmp(p, "id=", 3)) {
            void *mem = av_realloc(w->as, sizeof(*w->as) * (w->nb_as + 1));
            if (mem == NULL)
                return AVERROR(ENOMEM);
            w->as = mem;
            ++w->nb_as;
            w->as[w->nb_as - 1].nb_streams = 0;
            w->as[w->nb_as - 1].streams = NULL;
            p += 3; // consume "id="
            q = w->as[w->nb_as - 1].id;
            while (*p != ',') *q++ = *p++;
            *q = 0;
            p++;
            state = parsed_id;
        } else if (state == parsed_id && !strncmp(p, "streams=", 8)) {
            p += 8; // consume "streams="
            state = parsing_streams;
        } else if (state == parsing_streams) {
            struct AdaptationSet *as = &w->as[w->nb_as - 1];
            q = p;
            while (*q != '\0' && *q != ',' && *q != ' ') q++;
            as->streams = av_realloc(as->streams, sizeof(*as->streams) * ++as->nb_streams);
            if (as->streams == NULL)
                return AVERROR(ENOMEM);
            as->streams[as->nb_streams - 1] = to_integer(p, q - p + 1);
            if (as->streams[as->nb_streams - 1] < 0) return -1;
            if (*q == '\0') break;
            if (*q == ' ') state = new_set;
            p = ++q;
        } else {
            return -1;
        }
    }
    return 0;
}

static int webm_dash_manifest_write_header(AVFormatContext *s)
{
    int i;
    double start = 0.0;
    int ret;
    WebMDashMuxContext *w = s->priv_data;
    ret = parse_adaptation_sets(s);
    if (ret < 0) {
        goto fail;
    }
    ret = write_header(s);
    if (ret < 0) {
        goto fail;
    }
    avio_printf(s->pb, "<Period id=\"0\"");
    avio_printf(s->pb, " start=\"PT%gS\"", start);
    if (!w->is_live) {
        avio_printf(s->pb, " duration=\"PT%gS\"", get_duration(s));
    }
    avio_printf(s->pb, " >\n");

    for (i = 0; i < w->nb_as; i++) {
        ret = write_adaptation_set(s, i);
        if (ret < 0) {
            goto fail;
        }
    }

    avio_printf(s->pb, "</Period>\n");
    write_footer(s);
fail:
    free_adaptation_sets(s);
    return ret < 0 ? ret : 0;
}

static int webm_dash_manifest_write_packet(AVFormatContext *s, AVPacket *pkt)
{
    return AVERROR_EOF;
}

static int webm_dash_manifest_write_trailer(AVFormatContext *s)
{
    free_adaptation_sets(s);
    return 0;
}

#define OFFSET(x) offsetof(WebMDashMuxContext, x)
static const AVOption options[] = {
    { "adaptation_sets", "Adaptation sets. Syntax: id=0,streams=0,1,2 id=1,streams=3,4 and so on", OFFSET(adaptation_sets), AV_OPT_TYPE_STRING, { 0 }, 0, 0, AV_OPT_FLAG_ENCODING_PARAM },
    { "debug_mode", "[private option - users should never set this]. set this to 1 to create deterministic output", OFFSET(debug_mode), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 1, AV_OPT_FLAG_ENCODING_PARAM },
    { "live", "set this to 1 to create a live stream manifest", OFFSET(is_live), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 1, AV_OPT_FLAG_ENCODING_PARAM },
    { "chunk_start_index",  "start index of the chunk", OFFSET(chunk_start_index), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM },
    { "chunk_duration_ms", "duration of each chunk (in milliseconds)", OFFSET(chunk_duration), AV_OPT_TYPE_INT, {.i64 = 1000}, 0, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM },
    { "utc_timing_url", "URL of the page that will return the UTC timestamp in ISO format", OFFSET(utc_timing_url), AV_OPT_TYPE_STRING, { 0 }, 0, 0, AV_OPT_FLAG_ENCODING_PARAM },
    { "time_shift_buffer_depth", "Smallest time (in seconds) shifting buffer for which any Representation is guaranteed to be available.", OFFSET(time_shift_buffer_depth), AV_OPT_TYPE_DOUBLE, { .dbl = 60.0 }, 1.0, DBL_MAX, AV_OPT_FLAG_ENCODING_PARAM },
    { "minimum_update_period", "Minimum Update Period (in seconds) of the manifest.", OFFSET(minimum_update_period), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM },
    { NULL },
};

#if CONFIG_WEBM_DASH_MANIFEST_MUXER
static const AVClass webm_dash_class = {
    .class_name = "WebM DASH Manifest muxer",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};

AVOutputFormat ff_webm_dash_manifest_muxer = {
    .name              = "webm_dash_manifest",
    .long_name         = NULL_IF_CONFIG_SMALL("WebM DASH Manifest"),
    .mime_type         = "application/xml",
    .extensions        = "xml",
    .priv_data_size    = sizeof(WebMDashMuxContext),
    .write_header      = webm_dash_manifest_write_header,
    .write_packet      = webm_dash_manifest_write_packet,
    .write_trailer     = webm_dash_manifest_write_trailer,
    .priv_class        = &webm_dash_class,
};
#endif