2 * various utility functions for use within Libav
3 * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
5 * This file is part of Libav.
7 * Libav is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
12 * Libav is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with Libav; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
25 #include "avio_internal.h"
27 #include "libavcodec/internal.h"
28 #include "libavutil/opt.h"
29 #include "libavutil/dict.h"
30 #include "libavutil/pixdesc.h"
33 #include "libavutil/avstring.h"
34 #include "libavutil/mathematics.h"
36 #include "audiointerleave.h"
51 * various utility functions for use within Libav
54 unsigned avformat_version(void)
56 return LIBAVFORMAT_VERSION_INT;
59 const char *avformat_configuration(void)
61 return LIBAV_CONFIGURATION;
64 const char *avformat_license(void)
66 #define LICENSE_PREFIX "libavformat license: "
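    /* Descriptive note (not part of the original file): the string literal
     * concatenation below embeds the full "libavformat license: ..." string
     * in the binary so it can be found with strings/grep, while the
     * sizeof(LICENSE_PREFIX) - 1 offset skips the prefix so that callers
     * only receive the license name itself. */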
67 return LICENSE_PREFIX LIBAV_LICENSE + sizeof(LICENSE_PREFIX) - 1;
70 /* fraction handling */
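/* Illustrative note (not part of the original file): AVFrac stores a
 * timestamp as val + num/den so that repeated fractional increments
 * accumulate without rounding drift. For example, with den == 3, calling
 * frac_add(f, 1) three times adds exactly one whole unit to f->val, with any
 * remainder kept in f->num. */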
/**
 * f = val + (num / den) + 0.5.
 *
 * 'num' is normalized so that 0 <= num < den.
 *
 * @param f   fractional number
 * @param val integer value
 * @param num must be >= 0
 * @param den must be >= 1
 */
82 static void frac_init(AVFrac *f, int64_t val, int64_t num, int64_t den)
95 * Fractional addition to f: f = f + (incr / f->den).
97 * @param f fractional number
98 * @param incr increment, can be positive or negative
100 static void frac_add(AVFrac *f, int64_t incr)
113 } else if (num >= den) {
120 /** head of registered input format linked list */
121 static AVInputFormat *first_iformat = NULL;
122 /** head of registered output format linked list */
123 static AVOutputFormat *first_oformat = NULL;
125 AVInputFormat *av_iformat_next(AVInputFormat *f)
127 if(f) return f->next;
128 else return first_iformat;
131 AVOutputFormat *av_oformat_next(AVOutputFormat *f)
133 if(f) return f->next;
134 else return first_oformat;
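/* Illustrative usage sketch (not part of the original file): both lists can
 * be walked by passing NULL to obtain the head and the previous entry to
 * obtain its successor, e.g. to list every registered demuxer:
 *
 *     AVInputFormat *ifmt = NULL;
 *     while ((ifmt = av_iformat_next(ifmt)))
 *         printf("%s\n", ifmt->name);
 */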
137 void av_register_input_format(AVInputFormat *format)
141 while (*p != NULL) p = &(*p)->next;
146 void av_register_output_format(AVOutputFormat *format)
150 while (*p != NULL) p = &(*p)->next;
155 int av_match_ext(const char *filename, const char *extensions)
163 ext = strrchr(filename, '.');
169 while (*p != '\0' && *p != ',' && q-ext1<sizeof(ext1)-1)
172 if (!strcasecmp(ext1, ext))
182 static int match_format(const char *name, const char *names)
190 namelen = strlen(name);
191 while ((p = strchr(names, ','))) {
192 len = FFMAX(p - names, namelen);
193 if (!strncasecmp(name, names, len))
197 return !strcasecmp(name, names);
200 AVOutputFormat *av_guess_format(const char *short_name, const char *filename,
201 const char *mime_type)
203 AVOutputFormat *fmt = NULL, *fmt_found;
204 int score_max, score;
206 /* specific test for image sequences */
207 #if CONFIG_IMAGE2_MUXER
208 if (!short_name && filename &&
209 av_filename_number_test(filename) &&
210 ff_guess_image2_codec(filename) != CODEC_ID_NONE) {
211 return av_guess_format("image2", NULL, NULL);
214 /* Find the proper file type. */
217 while ((fmt = av_oformat_next(fmt))) {
219 if (fmt->name && short_name && !strcmp(fmt->name, short_name))
221 if (fmt->mime_type && mime_type && !strcmp(fmt->mime_type, mime_type))
223 if (filename && fmt->extensions &&
224 av_match_ext(filename, fmt->extensions)) {
227 if (score > score_max) {
235 enum CodecID av_guess_codec(AVOutputFormat *fmt, const char *short_name,
236 const char *filename, const char *mime_type, enum AVMediaType type){
237 if(type == AVMEDIA_TYPE_VIDEO){
238 enum CodecID codec_id= CODEC_ID_NONE;
240 #if CONFIG_IMAGE2_MUXER
241 if(!strcmp(fmt->name, "image2") || !strcmp(fmt->name, "image2pipe")){
242 codec_id= ff_guess_image2_codec(filename);
245 if(codec_id == CODEC_ID_NONE)
246 codec_id= fmt->video_codec;
248 }else if(type == AVMEDIA_TYPE_AUDIO)
249 return fmt->audio_codec;
250 else if (type == AVMEDIA_TYPE_SUBTITLE)
251 return fmt->subtitle_codec;
253 return CODEC_ID_NONE;
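/* Illustrative usage sketch (not part of the original file): a muxing
 * application would typically let the two helpers above pick the container
 * and the default codecs from the output file name ("out.mp4" is only a
 * placeholder):
 *
 *     AVOutputFormat *ofmt = av_guess_format(NULL, "out.mp4", NULL);
 *     if (ofmt) {
 *         enum CodecID vcodec = av_guess_codec(ofmt, NULL, "out.mp4",
 *                                              NULL, AVMEDIA_TYPE_VIDEO);
 *     }
 */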
256 AVInputFormat *av_find_input_format(const char *short_name)
258 AVInputFormat *fmt = NULL;
259 while ((fmt = av_iformat_next(fmt))) {
260 if (match_format(short_name, fmt->name))
267 int av_get_packet(AVIOContext *s, AVPacket *pkt, int size)
269 int ret= av_new_packet(pkt, size);
274 pkt->pos= avio_tell(s);
276 ret= avio_read(s, pkt->data, size);
280 av_shrink_packet(pkt, ret);
285 int av_append_packet(AVIOContext *s, AVPacket *pkt, int size)
290 return av_get_packet(s, pkt, size);
291 old_size = pkt->size;
292 ret = av_grow_packet(pkt, size);
295 ret = avio_read(s, pkt->data + old_size, size);
296 av_shrink_packet(pkt, old_size + FFMAX(ret, 0));
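/* Illustrative note (not part of the original file): av_get_packet()
 * allocates a fresh packet of the requested size, while av_append_packet()
 * grows an existing one, so a demuxer can accumulate a frame that arrives in
 * several chunks:
 *
 *     AVPacket pkt;
 *     if (av_get_packet(pb, &pkt, 4096) >= 0)
 *         av_append_packet(pb, &pkt, 4096);  // pkt now holds up to 8192 bytes
 */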
301 int av_filename_number_test(const char *filename)
304 return filename && (av_get_frame_filename(buf, sizeof(buf), filename, 1)>=0);
307 AVInputFormat *av_probe_input_format2(AVProbeData *pd, int is_opened, int *score_max)
309 AVProbeData lpd = *pd;
310 AVInputFormat *fmt1 = NULL, *fmt;
313 if (lpd.buf_size > 10 && ff_id3v2_match(lpd.buf, ID3v2_DEFAULT_MAGIC)) {
314 int id3len = ff_id3v2_tag_len(lpd.buf);
315 if (lpd.buf_size > id3len + 16) {
317 lpd.buf_size -= id3len;
323 while ((fmt1 = av_iformat_next(fmt1))) {
324 if (!is_opened == !(fmt1->flags & AVFMT_NOFILE))
327 if (fmt1->read_probe) {
328 score = fmt1->read_probe(&lpd);
329 } else if (fmt1->extensions) {
330 if (av_match_ext(lpd.filename, fmt1->extensions)) {
334 if (score > *score_max) {
337 }else if (score == *score_max)
341 /* a hack for files with huge id3v2 tags -- try to guess by file extension. */
342 if (!fmt && id3 && *score_max < AVPROBE_SCORE_MAX/4) {
343 while ((fmt = av_iformat_next(fmt)))
344 if (fmt->extensions && av_match_ext(lpd.filename, fmt->extensions)) {
345 *score_max = AVPROBE_SCORE_MAX/4;
353 AVInputFormat *av_probe_input_format(AVProbeData *pd, int is_opened){
355 return av_probe_input_format2(pd, is_opened, &score);
358 static int set_codec_from_probe_data(AVFormatContext *s, AVStream *st, AVProbeData *pd, int score)
360 static const struct {
361 const char *name; enum CodecID id; enum AVMediaType type;
363 { "aac" , CODEC_ID_AAC , AVMEDIA_TYPE_AUDIO },
364 { "ac3" , CODEC_ID_AC3 , AVMEDIA_TYPE_AUDIO },
365 { "dts" , CODEC_ID_DTS , AVMEDIA_TYPE_AUDIO },
366 { "eac3" , CODEC_ID_EAC3 , AVMEDIA_TYPE_AUDIO },
367 { "h264" , CODEC_ID_H264 , AVMEDIA_TYPE_VIDEO },
368 { "m4v" , CODEC_ID_MPEG4 , AVMEDIA_TYPE_VIDEO },
369 { "mp3" , CODEC_ID_MP3 , AVMEDIA_TYPE_AUDIO },
370 { "mpegvideo", CODEC_ID_MPEG2VIDEO, AVMEDIA_TYPE_VIDEO },
373 AVInputFormat *fmt = av_probe_input_format2(pd, 1, &score);
377 av_log(s, AV_LOG_DEBUG, "Probe with size=%d, packets=%d detected %s with score=%d\n",
378 pd->buf_size, MAX_PROBE_PACKETS - st->probe_packets, fmt->name, score);
379 for (i = 0; fmt_id_type[i].name; i++) {
380 if (!strcmp(fmt->name, fmt_id_type[i].name)) {
381 st->codec->codec_id = fmt_id_type[i].id;
382 st->codec->codec_type = fmt_id_type[i].type;
390 /************************************************************/
391 /* input media file */
393 #if FF_API_FORMAT_PARAMETERS
394 static AVDictionary *convert_format_parameters(AVFormatParameters *ap)
397 AVDictionary *opts = NULL;
402 if (ap->time_base.num) {
403 snprintf(buf, sizeof(buf), "%d/%d", ap->time_base.den, ap->time_base.num);
404 av_dict_set(&opts, "framerate", buf, 0);
406 if (ap->sample_rate) {
407 snprintf(buf, sizeof(buf), "%d", ap->sample_rate);
408 av_dict_set(&opts, "sample_rate", buf, 0);
411 snprintf(buf, sizeof(buf), "%d", ap->channels);
412 av_dict_set(&opts, "channels", buf, 0);
414 if (ap->width || ap->height) {
415 snprintf(buf, sizeof(buf), "%dx%d", ap->width, ap->height);
416 av_dict_set(&opts, "video_size", buf, 0);
418 if (ap->pix_fmt != PIX_FMT_NONE) {
419 av_dict_set(&opts, "pixel_format", av_get_pix_fmt_name(ap->pix_fmt), 0);
422 snprintf(buf, sizeof(buf), "%d", ap->channel);
423 av_dict_set(&opts, "channel", buf, 0);
426 av_dict_set(&opts, "standard", ap->standard, 0);
428 if (ap->mpeg2ts_compute_pcr) {
429 av_dict_set(&opts, "mpeg2ts_compute_pcr", "1", 0);
431 if (ap->initial_pause) {
432 av_dict_set(&opts, "initial_pause", "1", 0);
438 * Open a media file from an IO stream. 'fmt' must be specified.
440 int av_open_input_stream(AVFormatContext **ic_ptr,
441 AVIOContext *pb, const char *filename,
442 AVInputFormat *fmt, AVFormatParameters *ap)
447 AVFormatParameters default_ap;
451 memset(ap, 0, sizeof(default_ap));
453 opts = convert_format_parameters(ap);
455 if(!ap->prealloced_context)
456 ic = avformat_alloc_context();
460 err = AVERROR(ENOMEM);
463 if (pb && fmt && fmt->flags & AVFMT_NOFILE)
464 av_log(ic, AV_LOG_WARNING, "Custom AVIOContext makes no sense and "
465 "will be ignored with AVFMT_NOFILE format.\n");
469 if ((err = avformat_open_input(&ic, filename, fmt, &opts)) < 0)
471 ic->pb = ic->pb ? ic->pb : pb; // don't leak custom pb if it wasn't set above
480 /** size of probe buffer, for guessing file type from file contents */
481 #define PROBE_BUF_MIN 2048
482 #define PROBE_BUF_MAX (1<<20)
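/* Descriptive note (not part of the original file): probing starts with
 * PROBE_BUF_MIN bytes and roughly doubles the buffer on every iteration, up
 * to PROBE_BUF_MAX (or the caller's max_probe_size); only on the final,
 * largest attempt is the required score relaxed from AVPROBE_SCORE_MAX/4
 * down to 0. */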
484 int av_probe_input_buffer(AVIOContext *pb, AVInputFormat **fmt,
485 const char *filename, void *logctx,
486 unsigned int offset, unsigned int max_probe_size)
488 AVProbeData pd = { filename ? filename : "", NULL, -offset };
489 unsigned char *buf = NULL;
490 int ret = 0, probe_size;
492 if (!max_probe_size) {
493 max_probe_size = PROBE_BUF_MAX;
494 } else if (max_probe_size > PROBE_BUF_MAX) {
495 max_probe_size = PROBE_BUF_MAX;
496 } else if (max_probe_size < PROBE_BUF_MIN) {
497 return AVERROR(EINVAL);
500 if (offset >= max_probe_size) {
501 return AVERROR(EINVAL);
504 for(probe_size= PROBE_BUF_MIN; probe_size<=max_probe_size && !*fmt;
505 probe_size = FFMIN(probe_size<<1, FFMAX(max_probe_size, probe_size+1))) {
506 int score = probe_size < max_probe_size ? AVPROBE_SCORE_MAX/4 : 0;
507 int buf_offset = (probe_size == PROBE_BUF_MIN) ? 0 : probe_size>>1;
509 if (probe_size < offset) {
513 /* read probe data */
514 buf = av_realloc(buf, probe_size + AVPROBE_PADDING_SIZE);
515 if ((ret = avio_read(pb, buf + buf_offset, probe_size - buf_offset)) < 0) {
516 /* fail if error was not end of file, otherwise, lower score */
517 if (ret != AVERROR_EOF) {
522 ret = 0; /* error was end of file, nothing read */
525 pd.buf = &buf[offset];
527 memset(pd.buf + pd.buf_size, 0, AVPROBE_PADDING_SIZE);
529 /* guess file format */
530 *fmt = av_probe_input_format2(&pd, 1, &score);
532 if(score <= AVPROBE_SCORE_MAX/4){ //this can only be true in the last iteration
533 av_log(logctx, AV_LOG_WARNING, "Format detected only with low score of %d, misdetection possible!\n", score);
535 av_log(logctx, AV_LOG_DEBUG, "Probed with size=%d and score=%d\n", probe_size, score);
541 return AVERROR_INVALIDDATA;
544 /* rewind. reuse probe buffer to avoid seeking */
545 if ((ret = ffio_rewind_with_probe_data(pb, buf, pd.buf_size)) < 0)
551 #if FF_API_FORMAT_PARAMETERS
552 int av_open_input_file(AVFormatContext **ic_ptr, const char *filename,
555 AVFormatParameters *ap)
558 AVDictionary *opts = convert_format_parameters(ap);
560 if (!ap || !ap->prealloced_context)
563 err = avformat_open_input(ic_ptr, filename, fmt, &opts);
570 /* open input file and probe the format if necessary */
571 static int init_input(AVFormatContext *s, const char *filename)
574 AVProbeData pd = {filename, NULL, 0};
577 s->flags |= AVFMT_FLAG_CUSTOM_IO;
579 return av_probe_input_buffer(s->pb, &s->iformat, filename, s, 0, 0);
580 else if (s->iformat->flags & AVFMT_NOFILE)
581 return AVERROR(EINVAL);
585 if ( (s->iformat && s->iformat->flags & AVFMT_NOFILE) ||
586 (!s->iformat && (s->iformat = av_probe_input_format(&pd, 0))))
589 if ((ret = avio_open(&s->pb, filename, AVIO_FLAG_READ)) < 0)
593 return av_probe_input_buffer(s->pb, &s->iformat, filename, s, 0, 0);
596 int avformat_open_input(AVFormatContext **ps, const char *filename, AVInputFormat *fmt, AVDictionary **options)
598 AVFormatContext *s = *ps;
600 AVFormatParameters ap = { { 0 } };
601 AVDictionary *tmp = NULL;
603 if (!s && !(s = avformat_alloc_context()))
604 return AVERROR(ENOMEM);
609 av_dict_copy(&tmp, *options, 0);
611 if ((ret = av_opt_set_dict(s, &tmp)) < 0)
614 if ((ret = init_input(s, filename)) < 0)
617 /* check filename in case an image number is expected */
618 if (s->iformat->flags & AVFMT_NEEDNUMBER) {
619 if (!av_filename_number_test(filename)) {
620 ret = AVERROR(EINVAL);
625 s->duration = s->start_time = AV_NOPTS_VALUE;
626 av_strlcpy(s->filename, filename, sizeof(s->filename));
628 /* allocate private data */
629 if (s->iformat->priv_data_size > 0) {
630 if (!(s->priv_data = av_mallocz(s->iformat->priv_data_size))) {
631 ret = AVERROR(ENOMEM);
634 if (s->iformat->priv_class) {
635 *(const AVClass**)s->priv_data = s->iformat->priv_class;
636 av_opt_set_defaults(s->priv_data);
637 if ((ret = av_opt_set_dict(s->priv_data, &tmp)) < 0)
    /* e.g. AVFMT_NOFILE formats will not have an AVIOContext */
644 ff_id3v2_read(s, ID3v2_DEFAULT_MAGIC);
646 if (s->iformat->read_header)
647 if ((ret = s->iformat->read_header(s, &ap)) < 0)
650 if (s->pb && !s->data_offset)
651 s->data_offset = avio_tell(s->pb);
653 s->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
656 av_dict_free(options);
664 if (s->pb && !(s->flags & AVFMT_FLAG_CUSTOM_IO))
666 avformat_free_context(s);
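/* Illustrative usage sketch (not part of the original file): a minimal
 * demuxing loop built on avformat_open_input(), assuming av_register_all()
 * has already been called, "input.mkv" is a placeholder path, and
 * av_close_input_file() is the matching close call for this API generation:
 *
 *     AVFormatContext *ic = NULL;
 *     AVPacket pkt;
 *     if (avformat_open_input(&ic, "input.mkv", NULL, NULL) < 0)
 *         return;
 *     if (avformat_find_stream_info(ic, NULL) < 0)
 *         return;
 *     while (av_read_frame(ic, &pkt) >= 0) {
 *         // use pkt.stream_index, pkt.data, pkt.size here
 *         av_free_packet(&pkt);
 *     }
 *     av_close_input_file(ic);
 */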
671 /*******************************************************/
673 static AVPacket *add_to_pktbuf(AVPacketList **packet_buffer, AVPacket *pkt,
674 AVPacketList **plast_pktl){
675 AVPacketList *pktl = av_mallocz(sizeof(AVPacketList));
680 (*plast_pktl)->next = pktl;
682 *packet_buffer = pktl;
    /* add the packet to the buffered packet list */
690 int av_read_packet(AVFormatContext *s, AVPacket *pkt)
696 AVPacketList *pktl = s->raw_packet_buffer;
700 if(s->streams[pkt->stream_index]->codec->codec_id != CODEC_ID_PROBE ||
701 !s->streams[pkt->stream_index]->probe_packets ||
702 s->raw_packet_buffer_remaining_size < pkt->size){
703 AVProbeData *pd = &s->streams[pkt->stream_index]->probe_data;
706 s->raw_packet_buffer = pktl->next;
707 s->raw_packet_buffer_remaining_size += pkt->size;
714 ret= s->iformat->read_packet(s, pkt);
716 if (!pktl || ret == AVERROR(EAGAIN))
718 for (i = 0; i < s->nb_streams; i++)
719 s->streams[i]->probe_packets = 0;
723 if ((s->flags & AVFMT_FLAG_DISCARD_CORRUPT) &&
724 (pkt->flags & AV_PKT_FLAG_CORRUPT)) {
725 av_log(s, AV_LOG_WARNING,
726 "Dropped corrupted packet (stream = %d)\n",
731 st= s->streams[pkt->stream_index];
733 switch(st->codec->codec_type){
734 case AVMEDIA_TYPE_VIDEO:
735 if(s->video_codec_id) st->codec->codec_id= s->video_codec_id;
737 case AVMEDIA_TYPE_AUDIO:
738 if(s->audio_codec_id) st->codec->codec_id= s->audio_codec_id;
740 case AVMEDIA_TYPE_SUBTITLE:
741 if(s->subtitle_codec_id)st->codec->codec_id= s->subtitle_codec_id;
745 if(!pktl && (st->codec->codec_id != CODEC_ID_PROBE ||
749 add_to_pktbuf(&s->raw_packet_buffer, pkt, &s->raw_packet_buffer_end);
750 s->raw_packet_buffer_remaining_size -= pkt->size;
752 if(st->codec->codec_id == CODEC_ID_PROBE){
753 AVProbeData *pd = &st->probe_data;
754 av_log(s, AV_LOG_DEBUG, "probing stream %d\n", st->index);
757 pd->buf = av_realloc(pd->buf, pd->buf_size+pkt->size+AVPROBE_PADDING_SIZE);
758 memcpy(pd->buf+pd->buf_size, pkt->data, pkt->size);
759 pd->buf_size += pkt->size;
760 memset(pd->buf+pd->buf_size, 0, AVPROBE_PADDING_SIZE);
762 if(av_log2(pd->buf_size) != av_log2(pd->buf_size - pkt->size)){
            // FIXME: we don't reduce the score to 0 if we run out of buffer space in bytes
764 set_codec_from_probe_data(s, st, pd, st->probe_packets > 0 ? AVPROBE_SCORE_MAX/4 : 0);
765 if(st->codec->codec_id != CODEC_ID_PROBE){
768 av_log(s, AV_LOG_DEBUG, "probed stream %d\n", st->index);
775 /**********************************************************/
778 * Get the number of samples of an audio frame. Return -1 on error.
780 static int get_audio_frame_size(AVCodecContext *enc, int size)
784 if(enc->codec_id == CODEC_ID_VORBIS)
787 if (enc->frame_size <= 1) {
788 int bits_per_sample = av_get_bits_per_sample(enc->codec_id);
790 if (bits_per_sample) {
791 if (enc->channels == 0)
793 frame_size = (size << 3) / (bits_per_sample * enc->channels);
795 /* used for example by ADPCM codecs */
796 if (enc->bit_rate == 0)
798 frame_size = ((int64_t)size * 8 * enc->sample_rate) / enc->bit_rate;
801 frame_size = enc->frame_size;
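    /* Worked example (illustrative, not part of the original file): for
     * 16-bit stereo PCM, bits_per_sample is 16 and channels is 2, so a
     * 4096-byte packet yields (4096 * 8) / (16 * 2) = 1024 samples per
     * channel. */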
/**
 * Return the frame duration in seconds as the fraction *pnum / *pden.
 * Both are set to 0 if not available.
 */
810 static void compute_frame_duration(int *pnum, int *pden, AVStream *st,
811 AVCodecParserContext *pc, AVPacket *pkt)
817 switch(st->codec->codec_type) {
818 case AVMEDIA_TYPE_VIDEO:
819 if(st->time_base.num*1000LL > st->time_base.den){
820 *pnum = st->time_base.num;
821 *pden = st->time_base.den;
822 }else if(st->codec->time_base.num*1000LL > st->codec->time_base.den){
823 *pnum = st->codec->time_base.num;
824 *pden = st->codec->time_base.den;
825 if (pc && pc->repeat_pict) {
826 *pnum = (*pnum) * (1 + pc->repeat_pict);
            // If this codec can be interlaced or progressive, then we need a
            // parser to compute the duration of a packet. Thus, if we have no
            // parser in that case, leave the duration undefined.
830 if(st->codec->ticks_per_frame>1 && !pc){
835 case AVMEDIA_TYPE_AUDIO:
836 frame_size = get_audio_frame_size(st->codec, pkt->size);
837 if (frame_size <= 0 || st->codec->sample_rate <= 0)
840 *pden = st->codec->sample_rate;
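    /* Worked example (illustrative, not part of the original file): a stream
     * with a codec time base of 1/25 and a finer container time base such as
     * 1/90000 gets *pnum / *pden = 1/25 of a second per frame; a parser
     * reporting repeat_pict == 1 doubles that to 2/25. */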
847 static int is_intra_only(AVCodecContext *enc){
848 if(enc->codec_type == AVMEDIA_TYPE_AUDIO){
850 }else if(enc->codec_type == AVMEDIA_TYPE_VIDEO){
851 switch(enc->codec_id){
853 case CODEC_ID_MJPEGB:
855 case CODEC_ID_RAWVIDEO:
856 case CODEC_ID_DVVIDEO:
857 case CODEC_ID_HUFFYUV:
858 case CODEC_ID_FFVHUFF:
863 case CODEC_ID_JPEG2000:
871 static void update_initial_timestamps(AVFormatContext *s, int stream_index,
872 int64_t dts, int64_t pts)
874 AVStream *st= s->streams[stream_index];
875 AVPacketList *pktl= s->packet_buffer;
877 if(st->first_dts != AV_NOPTS_VALUE || dts == AV_NOPTS_VALUE || st->cur_dts == AV_NOPTS_VALUE)
880 st->first_dts= dts - st->cur_dts;
883 for(; pktl; pktl= pktl->next){
884 if(pktl->pkt.stream_index != stream_index)
886 //FIXME think more about this check
887 if(pktl->pkt.pts != AV_NOPTS_VALUE && pktl->pkt.pts == pktl->pkt.dts)
888 pktl->pkt.pts += st->first_dts;
890 if(pktl->pkt.dts != AV_NOPTS_VALUE)
891 pktl->pkt.dts += st->first_dts;
893 if(st->start_time == AV_NOPTS_VALUE && pktl->pkt.pts != AV_NOPTS_VALUE)
894 st->start_time= pktl->pkt.pts;
896 if (st->start_time == AV_NOPTS_VALUE)
897 st->start_time = pts;
900 static void update_initial_durations(AVFormatContext *s, AVStream *st, AVPacket *pkt)
902 AVPacketList *pktl= s->packet_buffer;
905 if(st->first_dts != AV_NOPTS_VALUE){
906 cur_dts= st->first_dts;
907 for(; pktl; pktl= pktl->next){
908 if(pktl->pkt.stream_index == pkt->stream_index){
909 if(pktl->pkt.pts != pktl->pkt.dts || pktl->pkt.dts != AV_NOPTS_VALUE || pktl->pkt.duration)
911 cur_dts -= pkt->duration;
914 pktl= s->packet_buffer;
915 st->first_dts = cur_dts;
916 }else if(st->cur_dts)
919 for(; pktl; pktl= pktl->next){
920 if(pktl->pkt.stream_index != pkt->stream_index)
922 if(pktl->pkt.pts == pktl->pkt.dts && pktl->pkt.dts == AV_NOPTS_VALUE
923 && !pktl->pkt.duration){
924 pktl->pkt.dts= cur_dts;
925 if(!st->codec->has_b_frames)
926 pktl->pkt.pts= cur_dts;
927 cur_dts += pkt->duration;
928 pktl->pkt.duration= pkt->duration;
932 if(st->first_dts == AV_NOPTS_VALUE)
933 st->cur_dts= cur_dts;
936 static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
937 AVCodecParserContext *pc, AVPacket *pkt)
939 int num, den, presentation_delayed, delay, i;
942 if (s->flags & AVFMT_FLAG_NOFILLIN)
945 if((s->flags & AVFMT_FLAG_IGNDTS) && pkt->pts != AV_NOPTS_VALUE)
946 pkt->dts= AV_NOPTS_VALUE;
948 if (st->codec->codec_id != CODEC_ID_H264 && pc && pc->pict_type == AV_PICTURE_TYPE_B)
949 //FIXME Set low_delay = 0 when has_b_frames = 1
950 st->codec->has_b_frames = 1;
952 /* do we have a video B-frame ? */
953 delay= st->codec->has_b_frames;
954 presentation_delayed = 0;
956 // ignore delay caused by frame threading so that the mpeg2-without-dts
957 // warning will not trigger
958 if (delay && st->codec->active_thread_type&FF_THREAD_FRAME)
959 delay -= st->codec->thread_count-1;
    /* XXX: need has_b_frames, but cannot get it if the codec is
       not initialized */
    if (delay &&
        pc && pc->pict_type != AV_PICTURE_TYPE_B)
965 presentation_delayed = 1;
967 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && pkt->dts > pkt->pts && st->pts_wrap_bits<63
968 /*&& pkt->dts-(1LL<<st->pts_wrap_bits) < pkt->pts*/){
969 pkt->dts -= 1LL<<st->pts_wrap_bits;
    // Some MPEG-2 in MPEG-PS lacks dts (issue171 / input_file.mpg).
    // We take the conservative approach and discard both.
    // Note: if this is misbehaving for an H.264 file, then possibly
    // presentation_delayed is not set correctly.
975 if(delay==1 && pkt->dts == pkt->pts && pkt->dts != AV_NOPTS_VALUE && presentation_delayed){
976 av_log(s, AV_LOG_DEBUG, "invalid dts/pts combination\n");
977 pkt->dts= pkt->pts= AV_NOPTS_VALUE;
980 if (pkt->duration == 0) {
981 compute_frame_duration(&num, &den, st, pc, pkt);
983 pkt->duration = av_rescale_rnd(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num, AV_ROUND_DOWN);
985 if(pkt->duration != 0 && s->packet_buffer)
986 update_initial_durations(s, st, pkt);
990 /* correct timestamps with byte offset if demuxers only have timestamps
991 on packet boundaries */
992 if(pc && st->need_parsing == AVSTREAM_PARSE_TIMESTAMPS && pkt->size){
993 /* this will estimate bitrate based on this frame's duration and size */
994 offset = av_rescale(pc->offset, pkt->duration, pkt->size);
995 if(pkt->pts != AV_NOPTS_VALUE)
997 if(pkt->dts != AV_NOPTS_VALUE)
1001 if (pc && pc->dts_sync_point >= 0) {
1002 // we have synchronization info from the parser
1003 int64_t den = st->codec->time_base.den * (int64_t) st->time_base.num;
1005 int64_t num = st->codec->time_base.num * (int64_t) st->time_base.den;
1006 if (pkt->dts != AV_NOPTS_VALUE) {
1007 // got DTS from the stream, update reference timestamp
1008 st->reference_dts = pkt->dts - pc->dts_ref_dts_delta * num / den;
1009 pkt->pts = pkt->dts + pc->pts_dts_delta * num / den;
1010 } else if (st->reference_dts != AV_NOPTS_VALUE) {
1011 // compute DTS based on reference timestamp
1012 pkt->dts = st->reference_dts + pc->dts_ref_dts_delta * num / den;
1013 pkt->pts = pkt->dts + pc->pts_dts_delta * num / den;
1015 if (pc->dts_sync_point > 0)
1016 st->reference_dts = pkt->dts; // new reference
1020 /* This may be redundant, but it should not hurt. */
1021 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts > pkt->dts)
1022 presentation_delayed = 1;
1024 // av_log(NULL, AV_LOG_DEBUG, "IN delayed:%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64" st:%d pc:%p\n", presentation_delayed, pkt->pts, pkt->dts, st->cur_dts, pkt->stream_index, pc);
    /* Interpolate PTS and DTS if they are not present. We skip H.264
       currently because delay and has_b_frames are not reliably set. */
1027 if((delay==0 || (delay==1 && pc)) && st->codec->codec_id != CODEC_ID_H264){
1028 if (presentation_delayed) {
1029 /* DTS = decompression timestamp */
1030 /* PTS = presentation timestamp */
1031 if (pkt->dts == AV_NOPTS_VALUE)
1032 pkt->dts = st->last_IP_pts;
1033 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts);
1034 if (pkt->dts == AV_NOPTS_VALUE)
1035 pkt->dts = st->cur_dts;
1037 /* this is tricky: the dts must be incremented by the duration
1038 of the frame we are displaying, i.e. the last I- or P-frame */
1039 if (st->last_IP_duration == 0)
1040 st->last_IP_duration = pkt->duration;
1041 if(pkt->dts != AV_NOPTS_VALUE)
1042 st->cur_dts = pkt->dts + st->last_IP_duration;
1043 st->last_IP_duration = pkt->duration;
1044 st->last_IP_pts= pkt->pts;
            /* Cannot compute PTS if not present (we can compute it only
               by knowing the future). */
1047 } else if(pkt->pts != AV_NOPTS_VALUE || pkt->dts != AV_NOPTS_VALUE || pkt->duration){
1048 if(pkt->pts != AV_NOPTS_VALUE && pkt->duration){
1049 int64_t old_diff= FFABS(st->cur_dts - pkt->duration - pkt->pts);
1050 int64_t new_diff= FFABS(st->cur_dts - pkt->pts);
1051 if(old_diff < new_diff && old_diff < (pkt->duration>>3)){
1052 pkt->pts += pkt->duration;
1053 // av_log(NULL, AV_LOG_DEBUG, "id:%d old:%"PRId64" new:%"PRId64" dur:%d cur:%"PRId64" size:%d\n", pkt->stream_index, old_diff, new_diff, pkt->duration, st->cur_dts, pkt->size);
            /* presentation is not delayed: PTS and DTS are the same */
1058 if(pkt->pts == AV_NOPTS_VALUE)
1059 pkt->pts = pkt->dts;
1060 update_initial_timestamps(s, pkt->stream_index, pkt->pts, pkt->pts);
1061 if(pkt->pts == AV_NOPTS_VALUE)
1062 pkt->pts = st->cur_dts;
1063 pkt->dts = pkt->pts;
1064 if(pkt->pts != AV_NOPTS_VALUE)
1065 st->cur_dts = pkt->pts + pkt->duration;
1069 if(pkt->pts != AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY){
1070 st->pts_buffer[0]= pkt->pts;
1071 for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
1072 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
1073 if(pkt->dts == AV_NOPTS_VALUE)
1074 pkt->dts= st->pts_buffer[0];
        if(st->codec->codec_id == CODEC_ID_H264){ // we skipped it above, so we try here
1076 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts); // this should happen on the first packet
1078 if(pkt->dts > st->cur_dts)
1079 st->cur_dts = pkt->dts;
1082 // av_log(NULL, AV_LOG_ERROR, "OUTdelayed:%d/%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64"\n", presentation_delayed, delay, pkt->pts, pkt->dts, st->cur_dts);
1085 if(is_intra_only(st->codec))
1086 pkt->flags |= AV_PKT_FLAG_KEY;
1089 /* keyframe computation */
1090 if (pc->key_frame == 1)
1091 pkt->flags |= AV_PKT_FLAG_KEY;
1092 else if (pc->key_frame == -1 && pc->pict_type == AV_PICTURE_TYPE_I)
1093 pkt->flags |= AV_PKT_FLAG_KEY;
1096 pkt->convergence_duration = pc->convergence_duration;
1100 static int read_frame_internal(AVFormatContext *s, AVPacket *pkt)
1105 av_init_packet(pkt);
1108 /* select current input stream component */
1111 if (!st->need_parsing || !st->parser) {
1112 /* no parsing needed: we just output the packet as is */
1113 /* raw data support */
1114 *pkt = st->cur_pkt; st->cur_pkt.data= NULL;
1115 compute_pkt_fields(s, st, NULL, pkt);
1117 if ((s->iformat->flags & AVFMT_GENERIC_INDEX) &&
1118 (pkt->flags & AV_PKT_FLAG_KEY) && pkt->dts != AV_NOPTS_VALUE) {
1119 ff_reduce_index(s, st->index);
1120 av_add_index_entry(st, pkt->pos, pkt->dts, 0, 0, AVINDEX_KEYFRAME);
1123 } else if (st->cur_len > 0 && st->discard < AVDISCARD_ALL) {
1124 len = av_parser_parse2(st->parser, st->codec, &pkt->data, &pkt->size,
1125 st->cur_ptr, st->cur_len,
1126 st->cur_pkt.pts, st->cur_pkt.dts,
1128 st->cur_pkt.pts = AV_NOPTS_VALUE;
1129 st->cur_pkt.dts = AV_NOPTS_VALUE;
1130 /* increment read pointer */
1134 /* return packet if any */
1138 pkt->stream_index = st->index;
1139 pkt->pts = st->parser->pts;
1140 pkt->dts = st->parser->dts;
1141 pkt->pos = st->parser->pos;
1142 if(pkt->data == st->cur_pkt.data && pkt->size == st->cur_pkt.size){
1144 pkt->destruct= st->cur_pkt.destruct;
1145 st->cur_pkt.destruct= NULL;
1146 st->cur_pkt.data = NULL;
1147 assert(st->cur_len == 0);
1149 pkt->destruct = NULL;
1151 compute_pkt_fields(s, st, st->parser, pkt);
1153 if((s->iformat->flags & AVFMT_GENERIC_INDEX) && pkt->flags & AV_PKT_FLAG_KEY){
1154 ff_reduce_index(s, st->index);
1155 av_add_index_entry(st, st->parser->frame_offset, pkt->dts,
1156 0, 0, AVINDEX_KEYFRAME);
1163 av_free_packet(&st->cur_pkt);
1168 /* read next packet */
1169 ret = av_read_packet(s, &cur_pkt);
1171 if (ret == AVERROR(EAGAIN))
1173 /* return the last frames, if any */
1174 for(i = 0; i < s->nb_streams; i++) {
1176 if (st->parser && st->need_parsing) {
1177 av_parser_parse2(st->parser, st->codec,
1178 &pkt->data, &pkt->size,
1180 AV_NOPTS_VALUE, AV_NOPTS_VALUE,
1186 /* no more packets: really terminate parsing */
1189 st = s->streams[cur_pkt.stream_index];
1190 st->cur_pkt= cur_pkt;
1192 if(st->cur_pkt.pts != AV_NOPTS_VALUE &&
1193 st->cur_pkt.dts != AV_NOPTS_VALUE &&
1194 st->cur_pkt.pts < st->cur_pkt.dts){
1195 av_log(s, AV_LOG_WARNING, "Invalid timestamps stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d\n",
1196 st->cur_pkt.stream_index,
1200 // av_free_packet(&st->cur_pkt);
1204 if(s->debug & FF_FDEBUG_TS)
1205 av_log(s, AV_LOG_DEBUG, "av_read_packet stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d, duration=%d, flags=%d\n",
1206 st->cur_pkt.stream_index,
1210 st->cur_pkt.duration,
1214 st->cur_ptr = st->cur_pkt.data;
1215 st->cur_len = st->cur_pkt.size;
1216 if (st->need_parsing && !st->parser && !(s->flags & AVFMT_FLAG_NOPARSE)) {
1217 st->parser = av_parser_init(st->codec->codec_id);
1219 /* no parser available: just output the raw packets */
1220 st->need_parsing = AVSTREAM_PARSE_NONE;
1221 }else if(st->need_parsing == AVSTREAM_PARSE_HEADERS){
1222 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
1223 }else if(st->need_parsing == AVSTREAM_PARSE_FULL_ONCE){
1224 st->parser->flags |= PARSER_FLAG_ONCE;
1229 if(s->debug & FF_FDEBUG_TS)
1230 av_log(s, AV_LOG_DEBUG, "read_frame_internal stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d, duration=%d, flags=%d\n",
1241 int av_read_frame(AVFormatContext *s, AVPacket *pkt)
1245 const int genpts= s->flags & AVFMT_FLAG_GENPTS;
1248 pktl = s->packet_buffer;
1250 AVPacket *next_pkt= &pktl->pkt;
1252 if(genpts && next_pkt->dts != AV_NOPTS_VALUE){
1253 int wrap_bits = s->streams[next_pkt->stream_index]->pts_wrap_bits;
1254 while(pktl && next_pkt->pts == AV_NOPTS_VALUE){
1255 if( pktl->pkt.stream_index == next_pkt->stream_index
1256 && (0 > av_compare_mod(next_pkt->dts, pktl->pkt.dts, 2LL << (wrap_bits - 1)))
1257 && av_compare_mod(pktl->pkt.pts, pktl->pkt.dts, 2LL << (wrap_bits - 1))) { //not b frame
1258 next_pkt->pts= pktl->pkt.dts;
1262 pktl = s->packet_buffer;
1265 if( next_pkt->pts != AV_NOPTS_VALUE
1266 || next_pkt->dts == AV_NOPTS_VALUE
1268 /* read packet from packet buffer, if there is data */
1270 s->packet_buffer = pktl->next;
1276 int ret= read_frame_internal(s, pkt);
1278 if(pktl && ret != AVERROR(EAGAIN)){
1285 if(av_dup_packet(add_to_pktbuf(&s->packet_buffer, pkt,
1286 &s->packet_buffer_end)) < 0)
1287 return AVERROR(ENOMEM);
1289 assert(!s->packet_buffer);
1290 return read_frame_internal(s, pkt);
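/* Illustrative note (not part of the original file): with AVFMT_FLAG_GENPTS
 * set, av_read_frame() keeps packets in s->packet_buffer until a later
 * packet of the same stream supplies a dts that can be used to fill in a
 * missing pts, which is why the buffer walked above may grow before a packet
 * is returned. */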
1295 /* XXX: suppress the packet queue */
1296 static void flush_packet_queue(AVFormatContext *s)
1301 pktl = s->packet_buffer;
1304 s->packet_buffer = pktl->next;
1305 av_free_packet(&pktl->pkt);
1308 while(s->raw_packet_buffer){
1309 pktl = s->raw_packet_buffer;
1310 s->raw_packet_buffer = pktl->next;
1311 av_free_packet(&pktl->pkt);
1314 s->packet_buffer_end=
1315 s->raw_packet_buffer_end= NULL;
1316 s->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
1319 /*******************************************************/
1322 int av_find_default_stream_index(AVFormatContext *s)
1324 int first_audio_index = -1;
1328 if (s->nb_streams <= 0)
1330 for(i = 0; i < s->nb_streams; i++) {
1332 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
1335 if (first_audio_index < 0 && st->codec->codec_type == AVMEDIA_TYPE_AUDIO)
1336 first_audio_index = i;
1338 return first_audio_index >= 0 ? first_audio_index : 0;
1342 * Flush the frame reader.
1344 void ff_read_frame_flush(AVFormatContext *s)
1349 flush_packet_queue(s);
1353 /* for each stream, reset read state */
1354 for(i = 0; i < s->nb_streams; i++) {
1358 av_parser_close(st->parser);
1360 av_free_packet(&st->cur_pkt);
1362 st->last_IP_pts = AV_NOPTS_VALUE;
1363 st->cur_dts = AV_NOPTS_VALUE; /* we set the current DTS to an unspecified origin */
1364 st->reference_dts = AV_NOPTS_VALUE;
1369 st->probe_packets = MAX_PROBE_PACKETS;
1371 for(j=0; j<MAX_REORDER_DELAY+1; j++)
1372 st->pts_buffer[j]= AV_NOPTS_VALUE;
1376 void av_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp){
1379 for(i = 0; i < s->nb_streams; i++) {
1380 AVStream *st = s->streams[i];
1382 st->cur_dts = av_rescale(timestamp,
1383 st->time_base.den * (int64_t)ref_st->time_base.num,
1384 st->time_base.num * (int64_t)ref_st->time_base.den);
1388 void ff_reduce_index(AVFormatContext *s, int stream_index)
1390 AVStream *st= s->streams[stream_index];
1391 unsigned int max_entries= s->max_index_size / sizeof(AVIndexEntry);
1393 if((unsigned)st->nb_index_entries >= max_entries){
1395 for(i=0; 2*i<st->nb_index_entries; i++)
1396 st->index_entries[i]= st->index_entries[2*i];
1397 st->nb_index_entries= i;
1401 int ff_add_index_entry(AVIndexEntry **index_entries,
1402 int *nb_index_entries,
1403 unsigned int *index_entries_allocated_size,
1404 int64_t pos, int64_t timestamp, int size, int distance, int flags)
1406 AVIndexEntry *entries, *ie;
1409 if((unsigned)*nb_index_entries + 1 >= UINT_MAX / sizeof(AVIndexEntry))
1412 entries = av_fast_realloc(*index_entries,
1413 index_entries_allocated_size,
1414 (*nb_index_entries + 1) *
1415 sizeof(AVIndexEntry));
1419 *index_entries= entries;
1421 index= ff_index_search_timestamp(*index_entries, *nb_index_entries, timestamp, AVSEEK_FLAG_ANY);
1424 index= (*nb_index_entries)++;
1425 ie= &entries[index];
1426 assert(index==0 || ie[-1].timestamp < timestamp);
1428 ie= &entries[index];
1429 if(ie->timestamp != timestamp){
1430 if(ie->timestamp <= timestamp)
1432 memmove(entries + index + 1, entries + index, sizeof(AVIndexEntry)*(*nb_index_entries - index));
1433 (*nb_index_entries)++;
1434 }else if(ie->pos == pos && distance < ie->min_distance) //do not reduce the distance
1435 distance= ie->min_distance;
1439 ie->timestamp = timestamp;
1440 ie->min_distance= distance;
1447 int av_add_index_entry(AVStream *st,
1448 int64_t pos, int64_t timestamp, int size, int distance, int flags)
1450 return ff_add_index_entry(&st->index_entries, &st->nb_index_entries,
1451 &st->index_entries_allocated_size, pos,
1452 timestamp, size, distance, flags);
1455 int ff_index_search_timestamp(const AVIndexEntry *entries, int nb_entries,
1456 int64_t wanted_timestamp, int flags)
1464 //optimize appending index entries at the end
1465 if(b && entries[b-1].timestamp < wanted_timestamp)
1470 timestamp = entries[m].timestamp;
1471 if(timestamp >= wanted_timestamp)
1473 if(timestamp <= wanted_timestamp)
1476 m= (flags & AVSEEK_FLAG_BACKWARD) ? a : b;
1478 if(!(flags & AVSEEK_FLAG_ANY)){
1479 while(m>=0 && m<nb_entries && !(entries[m].flags & AVINDEX_KEYFRAME)){
1480 m += (flags & AVSEEK_FLAG_BACKWARD) ? -1 : 1;
1489 int av_index_search_timestamp(AVStream *st, int64_t wanted_timestamp,
1492 return ff_index_search_timestamp(st->index_entries, st->nb_index_entries,
1493 wanted_timestamp, flags);
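/* Illustrative usage sketch (not part of the original file): a caller that
 * wants the keyframe at or before a target timestamp can use:
 *
 *     int idx = av_index_search_timestamp(st, target_ts, AVSEEK_FLAG_BACKWARD);
 *     if (idx >= 0) {
 *         int64_t pos = st->index_entries[idx].pos;  // byte position to seek to
 *     }
 */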
1496 int av_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts, int flags){
1497 AVInputFormat *avif= s->iformat;
1498 int64_t av_uninit(pos_min), av_uninit(pos_max), pos, pos_limit;
1499 int64_t ts_min, ts_max, ts;
1504 if (stream_index < 0)
1507 av_dlog(s, "read_seek: %d %"PRId64"\n", stream_index, target_ts);
1510 ts_min= AV_NOPTS_VALUE;
1511 pos_limit= -1; //gcc falsely says it may be uninitialized
1513 st= s->streams[stream_index];
1514 if(st->index_entries){
1517 index= av_index_search_timestamp(st, target_ts, flags | AVSEEK_FLAG_BACKWARD); //FIXME whole func must be checked for non-keyframe entries in index case, especially read_timestamp()
1518 index= FFMAX(index, 0);
1519 e= &st->index_entries[index];
1521 if(e->timestamp <= target_ts || e->pos == e->min_distance){
1523 ts_min= e->timestamp;
1524 av_dlog(s, "using cached pos_min=0x%"PRIx64" dts_min=%"PRId64"\n",
1530 index= av_index_search_timestamp(st, target_ts, flags & ~AVSEEK_FLAG_BACKWARD);
1531 assert(index < st->nb_index_entries);
1533 e= &st->index_entries[index];
1534 assert(e->timestamp >= target_ts);
1536 ts_max= e->timestamp;
1537 pos_limit= pos_max - e->min_distance;
1538 av_dlog(s, "using cached pos_max=0x%"PRIx64" pos_limit=0x%"PRIx64" dts_max=%"PRId64"\n",
1539 pos_max,pos_limit, ts_max);
1543 pos= av_gen_search(s, stream_index, target_ts, pos_min, pos_max, pos_limit, ts_min, ts_max, flags, &ts, avif->read_timestamp);
1548 if ((ret = avio_seek(s->pb, pos, SEEK_SET)) < 0)
1551 av_update_cur_dts(s, st, ts);
1556 int64_t av_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts, int64_t pos_min, int64_t pos_max, int64_t pos_limit, int64_t ts_min, int64_t ts_max, int flags, int64_t *ts_ret, int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t )){
1558 int64_t start_pos, filesize;
1561 av_dlog(s, "gen_seek: %d %"PRId64"\n", stream_index, target_ts);
1563 if(ts_min == AV_NOPTS_VALUE){
1564 pos_min = s->data_offset;
1565 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1566 if (ts_min == AV_NOPTS_VALUE)
1570 if(ts_max == AV_NOPTS_VALUE){
1572 filesize = avio_size(s->pb);
1573 pos_max = filesize - 1;
1576 ts_max = read_timestamp(s, stream_index, &pos_max, pos_max + step);
1578 }while(ts_max == AV_NOPTS_VALUE && pos_max >= step);
1579 if (ts_max == AV_NOPTS_VALUE)
1583 int64_t tmp_pos= pos_max + 1;
1584 int64_t tmp_ts= read_timestamp(s, stream_index, &tmp_pos, INT64_MAX);
1585 if(tmp_ts == AV_NOPTS_VALUE)
1589 if(tmp_pos >= filesize)
1595 if(ts_min > ts_max){
1597 }else if(ts_min == ts_max){
1602 while (pos_min < pos_limit) {
1603 av_dlog(s, "pos_min=0x%"PRIx64" pos_max=0x%"PRIx64" dts_min=%"PRId64" dts_max=%"PRId64"\n",
1604 pos_min, pos_max, ts_min, ts_max);
1605 assert(pos_limit <= pos_max);
1608 int64_t approximate_keyframe_distance= pos_max - pos_limit;
            // interpolate position (better than plain bisection)
1610 pos = av_rescale(target_ts - ts_min, pos_max - pos_min, ts_max - ts_min)
1611 + pos_min - approximate_keyframe_distance;
1612 }else if(no_change==1){
1613 // bisection, if interpolation failed to change min or max pos last time
1614 pos = (pos_min + pos_limit)>>1;
1616 /* linear search if bisection failed, can only happen if there
1617 are very few or no keyframes between min/max */
1622 else if(pos > pos_limit)
1626 ts = read_timestamp(s, stream_index, &pos, INT64_MAX); //may pass pos_limit instead of -1
1631 av_dlog(s, "%"PRId64" %"PRId64" %"PRId64" / %"PRId64" %"PRId64" %"PRId64" target:%"PRId64" limit:%"PRId64" start:%"PRId64" noc:%d\n",
1632 pos_min, pos, pos_max, ts_min, ts, ts_max, target_ts,
1633 pos_limit, start_pos, no_change);
1634 if(ts == AV_NOPTS_VALUE){
1635 av_log(s, AV_LOG_ERROR, "read_timestamp() failed in the middle\n");
1638 assert(ts != AV_NOPTS_VALUE);
1639 if (target_ts <= ts) {
1640 pos_limit = start_pos - 1;
1644 if (target_ts >= ts) {
1650 pos = (flags & AVSEEK_FLAG_BACKWARD) ? pos_min : pos_max;
1651 ts = (flags & AVSEEK_FLAG_BACKWARD) ? ts_min : ts_max;
1653 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1655 ts_max = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1656 av_dlog(s, "pos=0x%"PRIx64" %"PRId64"<=%"PRId64"<=%"PRId64"\n",
1657 pos, ts_min, target_ts, ts_max);
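/* Worked example (illustrative, not part of the original file): the
 * interpolation step above computes
 *     pos = pos_min + (target_ts - ts_min) * (pos_max - pos_min)
 *                     / (ts_max - ts_min) - approximate_keyframe_distance,
 * so seeking to 30 s in a roughly constant-bitrate 60 s file spanning bytes
 * 0..6,000,000 first probes near byte 3,000,000 instead of blindly
 * bisecting. */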
1662 static int seek_frame_byte(AVFormatContext *s, int stream_index, int64_t pos, int flags){
1663 int64_t pos_min, pos_max;
1667 if (stream_index < 0)
1670 st= s->streams[stream_index];
1673 pos_min = s->data_offset;
1674 pos_max = avio_size(s->pb) - 1;
1676 if (pos < pos_min) pos= pos_min;
1677 else if(pos > pos_max) pos= pos_max;
1679 avio_seek(s->pb, pos, SEEK_SET);
1682 av_update_cur_dts(s, st, ts);
1687 static int seek_frame_generic(AVFormatContext *s,
1688 int stream_index, int64_t timestamp, int flags)
1695 st = s->streams[stream_index];
1697 index = av_index_search_timestamp(st, timestamp, flags);
1699 if(index < 0 && st->nb_index_entries && timestamp < st->index_entries[0].timestamp)
1702 if(index < 0 || index==st->nb_index_entries-1){
1706 if(st->nb_index_entries){
1707 assert(st->index_entries);
1708 ie= &st->index_entries[st->nb_index_entries-1];
1709 if ((ret = avio_seek(s->pb, ie->pos, SEEK_SET)) < 0)
1711 av_update_cur_dts(s, st, ie->timestamp);
1713 if ((ret = avio_seek(s->pb, s->data_offset, SEEK_SET)) < 0)
1719 ret = av_read_frame(s, &pkt);
1720 }while(ret == AVERROR(EAGAIN));
1723 av_free_packet(&pkt);
1724 if(stream_index == pkt.stream_index){
1725 if((pkt.flags & AV_PKT_FLAG_KEY) && pkt.dts > timestamp)
1729 index = av_index_search_timestamp(st, timestamp, flags);
1734 ff_read_frame_flush(s);
1735 if (s->iformat->read_seek){
1736 if(s->iformat->read_seek(s, stream_index, timestamp, flags) >= 0)
1739 ie = &st->index_entries[index];
1740 if ((ret = avio_seek(s->pb, ie->pos, SEEK_SET)) < 0)
1742 av_update_cur_dts(s, st, ie->timestamp);
1747 int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
1752 ff_read_frame_flush(s);
1754 if(flags & AVSEEK_FLAG_BYTE)
1755 return seek_frame_byte(s, stream_index, timestamp, flags);
1757 if(stream_index < 0){
1758 stream_index= av_find_default_stream_index(s);
1759 if(stream_index < 0)
1762 st= s->streams[stream_index];
1763 /* timestamp for default must be expressed in AV_TIME_BASE units */
1764 timestamp = av_rescale(timestamp, st->time_base.den, AV_TIME_BASE * (int64_t)st->time_base.num);
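        /* Worked example (illustrative, not part of the original file):
         * AV_TIME_BASE is 1000000, so a request for 5 seconds (5000000)
         * against a stream time base of 1/90000 becomes
         * 5000000 * 90000 / 1000000 = 450000 ticks in the stream time base. */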
1767 /* first, we try the format specific seek */
1768 if (s->iformat->read_seek)
1769 ret = s->iformat->read_seek(s, stream_index, timestamp, flags);
1776 if(s->iformat->read_timestamp && !(s->iformat->flags & AVFMT_NOBINSEARCH))
1777 return av_seek_frame_binary(s, stream_index, timestamp, flags);
1778 else if (!(s->iformat->flags & AVFMT_NOGENSEARCH))
1779 return seek_frame_generic(s, stream_index, timestamp, flags);
1784 int avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts, int64_t ts, int64_t max_ts, int flags)
1786 if(min_ts > ts || max_ts < ts)
1789 ff_read_frame_flush(s);
1791 if (s->iformat->read_seek2)
1792 return s->iformat->read_seek2(s, stream_index, min_ts, ts, max_ts, flags);
1794 if(s->iformat->read_timestamp){
1795 //try to seek via read_timestamp()
    // Fall back to the old API if the new one is not implemented but the old one is.
    // Note that the old API has somewhat different semantics.
1800 if(s->iformat->read_seek || 1)
1801 return av_seek_frame(s, stream_index, ts, flags | (ts - min_ts > (uint64_t)(max_ts - ts) ? AVSEEK_FLAG_BACKWARD : 0));
1803 // try some generic seek like seek_frame_generic() but with new ts semantics
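/* Illustrative usage sketch (not part of the original file): a caller that
 * wants "the closest point not after ts" can express it with the min/max
 * window, e.g.:
 *
 *     avformat_seek_file(s, -1, INT64_MIN, target_ts, target_ts, 0);
 */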
1806 /*******************************************************/
/**
 * Return TRUE if the file has an accurate duration in any stream.
 *
 * @return TRUE if the duration is accurate for at least one component.
 */
1813 static int has_duration(AVFormatContext *ic)
1818 for(i = 0;i < ic->nb_streams; i++) {
1819 st = ic->streams[i];
1820 if (st->duration != AV_NOPTS_VALUE)
/**
 * Estimate the stream timings from those of each component.
 *
 * Also compute the global bitrate, if possible.
 */
1831 static void update_stream_timings(AVFormatContext *ic)
1833 int64_t start_time, start_time1, end_time, end_time1;
1834 int64_t duration, duration1;
1838 start_time = INT64_MAX;
1839 end_time = INT64_MIN;
1840 duration = INT64_MIN;
1841 for(i = 0;i < ic->nb_streams; i++) {
1842 st = ic->streams[i];
1843 if (st->start_time != AV_NOPTS_VALUE && st->time_base.den) {
1844 start_time1= av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q);
1845 if (start_time1 < start_time)
1846 start_time = start_time1;
1847 if (st->duration != AV_NOPTS_VALUE) {
1848 end_time1 = start_time1
1849 + av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
1850 if (end_time1 > end_time)
1851 end_time = end_time1;
1854 if (st->duration != AV_NOPTS_VALUE) {
1855 duration1 = av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
1856 if (duration1 > duration)
1857 duration = duration1;
1860 if (start_time != INT64_MAX) {
1861 ic->start_time = start_time;
1862 if (end_time != INT64_MIN) {
1863 if (end_time - start_time > duration)
1864 duration = end_time - start_time;
1867 if (duration != INT64_MIN) {
1868 ic->duration = duration;
1869 if (ic->file_size > 0) {
1870 /* compute the bitrate */
1871 ic->bit_rate = (double)ic->file_size * 8.0 * AV_TIME_BASE /
1872 (double)ic->duration;
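            /* Worked example (illustrative, not part of the original file):
             * duration is in AV_TIME_BASE (microsecond) units, so a
             * 5,000,000-byte file lasting 60 s (60,000,000 units) gives
             * 5e6 * 8 * 1e6 / 6e7 = ~666,667 bits per second. */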
1877 static void fill_all_stream_timings(AVFormatContext *ic)
1882 update_stream_timings(ic);
1883 for(i = 0;i < ic->nb_streams; i++) {
1884 st = ic->streams[i];
1885 if (st->start_time == AV_NOPTS_VALUE) {
1886 if(ic->start_time != AV_NOPTS_VALUE)
1887 st->start_time = av_rescale_q(ic->start_time, AV_TIME_BASE_Q, st->time_base);
1888 if(ic->duration != AV_NOPTS_VALUE)
1889 st->duration = av_rescale_q(ic->duration, AV_TIME_BASE_Q, st->time_base);
1894 static void estimate_timings_from_bit_rate(AVFormatContext *ic)
1896 int64_t filesize, duration;
1900 /* if bit_rate is already set, we believe it */
1901 if (ic->bit_rate <= 0) {
1903 for(i=0;i<ic->nb_streams;i++) {
1904 st = ic->streams[i];
1905 if (st->codec->bit_rate > 0)
1906 bit_rate += st->codec->bit_rate;
1908 ic->bit_rate = bit_rate;
1911 /* if duration is already set, we believe it */
1912 if (ic->duration == AV_NOPTS_VALUE &&
1913 ic->bit_rate != 0 &&
1914 ic->file_size != 0) {
1915 filesize = ic->file_size;
1917 for(i = 0; i < ic->nb_streams; i++) {
1918 st = ic->streams[i];
1919 duration= av_rescale(8*filesize, st->time_base.den, ic->bit_rate*(int64_t)st->time_base.num);
1920 if (st->duration == AV_NOPTS_VALUE)
1921 st->duration = duration;
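            /* Worked example (illustrative, not part of the original file):
             * a 1,000,000-byte file with a total bit rate of 800,000 bit/s
             * and a stream time base of 1/90000 yields
             * 8 * 1e6 * 90000 / 800000 = 900,000 ticks, i.e. 10 seconds. */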
1927 #define DURATION_MAX_READ_SIZE 250000
1928 #define DURATION_MAX_RETRY 3
1930 /* only usable for MPEG-PS streams */
1931 static void estimate_timings_from_pts(AVFormatContext *ic, int64_t old_offset)
1933 AVPacket pkt1, *pkt = &pkt1;
1935 int read_size, i, ret;
1937 int64_t filesize, offset, duration;
1942 /* flush packet queue */
1943 flush_packet_queue(ic);
1945 for (i=0; i<ic->nb_streams; i++) {
1946 st = ic->streams[i];
1947 if (st->start_time == AV_NOPTS_VALUE && st->first_dts == AV_NOPTS_VALUE)
1948 av_log(st->codec, AV_LOG_WARNING, "start time is not set in estimate_timings_from_pts\n");
1951 av_parser_close(st->parser);
1953 av_free_packet(&st->cur_pkt);
1957 /* estimate the end time (duration) */
1958 /* XXX: may need to support wrapping */
1959 filesize = ic->file_size;
1960 end_time = AV_NOPTS_VALUE;
1962 offset = filesize - (DURATION_MAX_READ_SIZE<<retry);
1966 avio_seek(ic->pb, offset, SEEK_SET);
1969 if (read_size >= DURATION_MAX_READ_SIZE<<(FFMAX(retry-1,0)))
1973 ret = av_read_packet(ic, pkt);
1974 }while(ret == AVERROR(EAGAIN));
1977 read_size += pkt->size;
1978 st = ic->streams[pkt->stream_index];
1979 if (pkt->pts != AV_NOPTS_VALUE &&
1980 (st->start_time != AV_NOPTS_VALUE ||
1981 st->first_dts != AV_NOPTS_VALUE)) {
1982 duration = end_time = pkt->pts;
1983 if (st->start_time != AV_NOPTS_VALUE) duration -= st->start_time;
1984 else duration -= st->first_dts;
1986 duration += 1LL<<st->pts_wrap_bits;
1988 if (st->duration == AV_NOPTS_VALUE ||
1989 st->duration < duration)
1990 st->duration = duration;
1993 av_free_packet(pkt);
1995 }while( end_time==AV_NOPTS_VALUE
1996 && filesize > (DURATION_MAX_READ_SIZE<<retry)
1997 && ++retry <= DURATION_MAX_RETRY);
1999 fill_all_stream_timings(ic);
2001 avio_seek(ic->pb, old_offset, SEEK_SET);
2002 for (i=0; i<ic->nb_streams; i++) {
2004 st->cur_dts= st->first_dts;
2005 st->last_IP_pts = AV_NOPTS_VALUE;
2006 st->reference_dts = AV_NOPTS_VALUE;
2010 static void estimate_timings(AVFormatContext *ic, int64_t old_offset)
2014 /* get the file size, if possible */
2015 if (ic->iformat->flags & AVFMT_NOFILE) {
2018 file_size = avio_size(ic->pb);
2022 ic->file_size = file_size;
2024 if ((!strcmp(ic->iformat->name, "mpeg") ||
2025 !strcmp(ic->iformat->name, "mpegts")) &&
2026 file_size && ic->pb->seekable) {
2027 /* get accurate estimate from the PTSes */
2028 estimate_timings_from_pts(ic, old_offset);
2029 } else if (has_duration(ic)) {
        /* at least one component has timings - we use them for all
           the components */
2032 fill_all_stream_timings(ic);
2034 av_log(ic, AV_LOG_WARNING, "Estimating duration from bitrate, this may be inaccurate\n");
2035 /* less precise: use bitrate info */
2036 estimate_timings_from_bit_rate(ic);
2038 update_stream_timings(ic);
2042 AVStream av_unused *st;
2043 for(i = 0;i < ic->nb_streams; i++) {
2044 st = ic->streams[i];
2045 av_dlog(ic, "%d: start_time: %0.3f duration: %0.3f\n", i,
2046 (double) st->start_time / AV_TIME_BASE,
2047 (double) st->duration / AV_TIME_BASE);
2049 av_dlog(ic, "stream: start_time: %0.3f duration: %0.3f bitrate=%d kb/s\n",
2050 (double) ic->start_time / AV_TIME_BASE,
2051 (double) ic->duration / AV_TIME_BASE,
2052 ic->bit_rate / 1000);
2056 static int has_codec_parameters(AVCodecContext *avctx)
2059 switch (avctx->codec_type) {
2060 case AVMEDIA_TYPE_AUDIO:
2061 val = avctx->sample_rate && avctx->channels && avctx->sample_fmt != AV_SAMPLE_FMT_NONE;
2062 if (!avctx->frame_size &&
2063 (avctx->codec_id == CODEC_ID_VORBIS ||
2064 avctx->codec_id == CODEC_ID_AAC ||
2065 avctx->codec_id == CODEC_ID_MP1 ||
2066 avctx->codec_id == CODEC_ID_MP2 ||
2067 avctx->codec_id == CODEC_ID_MP3 ||
2068 avctx->codec_id == CODEC_ID_SPEEX))
2071 case AVMEDIA_TYPE_VIDEO:
2072 val = avctx->width && avctx->pix_fmt != PIX_FMT_NONE;
2078 return avctx->codec_id != CODEC_ID_NONE && val != 0;
2081 static int has_decode_delay_been_guessed(AVStream *st)
2083 return st->codec->codec_id != CODEC_ID_H264 ||
2084 st->codec_info_nb_frames >= 6 + st->codec->has_b_frames;
2087 static int try_decode_frame(AVStream *st, AVPacket *avpkt, AVDictionary **options)
2091 int got_picture, data_size, ret=0;
2094 if(!st->codec->codec){
2095 codec = avcodec_find_decoder(st->codec->codec_id);
2098 ret = avcodec_open2(st->codec, codec, options);
2103 if(!has_codec_parameters(st->codec) || !has_decode_delay_been_guessed(st) ||
2104 (!st->codec_info_nb_frames && st->codec->codec->capabilities & CODEC_CAP_CHANNEL_CONF)) {
2105 switch(st->codec->codec_type) {
2106 case AVMEDIA_TYPE_VIDEO:
2107 avcodec_get_frame_defaults(&picture);
2108 ret = avcodec_decode_video2(st->codec, &picture,
2109 &got_picture, avpkt);
2111 case AVMEDIA_TYPE_AUDIO:
2112 data_size = FFMAX(avpkt->size, AVCODEC_MAX_AUDIO_FRAME_SIZE);
2113 samples = av_malloc(data_size);
2116 ret = avcodec_decode_audio3(st->codec, samples,
2128 unsigned int ff_codec_get_tag(const AVCodecTag *tags, enum CodecID id)
2130 while (tags->id != CODEC_ID_NONE) {
2138 enum CodecID ff_codec_get_id(const AVCodecTag *tags, unsigned int tag)
2141 for(i=0; tags[i].id != CODEC_ID_NONE;i++) {
2142 if(tag == tags[i].tag)
2145 for(i=0; tags[i].id != CODEC_ID_NONE; i++) {
2146 if (ff_toupper4(tag) == ff_toupper4(tags[i].tag))
2149 return CODEC_ID_NONE;
2152 unsigned int av_codec_get_tag(const AVCodecTag * const *tags, enum CodecID id)
2155 for(i=0; tags && tags[i]; i++){
2156 int tag= ff_codec_get_tag(tags[i], id);
2162 enum CodecID av_codec_get_id(const AVCodecTag * const *tags, unsigned int tag)
2165 for(i=0; tags && tags[i]; i++){
2166 enum CodecID id= ff_codec_get_id(tags[i], tag);
2167 if(id!=CODEC_ID_NONE) return id;
2169 return CODEC_ID_NONE;
2172 static void compute_chapters_end(AVFormatContext *s)
2175 int64_t max_time = s->duration + ((s->start_time == AV_NOPTS_VALUE) ? 0 : s->start_time);
2177 for (i = 0; i < s->nb_chapters; i++)
2178 if (s->chapters[i]->end == AV_NOPTS_VALUE) {
2179 AVChapter *ch = s->chapters[i];
2180 int64_t end = max_time ? av_rescale_q(max_time, AV_TIME_BASE_Q, ch->time_base)
2183 for (j = 0; j < s->nb_chapters; j++) {
2184 AVChapter *ch1 = s->chapters[j];
2185 int64_t next_start = av_rescale_q(ch1->start, ch1->time_base, ch->time_base);
2186 if (j != i && next_start > ch->start && next_start < end)
2189 ch->end = (end == INT64_MAX) ? ch->start : end;
2193 static int get_std_framerate(int i){
2194 if(i<60*12) return i*1001;
2195 else return ((const int[]){24,30,60,12,15})[i-60*12]*1000*12;
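/* Descriptive note (not part of the original file): the values returned by
 * get_std_framerate() are framerates scaled by 1001 * 12, so indices below
 * 60*12 cover framerates up to (just under) 60 fps in steps of 1/12 fps, and
 * the last five entries add the exact NTSC-style rates 24000/1001,
 * 30000/1001, 60000/1001, 12000/1001 and 15000/1001. */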
/**
 * Is the time base unreliable?
 * This is a heuristic to balance quick acceptance of the values in the
 * headers against some extra checks.
 * Old DivX and Xvid often have nonsense timebases like 1 fps or 2 fps.
 * MPEG-2 commonly misuses field repeat flags to store different framerates.
 * And there are "variable" fps files that this needs to detect as well.
 */
2206 static int tb_unreliable(AVCodecContext *c){
2207 if( c->time_base.den >= 101L*c->time_base.num
2208 || c->time_base.den < 5L*c->time_base.num
2209 /* || c->codec_tag == AV_RL32("DIVX")
2210 || c->codec_tag == AV_RL32("XVID")*/
2211 || c->codec_id == CODEC_ID_MPEG2VIDEO
2212 || c->codec_id == CODEC_ID_H264
2218 #if FF_API_FORMAT_PARAMETERS
2219 int av_find_stream_info(AVFormatContext *ic)
2221 return avformat_find_stream_info(ic, NULL);
2225 int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
2227 int i, count, ret, read_size, j;
2229 AVPacket pkt1, *pkt;
2230 int64_t old_offset = avio_tell(ic->pb);
2231 int orig_nb_streams = ic->nb_streams; // new streams might appear, no options for those
2233 for(i=0;i<ic->nb_streams;i++) {
2235 st = ic->streams[i];
2237 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO ||
2238 st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE) {
2239 /* if(!st->time_base.num)
2241 if(!st->codec->time_base.num)
2242 st->codec->time_base= st->time_base;
2244 //only for the split stuff
2245 if (!st->parser && !(ic->flags & AVFMT_FLAG_NOPARSE)) {
2246 st->parser = av_parser_init(st->codec->codec_id);
2247 if(st->need_parsing == AVSTREAM_PARSE_HEADERS && st->parser){
2248 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
2251 assert(!st->codec->codec);
2252 codec = avcodec_find_decoder(st->codec->codec_id);
2254 /* Ensure that subtitle_header is properly set. */
2255 if (st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE
2256 && codec && !st->codec->codec)
2257 avcodec_open2(st->codec, codec, options ? &options[i] : NULL);
2259 //try to just open decoders, in case this is enough to get parameters
2260 if(!has_codec_parameters(st->codec)){
2261 if (codec && !st->codec->codec)
2262 avcodec_open2(st->codec, codec, options ? &options[i] : NULL);
2266 for (i=0; i<ic->nb_streams; i++) {
2267 ic->streams[i]->info->last_dts = AV_NOPTS_VALUE;
2273 if(url_interrupt_cb()){
2275 av_log(ic, AV_LOG_DEBUG, "interrupted\n");
2279 /* check if one codec still needs to be handled */
2280 for(i=0;i<ic->nb_streams;i++) {
2281 int fps_analyze_framecount = 20;
2283 st = ic->streams[i];
2284 if (!has_codec_parameters(st->codec))
            /* If the timebase is coarse (like the usual millisecond precision
               of mkv), we need to analyze more frames to reliably arrive at
               the real framerate. */
2289 if (av_q2d(st->time_base) > 0.0005)
2290 fps_analyze_framecount *= 2;
2291 if (ic->fps_probe_size >= 0)
2292 fps_analyze_framecount = ic->fps_probe_size;
2293 /* variable fps and no guess at the real fps */
2294 if( tb_unreliable(st->codec) && !(st->r_frame_rate.num && st->avg_frame_rate.num)
2295 && st->info->duration_count < fps_analyze_framecount
2296 && st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
2298 if(st->parser && st->parser->parser->split && !st->codec->extradata)
2300 if(st->first_dts == AV_NOPTS_VALUE)
2303 if (i == ic->nb_streams) {
            /* NOTE: If the format has no header, then we need to read some
               packets to get most of the streams, so we cannot stop here. */
2307 if (!(ic->ctx_flags & AVFMTCTX_NOHEADER)) {
2308 /* if we found the info for all the codecs, we can stop */
2310 av_log(ic, AV_LOG_DEBUG, "All info found\n");
2314 /* we did not get all the codec info, but we read too much data */
2315 if (read_size >= ic->probesize) {
2317 av_log(ic, AV_LOG_DEBUG, "Probe buffer size limit %d reached\n", ic->probesize);
        /* NOTE: A new stream can be added here if there is no header in the
           file (AVFMTCTX_NOHEADER). */
2323 ret = read_frame_internal(ic, &pkt1);
2324 if (ret == AVERROR(EAGAIN))
            ret = -1; /* we could not get all the codec parameters before EOF */
2330 for(i=0;i<ic->nb_streams;i++) {
2331 st = ic->streams[i];
2332 if (!has_codec_parameters(st->codec)){
2334 avcodec_string(buf, sizeof(buf), st->codec, 0);
2335 av_log(ic, AV_LOG_WARNING, "Could not find codec parameters (%s)\n", buf);
2343 pkt= add_to_pktbuf(&ic->packet_buffer, &pkt1, &ic->packet_buffer_end);
2344 if ((ret = av_dup_packet(pkt)) < 0)
2345 goto find_stream_info_err;
2347 read_size += pkt->size;
2349 st = ic->streams[pkt->stream_index];
2350 if (st->codec_info_nb_frames>1) {
2351 if (st->time_base.den > 0 && av_rescale_q(st->info->codec_info_duration, st->time_base, AV_TIME_BASE_Q) >= ic->max_analyze_duration) {
2352 av_log(ic, AV_LOG_WARNING, "max_analyze_duration reached\n");
2355 st->info->codec_info_duration += pkt->duration;
2358 int64_t last = st->info->last_dts;
2359 int64_t duration= pkt->dts - last;
2361 if(pkt->dts != AV_NOPTS_VALUE && last != AV_NOPTS_VALUE && duration>0){
2362 double dur= duration * av_q2d(st->time_base);
2364 // if(st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
2365 // av_log(NULL, AV_LOG_ERROR, "%f\n", dur);
2366 if (st->info->duration_count < 2)
2367 memset(st->info->duration_error, 0, sizeof(st->info->duration_error));
2368 for (i=1; i<FF_ARRAY_ELEMS(st->info->duration_error); i++) {
2369 int framerate= get_std_framerate(i);
2370 int ticks= lrintf(dur*framerate/(1001*12));
2371 double error= dur - ticks*1001*12/(double)framerate;
2372 st->info->duration_error[i] += error*error;
2374 st->info->duration_count++;
2375 // ignore the first 4 values, as they might have some random jitter
2376 if (st->info->duration_count > 3)
2377 st->info->duration_gcd = av_gcd(st->info->duration_gcd, duration);
2379 if (last == AV_NOPTS_VALUE || st->info->duration_count <= 1)
2380 st->info->last_dts = pkt->dts;
2382 if(st->parser && st->parser->parser->split && !st->codec->extradata){
2383 int i= st->parser->parser->split(st->codec, pkt->data, pkt->size);
2385 st->codec->extradata_size= i;
2386 st->codec->extradata= av_malloc(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
2387 memcpy(st->codec->extradata, pkt->data, st->codec->extradata_size);
2388 memset(st->codec->extradata + i, 0, FF_INPUT_BUFFER_PADDING_SIZE);
2392 /* if still no information, we try to open the codec and to
2393 decompress the frame. We try to avoid that in most cases as
2394 it takes longer and uses more memory. For MPEG-4, we need to
2395 decompress for QuickTime.
2397 If CODEC_CAP_CHANNEL_CONF is set, this will force decoding of at
2398 least one frame of codec data; this makes sure the codec initializes
2399 the channel configuration rather than only trusting the values from the container. */
2401 try_decode_frame(st, pkt, (options && i < orig_nb_streams )? &options[i] : NULL);
2403 st->codec_info_nb_frames++;
2407 // close codecs which were opened in try_decode_frame()
2408 for(i=0;i<ic->nb_streams;i++) {
2409 st = ic->streams[i];
2410 if(st->codec->codec)
2411 avcodec_close(st->codec);
2413 for(i=0;i<ic->nb_streams;i++) {
2414 st = ic->streams[i];
2415 if (st->codec_info_nb_frames>2 && !st->avg_frame_rate.num && st->info->codec_info_duration)
2416 av_reduce(&st->avg_frame_rate.num, &st->avg_frame_rate.den,
2417 (st->codec_info_nb_frames-2)*(int64_t)st->time_base.den,
2418 st->info->codec_info_duration*(int64_t)st->time_base.num, 60000);
2419 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
2420 if(st->codec->codec_id == CODEC_ID_RAWVIDEO && !st->codec->codec_tag && !st->codec->bits_per_coded_sample)
2421 st->codec->codec_tag= avcodec_pix_fmt_to_codec_tag(st->codec->pix_fmt);
2423 // the check for tb_unreliable() is not completely correct, since this is not about handling
2424 // an unreliable/inexact time base, but a time base that is finer than necessary, as e.g.
2425 // ipmovie.c produces.
2426 if (tb_unreliable(st->codec) && st->info->duration_count > 15 && st->info->duration_gcd > 1 && !st->r_frame_rate.num)
2427 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, st->time_base.den, st->time_base.num * st->info->duration_gcd, INT_MAX);
2428 if (st->info->duration_count && !st->r_frame_rate.num
2429 && tb_unreliable(st->codec) /*&&
2430 //FIXME we should not special-case MPEG-2, but this needs testing with non-MPEG-2 ...
2431 st->time_base.num*duration_sum[i]/st->info->duration_count*101LL > st->time_base.den*/){
2433 double best_error= 2*av_q2d(st->time_base);
2434 best_error = best_error*best_error*st->info->duration_count*1000*12*30;
2436 for (j=1; j<FF_ARRAY_ELEMS(st->info->duration_error); j++) {
2437 double error = st->info->duration_error[j] * get_std_framerate(j);
2438 // if(st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
2439 // av_log(NULL, AV_LOG_ERROR, "%f %f\n", get_std_framerate(j) / 12.0/1001, error);
2440 if(error < best_error){
2442 num = get_std_framerate(j);
2445 // do not increase the frame rate by more than 1% in order to match a standard frame rate.
2446 if (num && (!st->r_frame_rate.num || (double)num/(12*1001) < 1.01 * av_q2d(st->r_frame_rate)))
2447 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, num, 12*1001, INT_MAX);
2450 if (!st->r_frame_rate.num){
2451 if( st->codec->time_base.den * (int64_t)st->time_base.num
2452 <= st->codec->time_base.num * st->codec->ticks_per_frame * (int64_t)st->time_base.den){
2453 st->r_frame_rate.num = st->codec->time_base.den;
2454 st->r_frame_rate.den = st->codec->time_base.num * st->codec->ticks_per_frame;
2456 st->r_frame_rate.num = st->time_base.den;
2457 st->r_frame_rate.den = st->time_base.num;
2460 }else if(st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
2461 if(!st->codec->bits_per_coded_sample)
2462 st->codec->bits_per_coded_sample= av_get_bits_per_sample(st->codec->codec_id);
2463 // set stream disposition based on audio service type
2464 switch (st->codec->audio_service_type) {
2465 case AV_AUDIO_SERVICE_TYPE_EFFECTS:
2466 st->disposition = AV_DISPOSITION_CLEAN_EFFECTS; break;
2467 case AV_AUDIO_SERVICE_TYPE_VISUALLY_IMPAIRED:
2468 st->disposition = AV_DISPOSITION_VISUAL_IMPAIRED; break;
2469 case AV_AUDIO_SERVICE_TYPE_HEARING_IMPAIRED:
2470 st->disposition = AV_DISPOSITION_HEARING_IMPAIRED; break;
2471 case AV_AUDIO_SERVICE_TYPE_COMMENTARY:
2472 st->disposition = AV_DISPOSITION_COMMENT; break;
2473 case AV_AUDIO_SERVICE_TYPE_KARAOKE:
2474 st->disposition = AV_DISPOSITION_KARAOKE; break;
2479 estimate_timings(ic, old_offset);
2481 compute_chapters_end(ic);
2484 /* correct DTS for B-frame streams with no timestamps */
2485 for(i=0;i<ic->nb_streams;i++) {
2486 st = ic->streams[i];
2487 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
2489 ppktl = &ic->packet_buffer;
2491 if(ppkt1->stream_index != i)
2493 if(ppkt1->pkt->dts < 0)
2495 if(ppkt1->pkt->pts != AV_NOPTS_VALUE)
2497 ppkt1->pkt->dts -= delta;
2502 st->cur_dts -= delta;
2508 find_stream_info_err:
2509 for (i=0; i < ic->nb_streams; i++)
2510 av_freep(&ic->streams[i]->info);
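/**
 * Illustrative usage sketch (not part of this file): how a caller typically
 * combines opening an input with the stream probing implemented above. The
 * file name "input.mkv" is a placeholder and error handling is reduced to a
 * minimum.
 *
 * @code
 * AVFormatContext *ic = NULL;
 * int ret;
 *
 * if ((ret = avformat_open_input(&ic, "input.mkv", NULL, NULL)) < 0)
 *     return ret;                        // could not open or probe the format
 * if ((ret = avformat_find_stream_info(ic, NULL)) < 0) {
 *     av_close_input_file(ic);           // codec parameters could not be found
 *     return ret;
 * }
 * @endcode
 */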
2514 static AVProgram *find_program_from_stream(AVFormatContext *ic, int s)
2518 for (i = 0; i < ic->nb_programs; i++)
2519 for (j = 0; j < ic->programs[i]->nb_stream_indexes; j++)
2520 if (ic->programs[i]->stream_index[j] == s)
2521 return ic->programs[i];
2525 int av_find_best_stream(AVFormatContext *ic,
2526 enum AVMediaType type,
2527 int wanted_stream_nb,
2529 AVCodec **decoder_ret,
2532 int i, nb_streams = ic->nb_streams;
2533 int ret = AVERROR_STREAM_NOT_FOUND, best_count = -1;
2534 unsigned *program = NULL;
2535 AVCodec *decoder = NULL, *best_decoder = NULL;
2537 if (related_stream >= 0 && wanted_stream_nb < 0) {
2538 AVProgram *p = find_program_from_stream(ic, related_stream);
2540 program = p->stream_index;
2541 nb_streams = p->nb_stream_indexes;
2544 for (i = 0; i < nb_streams; i++) {
2545 int real_stream_index = program ? program[i] : i;
2546 AVStream *st = ic->streams[real_stream_index];
2547 AVCodecContext *avctx = st->codec;
2548 if (avctx->codec_type != type)
2550 if (wanted_stream_nb >= 0 && real_stream_index != wanted_stream_nb)
2552 if (st->disposition & (AV_DISPOSITION_HEARING_IMPAIRED|AV_DISPOSITION_VISUAL_IMPAIRED))
2555 decoder = avcodec_find_decoder(st->codec->codec_id);
2558 ret = AVERROR_DECODER_NOT_FOUND;
2562 if (best_count >= st->codec_info_nb_frames)
2564 best_count = st->codec_info_nb_frames;
2565 ret = real_stream_index;
2566 best_decoder = decoder;
2567 if (program && i == nb_streams - 1 && ret < 0) {
2569 nb_streams = ic->nb_streams;
2570 i = 0; /* no related stream found, try again with everything */
2574 *decoder_ret = best_decoder;
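/**
 * Illustrative usage sketch (not part of this file): selecting the best video
 * stream and its decoder in one call. "ic" is assumed to be an already opened
 * AVFormatContext.
 *
 * @code
 * AVCodec *dec = NULL;
 * int ret;
 * int idx = av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO, -1, -1, &dec, 0);
 * if (idx < 0)
 *     return idx;                        // AVERROR_STREAM_NOT_FOUND or
 *                                        // AVERROR_DECODER_NOT_FOUND
 * if ((ret = avcodec_open2(ic->streams[idx]->codec, dec, NULL)) < 0)
 *     return ret;
 * @endcode
 */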
2578 /*******************************************************/
2580 int av_read_play(AVFormatContext *s)
2582 if (s->iformat->read_play)
2583 return s->iformat->read_play(s);
2585 return avio_pause(s->pb, 0);
2586 return AVERROR(ENOSYS);
2589 int av_read_pause(AVFormatContext *s)
2591 if (s->iformat->read_pause)
2592 return s->iformat->read_pause(s);
2594 return avio_pause(s->pb, 1);
2595 return AVERROR(ENOSYS);
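/**
 * Illustrative usage sketch (not part of this file): pausing and resuming a
 * network stream (e.g. RTSP). Both calls return AVERROR(ENOSYS) when neither
 * the demuxer nor the underlying protocol supports pausing.
 *
 * @code
 * if (av_read_pause(ic) >= 0) {
 *     // ... playback is paused ...
 *     av_read_play(ic);                  // resume reading packets
 * }
 * @endcode
 */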
2598 void av_close_input_stream(AVFormatContext *s)
2600 flush_packet_queue(s);
2601 if (s->iformat->read_close)
2602 s->iformat->read_close(s);
2603 avformat_free_context(s);
2606 void avformat_free_context(AVFormatContext *s)
2612 if (s->iformat && s->iformat->priv_class && s->priv_data)
2613 av_opt_free(s->priv_data);
2615 for(i=0;i<s->nb_streams;i++) {
2616 /* free all data in a stream component */
2619 av_parser_close(st->parser);
2620 av_free_packet(&st->cur_pkt);
2622 av_dict_free(&st->metadata);
2623 av_free(st->index_entries);
2624 av_free(st->codec->extradata);
2625 av_free(st->codec->subtitle_header);
2627 av_free(st->priv_data);
2631 for(i=s->nb_programs-1; i>=0; i--) {
2632 av_dict_free(&s->programs[i]->metadata);
2633 av_freep(&s->programs[i]->stream_index);
2634 av_freep(&s->programs[i]);
2636 av_freep(&s->programs);
2637 av_freep(&s->priv_data);
2638 while(s->nb_chapters--) {
2639 av_dict_free(&s->chapters[s->nb_chapters]->metadata);
2640 av_free(s->chapters[s->nb_chapters]);
2642 av_freep(&s->chapters);
2643 av_dict_free(&s->metadata);
2644 av_freep(&s->streams);
2648 void av_close_input_file(AVFormatContext *s)
2650 AVIOContext *pb = (s->iformat->flags & AVFMT_NOFILE) || (s->flags & AVFMT_FLAG_CUSTOM_IO) ?
2652 av_close_input_stream(s);
2657 AVStream *av_new_stream(AVFormatContext *s, int id)
2663 if (s->nb_streams >= INT_MAX/sizeof(*streams))
2665 streams = av_realloc(s->streams, (s->nb_streams + 1) * sizeof(*streams));
2668 s->streams = streams;
2670 st = av_mallocz(sizeof(AVStream));
2673 if (!(st->info = av_mallocz(sizeof(*st->info)))) {
2678 st->codec = avcodec_alloc_context3(NULL);
2680 /* no default bitrate if decoding */
2681 st->codec->bit_rate = 0;
2683 st->index = s->nb_streams;
2685 st->start_time = AV_NOPTS_VALUE;
2686 st->duration = AV_NOPTS_VALUE;
2687 /* we set the current DTS to 0 so that formats without any timestamps
2688 but with durations still get some timestamps; formats with some unknown
2689 timestamps have their first few packets buffered and the
2690 timestamps corrected before they are returned to the user */
2692 st->first_dts = AV_NOPTS_VALUE;
2693 st->probe_packets = MAX_PROBE_PACKETS;
2695 /* default pts setting is MPEG-like */
2696 av_set_pts_info(st, 33, 1, 90000);
2697 st->last_IP_pts = AV_NOPTS_VALUE;
2698 for(i=0; i<MAX_REORDER_DELAY+1; i++)
2699 st->pts_buffer[i]= AV_NOPTS_VALUE;
2700 st->reference_dts = AV_NOPTS_VALUE;
2702 st->sample_aspect_ratio = (AVRational){0,1};
2704 s->streams[s->nb_streams++] = st;
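/**
 * Illustrative usage sketch (not part of this file): adding an audio stream
 * to an output context before writing the header. "oc" is an assumed output
 * AVFormatContext and the codec parameters are placeholders.
 *
 * @code
 * AVStream *st = av_new_stream(oc, 0);
 * if (!st)
 *     return AVERROR(ENOMEM);
 * st->codec->codec_type  = AVMEDIA_TYPE_AUDIO;
 * st->codec->codec_id    = CODEC_ID_PCM_S16LE;
 * st->codec->sample_rate = 48000;
 * st->codec->channels    = 2;
 * @endcode
 */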
2708 AVProgram *av_new_program(AVFormatContext *ac, int id)
2710 AVProgram *program=NULL;
2713 av_dlog(ac, "new_program: id=0x%04x\n", id);
2715 for(i=0; i<ac->nb_programs; i++)
2716 if(ac->programs[i]->id == id)
2717 program = ac->programs[i];
2720 program = av_mallocz(sizeof(AVProgram));
2723 dynarray_add(&ac->programs, &ac->nb_programs, program);
2724 program->discard = AVDISCARD_NONE;
2731 AVChapter *ff_new_chapter(AVFormatContext *s, int id, AVRational time_base, int64_t start, int64_t end, const char *title)
2733 AVChapter *chapter = NULL;
2736 for(i=0; i<s->nb_chapters; i++)
2737 if(s->chapters[i]->id == id)
2738 chapter = s->chapters[i];
2741 chapter= av_mallocz(sizeof(AVChapter));
2744 dynarray_add(&s->chapters, &s->nb_chapters, chapter);
2746 av_dict_set(&chapter->metadata, "title", title, 0);
2748 chapter->time_base= time_base;
2749 chapter->start = start;
2755 /************************************************************/
2756 /* output media file */
2758 #if FF_API_FORMAT_PARAMETERS
2759 int av_set_parameters(AVFormatContext *s, AVFormatParameters *ap)
2763 if (s->oformat->priv_data_size > 0) {
2764 s->priv_data = av_mallocz(s->oformat->priv_data_size);
2766 return AVERROR(ENOMEM);
2767 if (s->oformat->priv_class) {
2768 *(const AVClass**)s->priv_data= s->oformat->priv_class;
2769 av_opt_set_defaults(s->priv_data);
2772 s->priv_data = NULL;
2774 if (s->oformat->set_parameters) {
2775 ret = s->oformat->set_parameters(s, ap);
2783 static int validate_codec_tag(AVFormatContext *s, AVStream *st)
2785 const AVCodecTag *avctag;
2787 enum CodecID id = CODEC_ID_NONE;
2788 unsigned int tag = 0;
2791 * Check that tag + id is in the table
2792 * If neither is in the table -> OK
2793 * If tag is in the table with another id -> FAIL
2794 * If id is in the table with another tag -> FAIL unless strict < normal
2796 for (n = 0; s->oformat->codec_tag[n]; n++) {
2797 avctag = s->oformat->codec_tag[n];
2798 while (avctag->id != CODEC_ID_NONE) {
2799 if (ff_toupper4(avctag->tag) == ff_toupper4(st->codec->codec_tag)) {
2801 if (id == st->codec->codec_id)
2804 if (avctag->id == st->codec->codec_id)
2809 if (id != CODEC_ID_NONE)
2811 if (tag && (st->codec->strict_std_compliance >= FF_COMPLIANCE_NORMAL))
2816 #if FF_API_FORMAT_PARAMETERS
2817 int av_write_header(AVFormatContext *s)
2819 return avformat_write_header(s, NULL);
2823 int avformat_write_header(AVFormatContext *s, AVDictionary **options)
2827 AVDictionary *tmp = NULL;
2830 av_dict_copy(&tmp, *options, 0);
2831 if ((ret = av_opt_set_dict(s, &tmp)) < 0)
2834 // some sanity checks
2835 if (s->nb_streams == 0 && !(s->oformat->flags & AVFMT_NOSTREAMS)) {
2836 av_log(s, AV_LOG_ERROR, "no streams\n");
2837 ret = AVERROR(EINVAL);
2841 for(i=0;i<s->nb_streams;i++) {
2844 switch (st->codec->codec_type) {
2845 case AVMEDIA_TYPE_AUDIO:
2846 if(st->codec->sample_rate<=0){
2847 av_log(s, AV_LOG_ERROR, "sample rate not set\n");
2848 ret = AVERROR(EINVAL);
2851 if(!st->codec->block_align)
2852 st->codec->block_align = st->codec->channels *
2853 av_get_bits_per_sample(st->codec->codec_id) >> 3;
2855 case AVMEDIA_TYPE_VIDEO:
2856 if(st->codec->time_base.num<=0 || st->codec->time_base.den<=0){ //FIXME audio too?
2857 av_log(s, AV_LOG_ERROR, "time base not set\n");
2858 ret = AVERROR(EINVAL);
2861 if((st->codec->width<=0 || st->codec->height<=0) && !(s->oformat->flags & AVFMT_NODIMENSIONS)){
2862 av_log(s, AV_LOG_ERROR, "dimensions not set\n");
2863 ret = AVERROR(EINVAL);
2866 if(av_cmp_q(st->sample_aspect_ratio, st->codec->sample_aspect_ratio)){
2867 av_log(s, AV_LOG_ERROR, "Aspect ratio mismatch between encoder and muxer layer\n");
2868 ret = AVERROR(EINVAL);
2874 if(s->oformat->codec_tag){
2875 if(st->codec->codec_tag && st->codec->codec_id == CODEC_ID_RAWVIDEO && av_codec_get_tag(s->oformat->codec_tag, st->codec->codec_id) == 0 && !validate_codec_tag(s, st)){
2876 // the current rawvideo encoding system ends up setting the wrong codec_tag for AVI; we override it here
2877 st->codec->codec_tag= 0;
2879 if(st->codec->codec_tag){
2880 if (!validate_codec_tag(s, st)) {
2882 av_get_codec_tag_string(tagbuf, sizeof(tagbuf), st->codec->codec_tag);
2883 av_log(s, AV_LOG_ERROR,
2884 "Tag %s/0x%08x incompatible with output codec id '%d'\n",
2885 tagbuf, st->codec->codec_tag, st->codec->codec_id);
2886 ret = AVERROR_INVALIDDATA;
2890 st->codec->codec_tag= av_codec_get_tag(s->oformat->codec_tag, st->codec->codec_id);
2893 if(s->oformat->flags & AVFMT_GLOBALHEADER &&
2894 !(st->codec->flags & CODEC_FLAG_GLOBAL_HEADER))
2895 av_log(s, AV_LOG_WARNING, "Codec for stream %d does not use global headers but container format requires global headers\n", i);
2898 if (!s->priv_data && s->oformat->priv_data_size > 0) {
2899 s->priv_data = av_mallocz(s->oformat->priv_data_size);
2900 if (!s->priv_data) {
2901 ret = AVERROR(ENOMEM);
2904 if (s->oformat->priv_class) {
2905 *(const AVClass**)s->priv_data= s->oformat->priv_class;
2906 av_opt_set_defaults(s->priv_data);
2907 if ((ret = av_opt_set_dict(s->priv_data, &tmp)) < 0)
2912 /* set muxer identification string */
2913 if (s->nb_streams && !(s->streams[0]->codec->flags & CODEC_FLAG_BITEXACT)) {
2914 av_dict_set(&s->metadata, "encoder", LIBAVFORMAT_IDENT, 0);
2917 if(s->oformat->write_header){
2918 ret = s->oformat->write_header(s);
2923 /* init PTS generation */
2924 for(i=0;i<s->nb_streams;i++) {
2925 int64_t den = AV_NOPTS_VALUE;
2928 switch (st->codec->codec_type) {
2929 case AVMEDIA_TYPE_AUDIO:
2930 den = (int64_t)st->time_base.num * st->codec->sample_rate;
2932 case AVMEDIA_TYPE_VIDEO:
2933 den = (int64_t)st->time_base.num * st->codec->time_base.den;
2938 if (den != AV_NOPTS_VALUE) {
2940 ret = AVERROR_INVALIDDATA;
2943 frac_init(&st->pts, 0, 0, den);
2948 av_dict_free(options);
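/**
 * Illustrative usage sketch (not part of this file): passing private muxer
 * options through the AVDictionary parameter. "oc" is an assumed output
 * context and "some_muxer_option" is only a placeholder key; which options
 * exist depends on the selected muxer.
 *
 * @code
 * AVDictionary *opts = NULL;
 * int ret;
 *
 * av_dict_set(&opts, "some_muxer_option", "value", 0);  // placeholder option
 * ret = avformat_write_header(oc, &opts);
 * // options that were not consumed by the muxer remain in opts
 * av_dict_free(&opts);
 * if (ret < 0)
 *     return ret;
 * @endcode
 */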
2957 //FIXME merge with compute_pkt_fields
2958 static int compute_pkt_fields2(AVFormatContext *s, AVStream *st, AVPacket *pkt){
2959 int delay = FFMAX(st->codec->has_b_frames, !!st->codec->max_b_frames);
2960 int num, den, frame_size, i;
2962 av_dlog(s, "compute_pkt_fields2: pts:%"PRId64" dts:%"PRId64" cur_dts:%"PRId64" b:%d size:%d st:%d\n",
2963 pkt->pts, pkt->dts, st->cur_dts, delay, pkt->size, pkt->stream_index);
2965 /* if(pkt->pts == AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE)
2966 return AVERROR(EINVAL);*/
2968 /* duration field */
2969 if (pkt->duration == 0) {
2970 compute_frame_duration(&num, &den, st, NULL, pkt);
2972 pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den * st->codec->ticks_per_frame, den * (int64_t)st->time_base.num);
2976 if(pkt->pts == AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && delay==0)
2979 //XXX/FIXME this is a temporary hack until all encoders output pts
2980 if((pkt->pts == 0 || pkt->pts == AV_NOPTS_VALUE) && pkt->dts == AV_NOPTS_VALUE && !delay){
2982 // pkt->pts= st->cur_dts;
2983 pkt->pts= st->pts.val;
2986 //calculate dts from pts
2987 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY){
2988 st->pts_buffer[0]= pkt->pts;
2989 for(i=1; i<delay+1 && st->pts_buffer[i] == AV_NOPTS_VALUE; i++)
2990 st->pts_buffer[i]= pkt->pts + (i-delay-1) * pkt->duration;
2991 for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
2992 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
2994 pkt->dts= st->pts_buffer[0];
2997 if(st->cur_dts && st->cur_dts != AV_NOPTS_VALUE && st->cur_dts >= pkt->dts){
2998 av_log(s, AV_LOG_ERROR,
2999 "Application provided invalid, non monotonically increasing dts to muxer in stream %d: %"PRId64" >= %"PRId64"\n",
3000 st->index, st->cur_dts, pkt->dts);
3001 return AVERROR(EINVAL);
3003 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts < pkt->dts){
3004 av_log(s, AV_LOG_ERROR, "pts < dts in stream %d\n", st->index);
3005 return AVERROR(EINVAL);
3008 // av_log(s, AV_LOG_DEBUG, "av_write_frame: pts2:%"PRId64" dts2:%"PRId64"\n", pkt->pts, pkt->dts);
3009 st->cur_dts= pkt->dts;
3010 st->pts.val= pkt->dts;
3013 switch (st->codec->codec_type) {
3014 case AVMEDIA_TYPE_AUDIO:
3015 frame_size = get_audio_frame_size(st->codec, pkt->size);
3017 /* HACK/FIXME: we skip the initial zero-size packets as they are most
3018 likely due to the encoder delay, but it would be better if we
3019 had the real timestamps from the encoder */
3020 if (frame_size >= 0 && (pkt->size || st->pts.num!=st->pts.den>>1 || st->pts.val)) {
3021 frac_add(&st->pts, (int64_t)st->time_base.den * frame_size);
3024 case AVMEDIA_TYPE_VIDEO:
3025 frac_add(&st->pts, (int64_t)st->time_base.den * st->codec->time_base.num);
3033 int av_write_frame(AVFormatContext *s, AVPacket *pkt)
3035 int ret = compute_pkt_fields2(s, s->streams[pkt->stream_index], pkt);
3037 if(ret<0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
3040 ret= s->oformat->write_packet(s, pkt);
3043 s->streams[pkt->stream_index]->nb_frames++;
3047 void ff_interleave_add_packet(AVFormatContext *s, AVPacket *pkt,
3048 int (*compare)(AVFormatContext *, AVPacket *, AVPacket *))
3050 AVPacketList **next_point, *this_pktl;
3052 this_pktl = av_mallocz(sizeof(AVPacketList));
3053 this_pktl->pkt= *pkt;
3054 pkt->destruct= NULL; // do not free original but only the copy
3055 av_dup_packet(&this_pktl->pkt); // duplicate the packet if its data is not independently allocated
3057 if(s->streams[pkt->stream_index]->last_in_packet_buffer){
3058 next_point = &(s->streams[pkt->stream_index]->last_in_packet_buffer->next);
3060 next_point = &s->packet_buffer;
3063 if(compare(s, &s->packet_buffer_end->pkt, pkt)){
3064 while(!compare(s, &(*next_point)->pkt, pkt)){
3065 next_point= &(*next_point)->next;
3069 next_point = &(s->packet_buffer_end->next);
3072 assert(!*next_point);
3074 s->packet_buffer_end= this_pktl;
3077 this_pktl->next= *next_point;
3079 s->streams[pkt->stream_index]->last_in_packet_buffer=
3080 *next_point= this_pktl;
3083 static int ff_interleave_compare_dts(AVFormatContext *s, AVPacket *next, AVPacket *pkt)
3085 AVStream *st = s->streams[ pkt ->stream_index];
3086 AVStream *st2= s->streams[ next->stream_index];
3087 int comp = av_compare_ts(next->dts, st2->time_base, pkt->dts,
3091 return pkt->stream_index < next->stream_index;
3095 int av_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out, AVPacket *pkt, int flush){
3101 ff_interleave_add_packet(s, pkt, ff_interleave_compare_dts);
3104 for(i=0; i < s->nb_streams; i++)
3105 stream_count+= !!s->streams[i]->last_in_packet_buffer;
3107 if(stream_count && (s->nb_streams == stream_count || flush)){
3108 pktl= s->packet_buffer;
3111 s->packet_buffer= pktl->next;
3112 if(!s->packet_buffer)
3113 s->packet_buffer_end= NULL;
3115 if(s->streams[out->stream_index]->last_in_packet_buffer == pktl)
3116 s->streams[out->stream_index]->last_in_packet_buffer= NULL;
3120 av_init_packet(out);
3126 * Interleave an AVPacket correctly so it can be muxed.
3127 * @param out the interleaved packet will be output here
3128 * @param in the input packet
3129 * @param flush 1 if no further packets are available as input and all
3130 * remaining packets should be output
3131 * @return 1 if a packet was output, 0 if no packet could be output,
3132 * < 0 if an error occurred
3134 static int interleave_packet(AVFormatContext *s, AVPacket *out, AVPacket *in, int flush){
3135 if(s->oformat->interleave_packet)
3136 return s->oformat->interleave_packet(s, out, in, flush);
3138 return av_interleave_packet_per_dts(s, out, in, flush);
3141 int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt){
3142 AVStream *st= s->streams[ pkt->stream_index];
3145 //FIXME/XXX/HACK drop zero-sized packets
3146 if(st->codec->codec_type == AVMEDIA_TYPE_AUDIO && pkt->size==0)
3149 av_dlog(s, "av_interleaved_write_frame size:%d dts:%"PRId64" pts:%"PRId64"\n",
3150 pkt->size, pkt->dts, pkt->pts);
3151 if((ret = compute_pkt_fields2(s, st, pkt)) < 0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
3154 if(pkt->dts == AV_NOPTS_VALUE && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
3155 return AVERROR(EINVAL);
3159 int ret= interleave_packet(s, &opkt, pkt, 0);
3160 if(ret<=0) //FIXME cleanup needed for ret<0 ?
3163 ret= s->oformat->write_packet(s, &opkt);
3165 s->streams[opkt.stream_index]->nb_frames++;
3167 av_free_packet(&opkt);
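/**
 * Illustrative usage sketch (not part of this file): a minimal remuxing write
 * loop. "oc" is the output context and read_and_rescale_packet() is a
 * hypothetical helper that fills "pkt" with timestamps already rescaled to
 * the destination stream's time base.
 *
 * @code
 * AVPacket pkt;
 * int ret;
 *
 * while (read_and_rescale_packet(&pkt) >= 0) {      // hypothetical helper
 *     ret = av_interleaved_write_frame(oc, &pkt);   // handles DTS ordering
 *     if (ret < 0)
 *         return ret;
 * }
 * @endcode
 */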
3175 int av_write_trailer(AVFormatContext *s)
3181 ret= interleave_packet(s, &pkt, NULL, 1);
3182 if(ret<0) //FIXME cleanup needed for ret<0 ?
3187 ret= s->oformat->write_packet(s, &pkt);
3189 s->streams[pkt.stream_index]->nb_frames++;
3191 av_free_packet(&pkt);
3197 if(s->oformat->write_trailer)
3198 ret = s->oformat->write_trailer(s);
3200 for(i=0;i<s->nb_streams;i++) {
3201 av_freep(&s->streams[i]->priv_data);
3202 av_freep(&s->streams[i]->index_entries);
3204 if (s->iformat && s->iformat->priv_class)
3205 av_opt_free(s->priv_data);
3206 av_freep(&s->priv_data);
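/**
 * Illustrative usage sketch (not part of this file): the usual shutdown order
 * when muxing to a file opened with avio_open(). "oc" is an assumed output
 * context whose header and packets have already been written.
 *
 * @code
 * int ret = av_write_trailer(oc);        // flushes the interleaving buffers first
 * if (!(oc->oformat->flags & AVFMT_NOFILE))
 *     avio_close(oc->pb);
 * avformat_free_context(oc);
 * @endcode
 */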
3210 void ff_program_add_stream_index(AVFormatContext *ac, int progid, unsigned int idx)
3213 AVProgram *program=NULL;
3216 if (idx >= ac->nb_streams) {
3217 av_log(ac, AV_LOG_ERROR, "stream index %d is not valid\n", idx);
3221 for(i=0; i<ac->nb_programs; i++){
3222 if(ac->programs[i]->id != progid)
3224 program = ac->programs[i];
3225 for(j=0; j<program->nb_stream_indexes; j++)
3226 if(program->stream_index[j] == idx)
3229 tmp = av_realloc(program->stream_index, sizeof(unsigned int)*(program->nb_stream_indexes+1));
3232 program->stream_index = tmp;
3233 program->stream_index[program->nb_stream_indexes++] = idx;
3238 static void print_fps(double d, const char *postfix){
3239 uint64_t v= lrintf(d*100);
3240 if (v% 100 ) av_log(NULL, AV_LOG_INFO, ", %3.2f %s", d, postfix);
3241 else if(v%(100*1000)) av_log(NULL, AV_LOG_INFO, ", %1.0f %s", d, postfix);
3242 else av_log(NULL, AV_LOG_INFO, ", %1.0fk %s", d/1000, postfix);
3245 static void dump_metadata(void *ctx, AVDictionary *m, const char *indent)
3247 if(m && !(m->count == 1 && av_dict_get(m, "language", NULL, 0))){
3248 AVDictionaryEntry *tag=NULL;
3250 av_log(ctx, AV_LOG_INFO, "%sMetadata:\n", indent);
3251 while((tag=av_dict_get(m, "", tag, AV_DICT_IGNORE_SUFFIX))) {
3252 if(strcmp("language", tag->key))
3253 av_log(ctx, AV_LOG_INFO, "%s %-16s: %s\n", indent, tag->key, tag->value);
3258 /* "user interface" functions */
3259 static void dump_stream_format(AVFormatContext *ic, int i, int index, int is_output)
3262 int flags = (is_output ? ic->oformat->flags : ic->iformat->flags);
3263 AVStream *st = ic->streams[i];
3264 int g = av_gcd(st->time_base.num, st->time_base.den);
3265 AVDictionaryEntry *lang = av_dict_get(st->metadata, "language", NULL, 0);
3266 avcodec_string(buf, sizeof(buf), st->codec, is_output);
3267 av_log(NULL, AV_LOG_INFO, " Stream #%d.%d", index, i);
3268 /* the PID is important information, so we display it */
3269 /* XXX: add a generic system */
3270 if (flags & AVFMT_SHOW_IDS)
3271 av_log(NULL, AV_LOG_INFO, "[0x%x]", st->id);
3273 av_log(NULL, AV_LOG_INFO, "(%s)", lang->value);
3274 av_log(NULL, AV_LOG_DEBUG, ", %d, %d/%d", st->codec_info_nb_frames, st->time_base.num/g, st->time_base.den/g);
3275 av_log(NULL, AV_LOG_INFO, ": %s", buf);
3276 if (st->sample_aspect_ratio.num && // default
3277 av_cmp_q(st->sample_aspect_ratio, st->codec->sample_aspect_ratio)) {
3278 AVRational display_aspect_ratio;
3279 av_reduce(&display_aspect_ratio.num, &display_aspect_ratio.den,
3280 st->codec->width*st->sample_aspect_ratio.num,
3281 st->codec->height*st->sample_aspect_ratio.den,
3283 av_log(NULL, AV_LOG_INFO, ", PAR %d:%d DAR %d:%d",
3284 st->sample_aspect_ratio.num, st->sample_aspect_ratio.den,
3285 display_aspect_ratio.num, display_aspect_ratio.den);
3287 if(st->codec->codec_type == AVMEDIA_TYPE_VIDEO){
3288 if(st->avg_frame_rate.den && st->avg_frame_rate.num)
3289 print_fps(av_q2d(st->avg_frame_rate), "fps");
3290 if(st->r_frame_rate.den && st->r_frame_rate.num)
3291 print_fps(av_q2d(st->r_frame_rate), "tbr");
3292 if(st->time_base.den && st->time_base.num)
3293 print_fps(1/av_q2d(st->time_base), "tbn");
3294 if(st->codec->time_base.den && st->codec->time_base.num)
3295 print_fps(1/av_q2d(st->codec->time_base), "tbc");
3297 if (st->disposition & AV_DISPOSITION_DEFAULT)
3298 av_log(NULL, AV_LOG_INFO, " (default)");
3299 if (st->disposition & AV_DISPOSITION_DUB)
3300 av_log(NULL, AV_LOG_INFO, " (dub)");
3301 if (st->disposition & AV_DISPOSITION_ORIGINAL)
3302 av_log(NULL, AV_LOG_INFO, " (original)");
3303 if (st->disposition & AV_DISPOSITION_COMMENT)
3304 av_log(NULL, AV_LOG_INFO, " (comment)");
3305 if (st->disposition & AV_DISPOSITION_LYRICS)
3306 av_log(NULL, AV_LOG_INFO, " (lyrics)");
3307 if (st->disposition & AV_DISPOSITION_KARAOKE)
3308 av_log(NULL, AV_LOG_INFO, " (karaoke)");
3309 if (st->disposition & AV_DISPOSITION_FORCED)
3310 av_log(NULL, AV_LOG_INFO, " (forced)");
3311 if (st->disposition & AV_DISPOSITION_HEARING_IMPAIRED)
3312 av_log(NULL, AV_LOG_INFO, " (hearing impaired)");
3313 if (st->disposition & AV_DISPOSITION_VISUAL_IMPAIRED)
3314 av_log(NULL, AV_LOG_INFO, " (visual impaired)");
3315 if (st->disposition & AV_DISPOSITION_CLEAN_EFFECTS)
3316 av_log(NULL, AV_LOG_INFO, " (clean effects)");
3317 av_log(NULL, AV_LOG_INFO, "\n");
3318 dump_metadata(NULL, st->metadata, " ");
3321 #if FF_API_DUMP_FORMAT
3322 void dump_format(AVFormatContext *ic,
3327 av_dump_format(ic, index, url, is_output);
3331 void av_dump_format(AVFormatContext *ic,
3337 uint8_t *printed = av_mallocz(ic->nb_streams);
3338 if (ic->nb_streams && !printed)
3341 av_log(NULL, AV_LOG_INFO, "%s #%d, %s, %s '%s':\n",
3342 is_output ? "Output" : "Input",
3344 is_output ? ic->oformat->name : ic->iformat->name,
3345 is_output ? "to" : "from", url);
3346 dump_metadata(NULL, ic->metadata, " ");
3348 av_log(NULL, AV_LOG_INFO, " Duration: ");
3349 if (ic->duration != AV_NOPTS_VALUE) {
3350 int hours, mins, secs, us;
3351 secs = ic->duration / AV_TIME_BASE;
3352 us = ic->duration % AV_TIME_BASE;
3357 av_log(NULL, AV_LOG_INFO, "%02d:%02d:%02d.%02d", hours, mins, secs,
3358 (100 * us) / AV_TIME_BASE);
3360 av_log(NULL, AV_LOG_INFO, "N/A");
3362 if (ic->start_time != AV_NOPTS_VALUE) {
3364 av_log(NULL, AV_LOG_INFO, ", start: ");
3365 secs = ic->start_time / AV_TIME_BASE;
3366 us = abs(ic->start_time % AV_TIME_BASE);
3367 av_log(NULL, AV_LOG_INFO, "%d.%06d",
3368 secs, (int)av_rescale(us, 1000000, AV_TIME_BASE));
3370 av_log(NULL, AV_LOG_INFO, ", bitrate: ");
3372 av_log(NULL, AV_LOG_INFO,"%d kb/s", ic->bit_rate / 1000);
3374 av_log(NULL, AV_LOG_INFO, "N/A");
3376 av_log(NULL, AV_LOG_INFO, "\n");
3378 for (i = 0; i < ic->nb_chapters; i++) {
3379 AVChapter *ch = ic->chapters[i];
3380 av_log(NULL, AV_LOG_INFO, " Chapter #%d.%d: ", index, i);
3381 av_log(NULL, AV_LOG_INFO, "start %f, ", ch->start * av_q2d(ch->time_base));
3382 av_log(NULL, AV_LOG_INFO, "end %f\n", ch->end * av_q2d(ch->time_base));
3384 dump_metadata(NULL, ch->metadata, " ");
3386 if(ic->nb_programs) {
3387 int j, k, total = 0;
3388 for(j=0; j<ic->nb_programs; j++) {
3389 AVDictionaryEntry *name = av_dict_get(ic->programs[j]->metadata,
3391 av_log(NULL, AV_LOG_INFO, " Program %d %s\n", ic->programs[j]->id,
3392 name ? name->value : "");
3393 dump_metadata(NULL, ic->programs[j]->metadata, " ");
3394 for(k=0; k<ic->programs[j]->nb_stream_indexes; k++) {
3395 dump_stream_format(ic, ic->programs[j]->stream_index[k], index, is_output);
3396 printed[ic->programs[j]->stream_index[k]] = 1;
3398 total += ic->programs[j]->nb_stream_indexes;
3400 if (total < ic->nb_streams)
3401 av_log(NULL, AV_LOG_INFO, " No Program\n");
3403 for(i=0;i<ic->nb_streams;i++)
3405 dump_stream_format(ic, i, index, is_output);
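/**
 * Illustrative usage sketch (not part of this file): printing a summary of an
 * opened input right after probing it. The last parameter selects input (0)
 * or output (1) layout; "input.mkv" is a placeholder.
 *
 * @code
 * av_dump_format(ic, 0, "input.mkv", 0);
 * @endcode
 */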
3410 int64_t av_gettime(void)
3413 gettimeofday(&tv,NULL);
3414 return (int64_t)tv.tv_sec * 1000000 + tv.tv_usec;
3417 uint64_t ff_ntp_time(void)
3419 return (av_gettime() / 1000) * 1000 + NTP_OFFSET_US;
3422 #if FF_API_PARSE_DATE
3423 #include "libavutil/parseutils.h"
3425 int64_t parse_date(const char *timestr, int duration)
3428 av_parse_time(&timeval, timestr, duration);
3433 #if FF_API_FIND_INFO_TAG
3434 #include "libavutil/parseutils.h"
3436 int find_info_tag(char *arg, int arg_size, const char *tag1, const char *info)
3438 return av_find_info_tag(arg, arg_size, tag1, info);
3442 int av_get_frame_filename(char *buf, int buf_size,
3443 const char *path, int number)
3446 char *q, buf1[20], c;
3447 int nd, len, percentd_found;
3459 while (isdigit(*p)) {
3460 nd = nd * 10 + *p++ - '0';
3463 } while (isdigit(c));
3472 snprintf(buf1, sizeof(buf1), "%0*d", nd, number);
3474 if ((q - buf + len) > buf_size - 1)
3476 memcpy(q, buf1, len);
3484 if ((q - buf) < buf_size - 1)
3488 if (!percentd_found)
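/**
 * Illustrative usage sketch (not part of this file): expanding the single
 * %d-style sequence number in an image2-style pattern.
 *
 * @code
 * char name[1024];
 * if (av_get_frame_filename(name, sizeof(name), "frame%04d.png", 7) < 0)
 *     return AVERROR(EINVAL);            // pattern has no (or several) %d
 * // name now contains "frame0007.png"
 * @endcode
 */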
3497 static void hex_dump_internal(void *avcl, FILE *f, int level, uint8_t *buf, int size)
3501 #define PRINT(...) do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0)
3503 for(i=0;i<size;i+=16) {
3510 PRINT(" %02x", buf[i+j]);
3515 for(j=0;j<len;j++) {
3517 if (c < ' ' || c > '~')
3526 void av_hex_dump(FILE *f, uint8_t *buf, int size)
3528 hex_dump_internal(NULL, f, 0, buf, size);
3531 void av_hex_dump_log(void *avcl, int level, uint8_t *buf, int size)
3533 hex_dump_internal(avcl, NULL, level, buf, size);
3536 static void pkt_dump_internal(void *avcl, FILE *f, int level, AVPacket *pkt, int dump_payload, AVRational time_base)
3539 #define PRINT(...) do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0)
3540 PRINT("stream #%d:\n", pkt->stream_index);
3541 PRINT(" keyframe=%d\n", ((pkt->flags & AV_PKT_FLAG_KEY) != 0));
3542 PRINT(" duration=%0.3f\n", pkt->duration * av_q2d(time_base));
3543 /* DTS is _always_ valid after av_read_frame() */
3545 if (pkt->dts == AV_NOPTS_VALUE)
3548 PRINT("%0.3f", pkt->dts * av_q2d(time_base));
3549 /* PTS may not be known if B-frames are present. */
3551 if (pkt->pts == AV_NOPTS_VALUE)
3554 PRINT("%0.3f", pkt->pts * av_q2d(time_base));
3556 PRINT(" size=%d\n", pkt->size);
3559 av_hex_dump(f, pkt->data, pkt->size);
3563 void av_pkt_dump(FILE *f, AVPacket *pkt, int dump_payload)
3565 AVRational tb = { 1, AV_TIME_BASE };
3566 pkt_dump_internal(NULL, f, 0, pkt, dump_payload, tb);
3570 void av_pkt_dump2(FILE *f, AVPacket *pkt, int dump_payload, AVStream *st)
3572 pkt_dump_internal(NULL, f, 0, pkt, dump_payload, st->time_base);
3576 void av_pkt_dump_log(void *avcl, int level, AVPacket *pkt, int dump_payload)
3578 AVRational tb = { 1, AV_TIME_BASE };
3579 pkt_dump_internal(avcl, NULL, level, pkt, dump_payload, tb);
3583 void av_pkt_dump_log2(void *avcl, int level, AVPacket *pkt, int dump_payload,
3586 pkt_dump_internal(avcl, NULL, level, pkt, dump_payload, st->time_base);
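/**
 * Illustrative usage sketch (not part of this file): logging each packet right
 * after av_read_frame(), using the owning stream's time base so the printed
 * timestamps come out in seconds. "ic" is an assumed opened input context.
 *
 * @code
 * AVPacket pkt;
 *
 * while (av_read_frame(ic, &pkt) >= 0) {
 *     av_pkt_dump_log2(ic, AV_LOG_DEBUG, &pkt, 0,
 *                      ic->streams[pkt.stream_index]);
 *     av_free_packet(&pkt);
 * }
 * @endcode
 */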
3589 void av_url_split(char *proto, int proto_size,
3590 char *authorization, int authorization_size,
3591 char *hostname, int hostname_size,
3593 char *path, int path_size,
3596 const char *p, *ls, *at, *col, *brk;
3598 if (port_ptr) *port_ptr = -1;
3599 if (proto_size > 0) proto[0] = 0;
3600 if (authorization_size > 0) authorization[0] = 0;
3601 if (hostname_size > 0) hostname[0] = 0;
3602 if (path_size > 0) path[0] = 0;
3604 /* parse protocol */
3605 if ((p = strchr(url, ':'))) {
3606 av_strlcpy(proto, url, FFMIN(proto_size, p + 1 - url));
3611 /* no protocol means plain filename */
3612 av_strlcpy(path, url, path_size);
3616 /* separate path from hostname */
3617 ls = strchr(p, '/');
3619 ls = strchr(p, '?');
3621 av_strlcpy(path, ls, path_size);
3623 ls = &p[strlen(p)]; // XXX
3625 /* the rest is hostname, use that to parse auth/port */
3627 /* authorization (user[:pass]@hostname) */
3628 if ((at = strchr(p, '@')) && at < ls) {
3629 av_strlcpy(authorization, p,
3630 FFMIN(authorization_size, at + 1 - p));
3631 p = at + 1; /* skip '@' */
3634 if (*p == '[' && (brk = strchr(p, ']')) && brk < ls) {
3636 av_strlcpy(hostname, p + 1,
3637 FFMIN(hostname_size, brk - p));
3638 if (brk[1] == ':' && port_ptr)
3639 *port_ptr = atoi(brk + 2);
3640 } else if ((col = strchr(p, ':')) && col < ls) {
3641 av_strlcpy(hostname, p,
3642 FFMIN(col + 1 - p, hostname_size));
3643 if (port_ptr) *port_ptr = atoi(col + 1);
3645 av_strlcpy(hostname, p,
3646 FFMIN(ls + 1 - p, hostname_size));