2 * various utility functions for use within FFmpeg
3 * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
5 * This file is part of FFmpeg.
7 * FFmpeg is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
12 * FFmpeg is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with FFmpeg; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22 #include "libavcodec/opt.h"
23 #include "libavutil/avstring.h"
33 * @file libavformat/utils.c
34 * various utility functions for use within FFmpeg
37 unsigned avformat_version(void)
39 return LIBAVFORMAT_VERSION_INT;
42 /* fraction handling */
45 * f = val + (num / den) + 0.5.
47 * 'num' is normalized so that 0 <= num < den.
49 * @param f fractional number
50 * @param val integer value
51 * @param num must be >= 0
52 * @param den must be >= 1
54 static void av_frac_init(AVFrac *f, int64_t val, int64_t num, int64_t den)
67 * Fractional addition to f: f = f + (incr / f->den).
69 * @param f fractional number
70 * @param incr increment, can be positive or negative
72 static void av_frac_add(AVFrac *f, int64_t incr)
85 } else if (num >= den) {
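/* Illustrative sketch (not from the original file): how a muxer is expected
 * to use the AVFrac helpers to keep a sample-accurate timestamp. The stream
 * fields used below mirror what av_write_header()/compute_pkt_fields2() do.
 *
 *     AVFrac pts;   // represents pts.val + pts.num / pts.den
 *     av_frac_init(&pts, 0, 0,
 *                  (int64_t)st->time_base.num * st->codec->sample_rate);
 *     // after muxing one audio frame of frame_size samples:
 *     av_frac_add(&pts, (int64_t)st->time_base.den * frame_size);
 *     // pts.val now holds the integer timestamp; the sub-unit remainder
 *     // stays in pts.num / pts.den, so no rounding error accumulates.
 */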
92 /** head of registered input format linked list */
93 AVInputFormat *first_iformat = NULL;
94 /** head of registered output format linked list */
95 AVOutputFormat *first_oformat = NULL;
97 AVInputFormat *av_iformat_next(AVInputFormat *f)
100 else return first_iformat;
103 AVOutputFormat *av_oformat_next(AVOutputFormat *f)
105 if(f) return f->next;
106 else return first_oformat;
109 void av_register_input_format(AVInputFormat *format)
113 while (*p != NULL) p = &(*p)->next;
118 void av_register_output_format(AVOutputFormat *format)
122 while (*p != NULL) p = &(*p)->next;
127 int match_ext(const char *filename, const char *extensions)
135 ext = strrchr(filename, '.');
141 while (*p != '\0' && *p != ',' && q-ext1<sizeof(ext1)-1)
144 if (!strcasecmp(ext1, ext))
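/* Example (illustrative): match_ext() compares the text after the last '.'
 * of the filename, case-insensitively, against a comma-separated list:
 *
 *     match_ext("clip.MKV", "mkv,webm");   // nonzero, "MKV" matches "mkv"
 *     match_ext("clip.avi", "mkv,webm");   // 0, no match
 *     match_ext("noext",    "mkv");        // 0, no extension present
 */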
154 AVOutputFormat *guess_format(const char *short_name, const char *filename,
155 const char *mime_type)
157 AVOutputFormat *fmt, *fmt_found;
158 int score_max, score;
160 /* specific test for image sequences */
161 #ifdef CONFIG_IMAGE2_MUXER
162 if (!short_name && filename &&
163 av_filename_number_test(filename) &&
164 av_guess_image2_codec(filename) != CODEC_ID_NONE) {
165 return guess_format("image2", NULL, NULL);
168 /* Find the proper file type. */
172 while (fmt != NULL) {
174 if (fmt->name && short_name && !strcmp(fmt->name, short_name))
176 if (fmt->mime_type && mime_type && !strcmp(fmt->mime_type, mime_type))
178 if (filename && fmt->extensions &&
179 match_ext(filename, fmt->extensions)) {
182 if (score > score_max) {
191 AVOutputFormat *guess_stream_format(const char *short_name, const char *filename,
192 const char *mime_type)
194 AVOutputFormat *fmt = guess_format(short_name, filename, mime_type);
197 AVOutputFormat *stream_fmt;
198 char stream_format_name[64];
200 snprintf(stream_format_name, sizeof(stream_format_name), "%s_stream", fmt->name);
201 stream_fmt = guess_format(stream_format_name, NULL, NULL);
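/* Hypothetical caller sketch: picking an output format. Any of the three
 * arguments may be NULL; the candidate with the highest score wins.
 *
 *     AVOutputFormat *ofmt = guess_format(NULL, "out.mp4", NULL); // by extension
 *     if (!ofmt)
 *         ofmt = guess_format("mpegts", NULL, NULL);              // by short name
 */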
210 enum CodecID av_guess_codec(AVOutputFormat *fmt, const char *short_name,
211 const char *filename, const char *mime_type, enum CodecType type){
212 if(type == CODEC_TYPE_VIDEO){
213 enum CodecID codec_id= CODEC_ID_NONE;
215 #ifdef CONFIG_IMAGE2_MUXER
216 if(!strcmp(fmt->name, "image2") || !strcmp(fmt->name, "image2pipe")){
217 codec_id= av_guess_image2_codec(filename);
220 if(codec_id == CODEC_ID_NONE)
221 codec_id= fmt->video_codec;
223 }else if(type == CODEC_TYPE_AUDIO)
224 return fmt->audio_codec;
226 return CODEC_ID_NONE;
229 AVInputFormat *av_find_input_format(const char *short_name)
232 for(fmt = first_iformat; fmt != NULL; fmt = fmt->next) {
233 if (!strcmp(fmt->name, short_name))
239 /* memory handling */
241 void av_destruct_packet(AVPacket *pkt)
244 pkt->data = NULL; pkt->size = 0;
247 void av_init_packet(AVPacket *pkt)
249 pkt->pts = AV_NOPTS_VALUE;
250 pkt->dts = AV_NOPTS_VALUE;
254 pkt->stream_index = 0;
255 pkt->destruct= av_destruct_packet_nofree;
258 int av_new_packet(AVPacket *pkt, int size)
261 if((unsigned)size > (unsigned)size + FF_INPUT_BUFFER_PADDING_SIZE)
262 return AVERROR(ENOMEM);
263 data = av_malloc(size + FF_INPUT_BUFFER_PADDING_SIZE);
265 return AVERROR(ENOMEM);
266 memset(data + size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
271 pkt->destruct = av_destruct_packet;
275 int av_get_packet(ByteIOContext *s, AVPacket *pkt, int size)
277 int ret= av_new_packet(pkt, size);
282 pkt->pos= url_ftell(s);
284 ret= get_buffer(s, pkt->data, size);
293 int av_dup_packet(AVPacket *pkt)
295 if (pkt->destruct != av_destruct_packet) {
297 /* We duplicate the packet and don't forget to add the padding again. */
298 if((unsigned)pkt->size > (unsigned)pkt->size + FF_INPUT_BUFFER_PADDING_SIZE)
299 return AVERROR(ENOMEM);
300 data = av_malloc(pkt->size + FF_INPUT_BUFFER_PADDING_SIZE);
302 return AVERROR(ENOMEM);
304 memcpy(data, pkt->data, pkt->size);
305 memset(data + pkt->size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
307 pkt->destruct = av_destruct_packet;
312 int av_filename_number_test(const char *filename)
315 return filename && (av_get_frame_filename(buf, sizeof(buf), filename, 1)>=0);
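/* Example (illustrative): the test succeeds only if the filename contains a
 * frame-number pattern that av_get_frame_filename() understands, e.g. "%03d":
 *
 *     av_filename_number_test("img%03d.png");   // nonzero
 *     av_filename_number_test("img.png");       // 0
 */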
318 static AVInputFormat *av_probe_input_format2(AVProbeData *pd, int is_opened, int *score_max)
320 AVInputFormat *fmt1, *fmt;
324 for(fmt1 = first_iformat; fmt1 != NULL; fmt1 = fmt1->next) {
325 if (!is_opened == !(fmt1->flags & AVFMT_NOFILE))
328 if (fmt1->read_probe) {
329 score = fmt1->read_probe(pd);
330 } else if (fmt1->extensions) {
331 if (match_ext(pd->filename, fmt1->extensions)) {
335 if (score > *score_max) {
338 }else if (score == *score_max)
344 AVInputFormat *av_probe_input_format(AVProbeData *pd, int is_opened){
346 return av_probe_input_format2(pd, is_opened, &score);
349 static int set_codec_from_probe_data(AVStream *st, AVProbeData *pd, int score)
352 fmt = av_probe_input_format2(pd, 1, &score);
355 if (!strcmp(fmt->name, "mp3")) {
356 st->codec->codec_id = CODEC_ID_MP3;
357 st->codec->codec_type = CODEC_TYPE_AUDIO;
358 } else if (!strcmp(fmt->name, "ac3")) {
359 st->codec->codec_id = CODEC_ID_AC3;
360 st->codec->codec_type = CODEC_TYPE_AUDIO;
361 } else if (!strcmp(fmt->name, "mpegvideo")) {
362 st->codec->codec_id = CODEC_ID_MPEG2VIDEO;
363 st->codec->codec_type = CODEC_TYPE_VIDEO;
364 } else if (!strcmp(fmt->name, "h264")) {
365 st->codec->codec_id = CODEC_ID_H264;
366 st->codec->codec_type = CODEC_TYPE_VIDEO;
372 /************************************************************/
373 /* input media file */
376 * Open a media file from an IO stream. 'fmt' must be specified.
378 static const char* format_to_name(void* ptr)
380 AVFormatContext* fc = (AVFormatContext*) ptr;
381 if(fc->iformat) return fc->iformat->name;
382 else if(fc->oformat) return fc->oformat->name;
386 #define OFFSET(x) offsetof(AVFormatContext,x)
387 #define DEFAULT 0 //should be NAN but it does not work as it is not a constant in glibc as required by ANSI/ISO C
388 //these names are too long to be readable
389 #define E AV_OPT_FLAG_ENCODING_PARAM
390 #define D AV_OPT_FLAG_DECODING_PARAM
392 static const AVOption options[]={
393 {"probesize", NULL, OFFSET(probesize), FF_OPT_TYPE_INT, 32000, 32, INT_MAX, D}, /* 32000 from mpegts.c: 1.0 second at 24Mbit/s */
394 {"muxrate", "set mux rate", OFFSET(mux_rate), FF_OPT_TYPE_INT, DEFAULT, 0, INT_MAX, E},
395 {"packetsize", "set packet size", OFFSET(packet_size), FF_OPT_TYPE_INT, DEFAULT, 0, INT_MAX, E},
396 {"fflags", NULL, OFFSET(flags), FF_OPT_TYPE_FLAGS, DEFAULT, INT_MIN, INT_MAX, D|E, "fflags"},
397 {"ignidx", "ignore index", 0, FF_OPT_TYPE_CONST, AVFMT_FLAG_IGNIDX, INT_MIN, INT_MAX, D, "fflags"},
398 {"genpts", "generate pts", 0, FF_OPT_TYPE_CONST, AVFMT_FLAG_GENPTS, INT_MIN, INT_MAX, D, "fflags"},
399 {"track", " set the track number", OFFSET(track), FF_OPT_TYPE_INT, DEFAULT, 0, INT_MAX, E},
400 {"year", "set the year", OFFSET(year), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, E},
401 {"analyzeduration", "how many microseconds are analyzed to estimate duration", OFFSET(max_analyze_duration), FF_OPT_TYPE_INT, 3*AV_TIME_BASE, 0, INT_MAX, D},
402 {"cryptokey", "decryption key", OFFSET(key), FF_OPT_TYPE_BINARY, 0, 0, 0, D},
403 {"indexmem", "max memory used for timestamp index (per stream)", OFFSET(max_index_size), FF_OPT_TYPE_INT, 1<<20, 0, INT_MAX, D},
404 {"rtbufsize", "max memory used for buffering real-time frames", OFFSET(max_picture_buffer), FF_OPT_TYPE_INT, 3041280, 0, INT_MAX, D}, /* defaults to 1s of 15fps 352x288 YUYV422 video */
405 {"fdebug", "print specific debug info", OFFSET(debug), FF_OPT_TYPE_FLAGS, DEFAULT, 0, INT_MAX, E|D, "fdebug"},
406 {"ts", NULL, 0, FF_OPT_TYPE_CONST, FF_FDEBUG_TS, INT_MIN, INT_MAX, E|D, "fdebug"},
414 static const AVClass av_format_context_class = { "AVFormatContext", format_to_name, options };
416 static void avformat_get_context_defaults(AVFormatContext *s)
418 memset(s, 0, sizeof(AVFormatContext));
420 s->av_class = &av_format_context_class;
422 av_opt_set_defaults(s);
425 AVFormatContext *av_alloc_format_context(void)
428 ic = av_malloc(sizeof(AVFormatContext));
430 avformat_get_context_defaults(ic);
431 ic->av_class = &av_format_context_class;
435 int av_open_input_stream(AVFormatContext **ic_ptr,
436 ByteIOContext *pb, const char *filename,
437 AVInputFormat *fmt, AVFormatParameters *ap)
441 AVFormatParameters default_ap;
445 memset(ap, 0, sizeof(default_ap));
448 if(!ap->prealloced_context)
449 ic = av_alloc_format_context();
453 err = AVERROR(ENOMEM);
458 ic->duration = AV_NOPTS_VALUE;
459 ic->start_time = AV_NOPTS_VALUE;
460 av_strlcpy(ic->filename, filename, sizeof(ic->filename));
462 /* allocate private data */
463 if (fmt->priv_data_size > 0) {
464 ic->priv_data = av_mallocz(fmt->priv_data_size);
465 if (!ic->priv_data) {
466 err = AVERROR(ENOMEM);
470 ic->priv_data = NULL;
473 if (ic->iformat->read_header) {
474 err = ic->iformat->read_header(ic, ap);
479 if (pb && !ic->data_offset)
480 ic->data_offset = url_ftell(ic->pb);
487 av_freep(&ic->priv_data);
488 for(i=0;i<ic->nb_streams;i++) {
489 AVStream *st = ic->streams[i];
491 av_free(st->priv_data);
492 av_free(st->codec->extradata);
502 /** size of probe buffer, for guessing file type from file contents */
503 #define PROBE_BUF_MIN 2048
504 #define PROBE_BUF_MAX (1<<20)
506 int av_open_input_file(AVFormatContext **ic_ptr, const char *filename,
509 AVFormatParameters *ap)
512 AVProbeData probe_data, *pd = &probe_data;
513 ByteIOContext *pb = NULL;
517 pd->filename = filename;
522 /* guess format if no file can be opened */
523 fmt = av_probe_input_format(pd, 0);
526 /* Do not open file if the format does not need it. XXX: specific
527 hack needed to handle RTSP/TCP */
528 if (!fmt || !(fmt->flags & AVFMT_NOFILE)) {
529 /* if no file needed do not try to open one */
530 if ((err=url_fopen(&pb, filename, URL_RDONLY)) < 0) {
534 url_setbufsize(pb, buf_size);
537 for(probe_size= PROBE_BUF_MIN; probe_size<=PROBE_BUF_MAX && !fmt; probe_size<<=1){
538 int score= probe_size < PROBE_BUF_MAX ? AVPROBE_SCORE_MAX/4 : 0;
539 /* read probe data */
540 pd->buf= av_realloc(pd->buf, probe_size + AVPROBE_PADDING_SIZE);
541 pd->buf_size = get_buffer(pb, pd->buf, probe_size);
542 memset(pd->buf+pd->buf_size, 0, AVPROBE_PADDING_SIZE);
543 if (url_fseek(pb, 0, SEEK_SET) < 0) {
545 if (url_fopen(&pb, filename, URL_RDONLY) < 0) {
551 /* guess file format */
552 fmt = av_probe_input_format2(pd, 1, &score);
557 /* if still no format found, error */
563 /* check filename in case an image number is expected */
564 if (fmt->flags & AVFMT_NEEDNUMBER) {
565 if (!av_filename_number_test(filename)) {
566 err = AVERROR_NUMEXPECTED;
570 err = av_open_input_stream(ic_ptr, pb, filename, fmt, ap);
583 /*******************************************************/
585 static AVPacket *add_to_pktbuf(AVPacketList **packet_buffer, AVPacket *pkt,
586 AVPacketList **plast_pktl){
587 AVPacketList *pktl = av_mallocz(sizeof(AVPacketList));
592 (*plast_pktl)->next = pktl;
594 *packet_buffer = pktl;
596 /* add the packet to the buffered packet list */
602 int av_read_packet(AVFormatContext *s, AVPacket *pkt)
608 AVPacketList *pktl = s->raw_packet_buffer;
612 if(s->streams[pkt->stream_index]->codec->codec_id != CODEC_ID_PROBE){
613 s->raw_packet_buffer = pktl->next;
620 ret= s->iformat->read_packet(s, pkt);
623 st= s->streams[pkt->stream_index];
625 switch(st->codec->codec_type){
626 case CODEC_TYPE_VIDEO:
627 if(s->video_codec_id) st->codec->codec_id= s->video_codec_id;
629 case CODEC_TYPE_AUDIO:
630 if(s->audio_codec_id) st->codec->codec_id= s->audio_codec_id;
632 case CODEC_TYPE_SUBTITLE:
633 if(s->subtitle_codec_id)st->codec->codec_id= s->subtitle_codec_id;
637 if(!pktl && st->codec->codec_id!=CODEC_ID_PROBE)
640 add_to_pktbuf(&s->raw_packet_buffer, pkt, &s->raw_packet_buffer_end);
642 if(st->codec->codec_id == CODEC_ID_PROBE){
643 AVProbeData *pd = &st->probe_data;
645 pd->buf = av_realloc(pd->buf, pd->buf_size+pkt->size+AVPROBE_PADDING_SIZE);
646 memcpy(pd->buf+pd->buf_size, pkt->data, pkt->size);
647 pd->buf_size += pkt->size;
648 memset(pd->buf+pd->buf_size, 0, AVPROBE_PADDING_SIZE);
650 if(av_log2(pd->buf_size) != av_log2(pd->buf_size - pkt->size)){
651 set_codec_from_probe_data(st, pd, 1);
652 if(st->codec->codec_id != CODEC_ID_PROBE){
661 /**********************************************************/
664 * Get the number of samples of an audio frame. Return -1 on error.
666 static int get_audio_frame_size(AVCodecContext *enc, int size)
670 if(enc->codec_id == CODEC_ID_VORBIS)
673 if (enc->frame_size <= 1) {
674 int bits_per_sample = av_get_bits_per_sample(enc->codec_id);
676 if (bits_per_sample) {
677 if (enc->channels == 0)
679 frame_size = (size << 3) / (bits_per_sample * enc->channels);
681 /* used for example by ADPCM codecs */
682 if (enc->bit_rate == 0)
684 frame_size = (size * 8 * enc->sample_rate) / enc->bit_rate;
687 frame_size = enc->frame_size;
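/* Worked example (assumed values, for illustration): for 16-bit stereo PCM,
 * av_get_bits_per_sample() yields 16, so a 4096-byte packet gives
 *     frame_size = (4096 * 8) / (16 * 2) = 1024 samples.
 * Codecs with a fixed frame_size (e.g. 1152 for MP3) return that value
 * directly, regardless of the packet size.
 */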
694 * Compute the frame duration as a fraction, *pnum / *pden, in seconds. Both are set to 0 if it is not available.
696 static void compute_frame_duration(int *pnum, int *pden, AVStream *st,
697 AVCodecParserContext *pc, AVPacket *pkt)
703 switch(st->codec->codec_type) {
704 case CODEC_TYPE_VIDEO:
705 if(st->time_base.num*1000LL > st->time_base.den){
706 *pnum = st->time_base.num;
707 *pden = st->time_base.den;
708 }else if(st->codec->time_base.num*1000LL > st->codec->time_base.den){
709 *pnum = st->codec->time_base.num;
710 *pden = st->codec->time_base.den;
711 if (pc && pc->repeat_pict) {
713 *pnum = (*pnum) * (2 + pc->repeat_pict);
717 case CODEC_TYPE_AUDIO:
718 frame_size = get_audio_frame_size(st->codec, pkt->size);
722 *pden = st->codec->sample_rate;
729 static int is_intra_only(AVCodecContext *enc){
730 if(enc->codec_type == CODEC_TYPE_AUDIO){
732 }else if(enc->codec_type == CODEC_TYPE_VIDEO){
733 switch(enc->codec_id){
735 case CODEC_ID_MJPEGB:
737 case CODEC_ID_RAWVIDEO:
738 case CODEC_ID_DVVIDEO:
739 case CODEC_ID_HUFFYUV:
740 case CODEC_ID_FFVHUFF:
751 static void update_initial_timestamps(AVFormatContext *s, int stream_index,
752 int64_t dts, int64_t pts)
754 AVStream *st= s->streams[stream_index];
755 AVPacketList *pktl= s->packet_buffer;
757 if(st->first_dts != AV_NOPTS_VALUE || dts == AV_NOPTS_VALUE || st->cur_dts == AV_NOPTS_VALUE)
760 st->first_dts= dts - st->cur_dts;
763 for(; pktl; pktl= pktl->next){
764 if(pktl->pkt.stream_index != stream_index)
766 //FIXME think more about this check
767 if(pktl->pkt.pts != AV_NOPTS_VALUE && pktl->pkt.pts == pktl->pkt.dts)
768 pktl->pkt.pts += st->first_dts;
770 if(pktl->pkt.dts != AV_NOPTS_VALUE)
771 pktl->pkt.dts += st->first_dts;
773 if(st->start_time == AV_NOPTS_VALUE && pktl->pkt.pts != AV_NOPTS_VALUE)
774 st->start_time= pktl->pkt.pts;
776 if (st->start_time == AV_NOPTS_VALUE)
777 st->start_time = pts;
780 static void update_initial_durations(AVFormatContext *s, AVStream *st, AVPacket *pkt)
782 AVPacketList *pktl= s->packet_buffer;
785 if(st->first_dts != AV_NOPTS_VALUE){
786 cur_dts= st->first_dts;
787 for(; pktl; pktl= pktl->next){
788 if(pktl->pkt.stream_index == pkt->stream_index){
789 if(pktl->pkt.pts != pktl->pkt.dts || pktl->pkt.dts != AV_NOPTS_VALUE || pktl->pkt.duration)
791 cur_dts -= pkt->duration;
794 pktl= s->packet_buffer;
795 st->first_dts = cur_dts;
796 }else if(st->cur_dts)
799 for(; pktl; pktl= pktl->next){
800 if(pktl->pkt.stream_index != pkt->stream_index)
802 if(pktl->pkt.pts == pktl->pkt.dts && pktl->pkt.dts == AV_NOPTS_VALUE
803 && !pktl->pkt.duration){
804 pktl->pkt.dts= cur_dts;
805 if(!st->codec->has_b_frames)
806 pktl->pkt.pts= cur_dts;
807 cur_dts += pkt->duration;
808 pktl->pkt.duration= pkt->duration;
812 if(st->first_dts == AV_NOPTS_VALUE)
813 st->cur_dts= cur_dts;
816 static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
817 AVCodecParserContext *pc, AVPacket *pkt)
819 int num, den, presentation_delayed, delay, i;
822 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && pkt->dts > pkt->pts && st->pts_wrap_bits<63
823 /*&& pkt->dts-(1LL<<st->pts_wrap_bits) < pkt->pts*/){
824 pkt->dts -= 1LL<<st->pts_wrap_bits;
827 if (pkt->duration == 0) {
828 compute_frame_duration(&num, &den, st, pc, pkt);
830 pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num);
832 if(pkt->duration != 0 && s->packet_buffer)
833 update_initial_durations(s, st, pkt);
837 /* correct timestamps with byte offset if demuxers only have timestamps
838 on packet boundaries */
839 if(pc && st->need_parsing == AVSTREAM_PARSE_TIMESTAMPS && pkt->size){
840 /* this will estimate bitrate based on this frame's duration and size */
841 offset = av_rescale(pc->offset, pkt->duration, pkt->size);
842 if(pkt->pts != AV_NOPTS_VALUE)
844 if(pkt->dts != AV_NOPTS_VALUE)
848 /* do we have a video B-frame ? */
849 delay= st->codec->has_b_frames;
850 presentation_delayed = 0;
851 /* XXX: need has_b_frames, but cannot get it if the codec is
854 pc && pc->pict_type != FF_B_TYPE)
855 presentation_delayed = 1;
856 /* This may be redundant, but it should not hurt. */
857 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts > pkt->dts)
858 presentation_delayed = 1;
860 // av_log(NULL, AV_LOG_DEBUG, "IN delayed:%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64" st:%d pc:%p\n", presentation_delayed, pkt->pts, pkt->dts, st->cur_dts, pkt->stream_index, pc);
861 /* interpolate PTS and DTS if they are not present */
862 if(delay==0 || (delay==1 && pc)){
863 if (presentation_delayed) {
864 /* DTS = decompression timestamp */
865 /* PTS = presentation timestamp */
866 if (pkt->dts == AV_NOPTS_VALUE)
867 pkt->dts = st->last_IP_pts;
868 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts);
869 if (pkt->dts == AV_NOPTS_VALUE)
870 pkt->dts = st->cur_dts;
872 /* this is tricky: the dts must be incremented by the duration
873 of the frame we are displaying, i.e. the last I- or P-frame */
874 if (st->last_IP_duration == 0)
875 st->last_IP_duration = pkt->duration;
876 if(pkt->dts != AV_NOPTS_VALUE)
877 st->cur_dts = pkt->dts + st->last_IP_duration;
878 st->last_IP_duration = pkt->duration;
879 st->last_IP_pts= pkt->pts;
880 /* cannot compute PTS if not present (we can compute it only
881 by knowing the future) */
882 } else if(pkt->pts != AV_NOPTS_VALUE || pkt->dts != AV_NOPTS_VALUE || pkt->duration){
883 if(pkt->pts != AV_NOPTS_VALUE && pkt->duration){
884 int64_t old_diff= FFABS(st->cur_dts - pkt->duration - pkt->pts);
885 int64_t new_diff= FFABS(st->cur_dts - pkt->pts);
886 if(old_diff < new_diff && old_diff < (pkt->duration>>3)){
887 pkt->pts += pkt->duration;
888 // av_log(NULL, AV_LOG_DEBUG, "id:%d old:%"PRId64" new:%"PRId64" dur:%d cur:%"PRId64" size:%d\n", pkt->stream_index, old_diff, new_diff, pkt->duration, st->cur_dts, pkt->size);
892 /* presentation is not delayed: PTS and DTS are the same */
893 if(pkt->pts == AV_NOPTS_VALUE)
895 update_initial_timestamps(s, pkt->stream_index, pkt->pts, pkt->pts);
896 if(pkt->pts == AV_NOPTS_VALUE)
897 pkt->pts = st->cur_dts;
899 if(pkt->pts != AV_NOPTS_VALUE)
900 st->cur_dts = pkt->pts + pkt->duration;
904 if(pkt->pts != AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY){
905 st->pts_buffer[0]= pkt->pts;
906 for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
907 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
908 if(pkt->dts == AV_NOPTS_VALUE)
909 pkt->dts= st->pts_buffer[0];
911 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts); // this should happen on the first packet
913 if(pkt->dts > st->cur_dts)
914 st->cur_dts = pkt->dts;
917 // av_log(NULL, AV_LOG_ERROR, "OUTdelayed:%d/%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64"\n", presentation_delayed, delay, pkt->pts, pkt->dts, st->cur_dts);
920 if(is_intra_only(st->codec))
921 pkt->flags |= PKT_FLAG_KEY;
924 /* keyframe computation */
925 if (pc->pict_type == FF_I_TYPE)
926 pkt->flags |= PKT_FLAG_KEY;
930 void av_destruct_packet_nofree(AVPacket *pkt)
932 pkt->data = NULL; pkt->size = 0;
935 static int av_read_frame_internal(AVFormatContext *s, AVPacket *pkt)
943 /* select current input stream component */
946 if (!st->need_parsing || !st->parser) {
947 /* no parsing needed: we just output the packet as is */
948 /* raw data support */
950 compute_pkt_fields(s, st, NULL, pkt);
953 } else if (s->cur_len > 0 && st->discard < AVDISCARD_ALL) {
954 len = av_parser_parse(st->parser, st->codec, &pkt->data, &pkt->size,
955 s->cur_ptr, s->cur_len,
956 s->cur_pkt.pts, s->cur_pkt.dts);
957 s->cur_pkt.pts = AV_NOPTS_VALUE;
958 s->cur_pkt.dts = AV_NOPTS_VALUE;
959 /* increment read pointer */
963 /* return packet if any */
966 pkt->pos = s->cur_pkt.pos; // Isn't quite accurate but close.
968 pkt->stream_index = st->index;
969 pkt->pts = st->parser->pts;
970 pkt->dts = st->parser->dts;
971 pkt->destruct = av_destruct_packet_nofree;
972 compute_pkt_fields(s, st, st->parser, pkt);
974 if((s->iformat->flags & AVFMT_GENERIC_INDEX) && pkt->flags & PKT_FLAG_KEY){
975 ff_reduce_index(s, st->index);
976 av_add_index_entry(st, st->parser->frame_offset, pkt->dts,
977 0, 0, AVINDEX_KEYFRAME);
984 av_free_packet(&s->cur_pkt);
988 /* read next packet */
989 ret = av_read_packet(s, &s->cur_pkt);
991 if (ret == AVERROR(EAGAIN))
993 /* return the last frames, if any */
994 for(i = 0; i < s->nb_streams; i++) {
996 if (st->parser && st->need_parsing) {
997 av_parser_parse(st->parser, st->codec,
998 &pkt->data, &pkt->size,
1000 AV_NOPTS_VALUE, AV_NOPTS_VALUE);
1005 /* no more packets: really terminate parsing */
1009 if(s->cur_pkt.pts != AV_NOPTS_VALUE &&
1010 s->cur_pkt.dts != AV_NOPTS_VALUE &&
1011 s->cur_pkt.pts < s->cur_pkt.dts){
1012 av_log(s, AV_LOG_WARNING, "Invalid timestamps stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d\n",
1013 s->cur_pkt.stream_index,
1017 // av_free_packet(&s->cur_pkt);
1021 st = s->streams[s->cur_pkt.stream_index];
1022 if(s->debug & FF_FDEBUG_TS)
1023 av_log(s, AV_LOG_DEBUG, "av_read_packet stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d, flags=%d\n",
1024 s->cur_pkt.stream_index,
1031 s->cur_ptr = s->cur_pkt.data;
1032 s->cur_len = s->cur_pkt.size;
1033 if (st->need_parsing && !st->parser) {
1034 st->parser = av_parser_init(st->codec->codec_id);
1036 /* no parser available: just output the raw packets */
1037 st->need_parsing = AVSTREAM_PARSE_NONE;
1038 }else if(st->need_parsing == AVSTREAM_PARSE_HEADERS){
1039 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
1041 if(st->parser && (s->iformat->flags & AVFMT_GENERIC_INDEX)){
1042 st->parser->next_frame_offset=
1043 st->parser->cur_offset= s->cur_pkt.pos;
1048 if(s->debug & FF_FDEBUG_TS)
1049 av_log(s, AV_LOG_DEBUG, "av_read_frame_internal stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d, flags=%d\n",
1059 int av_read_frame(AVFormatContext *s, AVPacket *pkt)
1063 const int genpts= s->flags & AVFMT_FLAG_GENPTS;
1066 pktl = s->packet_buffer;
1068 AVPacket *next_pkt= &pktl->pkt;
1070 if(genpts && next_pkt->dts != AV_NOPTS_VALUE){
1071 while(pktl && next_pkt->pts == AV_NOPTS_VALUE){
1072 if( pktl->pkt.stream_index == next_pkt->stream_index
1073 && next_pkt->dts < pktl->pkt.dts
1074 && pktl->pkt.pts != pktl->pkt.dts //not b frame
1075 /*&& pktl->pkt.dts != AV_NOPTS_VALUE*/){
1076 next_pkt->pts= pktl->pkt.dts;
1080 pktl = s->packet_buffer;
1083 if( next_pkt->pts != AV_NOPTS_VALUE
1084 || next_pkt->dts == AV_NOPTS_VALUE
1086 /* read packet from packet buffer, if there is data */
1088 s->packet_buffer = pktl->next;
1094 int ret= av_read_frame_internal(s, pkt);
1096 if(pktl && ret != AVERROR(EAGAIN)){
1103 if(av_dup_packet(add_to_pktbuf(&s->packet_buffer, pkt,
1104 &s->packet_buffer_end)) < 0)
1105 return AVERROR(ENOMEM);
1107 assert(!s->packet_buffer);
1108 return av_read_frame_internal(s, pkt);
1113 /* XXX: remove the packet queue */
1114 static void flush_packet_queue(AVFormatContext *s)
1119 pktl = s->packet_buffer;
1122 s->packet_buffer = pktl->next;
1123 av_free_packet(&pktl->pkt);
1128 /*******************************************************/
1131 int av_find_default_stream_index(AVFormatContext *s)
1133 int first_audio_index = -1;
1137 if (s->nb_streams <= 0)
1139 for(i = 0; i < s->nb_streams; i++) {
1141 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
1144 if (first_audio_index < 0 && st->codec->codec_type == CODEC_TYPE_AUDIO)
1145 first_audio_index = i;
1147 return first_audio_index >= 0 ? first_audio_index : 0;
1151 * Flush the frame reader.
1153 static void av_read_frame_flush(AVFormatContext *s)
1158 flush_packet_queue(s);
1160 /* free previous packet */
1162 if (s->cur_st->parser)
1163 av_free_packet(&s->cur_pkt);
1170 /* for each stream, reset read state */
1171 for(i = 0; i < s->nb_streams; i++) {
1175 av_parser_close(st->parser);
1178 st->last_IP_pts = AV_NOPTS_VALUE;
1179 st->cur_dts = AV_NOPTS_VALUE; /* we set the current DTS to an unspecified origin */
1183 void av_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp){
1186 for(i = 0; i < s->nb_streams; i++) {
1187 AVStream *st = s->streams[i];
1189 st->cur_dts = av_rescale(timestamp,
1190 st->time_base.den * (int64_t)ref_st->time_base.num,
1191 st->time_base.num * (int64_t)ref_st->time_base.den);
1195 void ff_reduce_index(AVFormatContext *s, int stream_index)
1197 AVStream *st= s->streams[stream_index];
1198 unsigned int max_entries= s->max_index_size / sizeof(AVIndexEntry);
1200 if((unsigned)st->nb_index_entries >= max_entries){
1202 for(i=0; 2*i<st->nb_index_entries; i++)
1203 st->index_entries[i]= st->index_entries[2*i];
1204 st->nb_index_entries= i;
1208 int av_add_index_entry(AVStream *st,
1209 int64_t pos, int64_t timestamp, int size, int distance, int flags)
1211 AVIndexEntry *entries, *ie;
1214 if((unsigned)st->nb_index_entries + 1 >= UINT_MAX / sizeof(AVIndexEntry))
1217 entries = av_fast_realloc(st->index_entries,
1218 &st->index_entries_allocated_size,
1219 (st->nb_index_entries + 1) *
1220 sizeof(AVIndexEntry));
1224 st->index_entries= entries;
1226 index= av_index_search_timestamp(st, timestamp, AVSEEK_FLAG_ANY);
1229 index= st->nb_index_entries++;
1230 ie= &entries[index];
1231 assert(index==0 || ie[-1].timestamp < timestamp);
1233 ie= &entries[index];
1234 if(ie->timestamp != timestamp){
1235 if(ie->timestamp <= timestamp)
1237 memmove(entries + index + 1, entries + index, sizeof(AVIndexEntry)*(st->nb_index_entries - index));
1238 st->nb_index_entries++;
1239 }else if(ie->pos == pos && distance < ie->min_distance) //do not reduce the distance
1240 distance= ie->min_distance;
1244 ie->timestamp = timestamp;
1245 ie->min_distance= distance;
1252 int av_index_search_timestamp(AVStream *st, int64_t wanted_timestamp,
1255 AVIndexEntry *entries= st->index_entries;
1256 int nb_entries= st->nb_index_entries;
1265 timestamp = entries[m].timestamp;
1266 if(timestamp >= wanted_timestamp)
1268 if(timestamp <= wanted_timestamp)
1271 m= (flags & AVSEEK_FLAG_BACKWARD) ? a : b;
1273 if(!(flags & AVSEEK_FLAG_ANY)){
1274 while(m>=0 && m<nb_entries && !(entries[m].flags & AVINDEX_KEYFRAME)){
1275 m += (flags & AVSEEK_FLAG_BACKWARD) ? -1 : 1;
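/* Usage sketch (hypothetical caller): locate the keyframe at or before a
 * target timestamp and jump to it, as av_seek_frame_generic() does below.
 *
 *     int idx = av_index_search_timestamp(st, ts, AVSEEK_FLAG_BACKWARD);
 *     if (idx >= 0) {
 *         AVIndexEntry *ie = &st->index_entries[idx];
 *         url_fseek(s->pb, ie->pos, SEEK_SET);
 *         av_update_cur_dts(s, st, ie->timestamp);
 *     }
 */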
1286 int av_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts, int flags){
1287 AVInputFormat *avif= s->iformat;
1288 int64_t pos_min, pos_max, pos, pos_limit;
1289 int64_t ts_min, ts_max, ts;
1293 if (stream_index < 0)
1297 av_log(s, AV_LOG_DEBUG, "read_seek: %d %"PRId64"\n", stream_index, target_ts);
1301 ts_min= AV_NOPTS_VALUE;
1302 pos_limit= -1; //gcc falsely says it may be uninitialized
1304 st= s->streams[stream_index];
1305 if(st->index_entries){
1308 index= av_index_search_timestamp(st, target_ts, flags | AVSEEK_FLAG_BACKWARD); //FIXME whole func must be checked for non-keyframe entries in index case, especially read_timestamp()
1309 index= FFMAX(index, 0);
1310 e= &st->index_entries[index];
1312 if(e->timestamp <= target_ts || e->pos == e->min_distance){
1314 ts_min= e->timestamp;
1316 av_log(s, AV_LOG_DEBUG, "using cached pos_min=0x%"PRIx64" dts_min=%"PRId64"\n",
1323 index= av_index_search_timestamp(st, target_ts, flags & ~AVSEEK_FLAG_BACKWARD);
1324 assert(index < st->nb_index_entries);
1326 e= &st->index_entries[index];
1327 assert(e->timestamp >= target_ts);
1329 ts_max= e->timestamp;
1330 pos_limit= pos_max - e->min_distance;
1332 av_log(s, AV_LOG_DEBUG, "using cached pos_max=0x%"PRIx64" pos_limit=0x%"PRIx64" dts_max=%"PRId64"\n",
1333 pos_max,pos_limit, ts_max);
1338 pos= av_gen_search(s, stream_index, target_ts, pos_min, pos_max, pos_limit, ts_min, ts_max, flags, &ts, avif->read_timestamp);
1343 url_fseek(s->pb, pos, SEEK_SET);
1345 av_update_cur_dts(s, st, ts);
1350 int64_t av_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts, int64_t pos_min, int64_t pos_max, int64_t pos_limit, int64_t ts_min, int64_t ts_max, int flags, int64_t *ts_ret, int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t )){
1352 int64_t start_pos, filesize;
1356 av_log(s, AV_LOG_DEBUG, "gen_seek: %d %"PRId64"\n", stream_index, target_ts);
1359 if(ts_min == AV_NOPTS_VALUE){
1360 pos_min = s->data_offset;
1361 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1362 if (ts_min == AV_NOPTS_VALUE)
1366 if(ts_max == AV_NOPTS_VALUE){
1368 filesize = url_fsize(s->pb);
1369 pos_max = filesize - 1;
1372 ts_max = read_timestamp(s, stream_index, &pos_max, pos_max + step);
1374 }while(ts_max == AV_NOPTS_VALUE && pos_max >= step);
1375 if (ts_max == AV_NOPTS_VALUE)
1379 int64_t tmp_pos= pos_max + 1;
1380 int64_t tmp_ts= read_timestamp(s, stream_index, &tmp_pos, INT64_MAX);
1381 if(tmp_ts == AV_NOPTS_VALUE)
1385 if(tmp_pos >= filesize)
1391 if(ts_min > ts_max){
1393 }else if(ts_min == ts_max){
1398 while (pos_min < pos_limit) {
1400 av_log(s, AV_LOG_DEBUG, "pos_min=0x%"PRIx64" pos_max=0x%"PRIx64" dts_min=%"PRId64" dts_max=%"PRId64"\n",
1404 assert(pos_limit <= pos_max);
1407 int64_t approximate_keyframe_distance= pos_max - pos_limit;
1408 // interpolate position (better than dichotomy)
1409 pos = av_rescale(target_ts - ts_min, pos_max - pos_min, ts_max - ts_min)
1410 + pos_min - approximate_keyframe_distance;
1411 }else if(no_change==1){
1412 // bisection, if interpolation failed to change min or max pos last time
1413 pos = (pos_min + pos_limit)>>1;
1415 /* linear search if bisection failed, can only happen if there
1416 are very few or no keyframes between min/max */
1421 else if(pos > pos_limit)
1425 ts = read_timestamp(s, stream_index, &pos, INT64_MAX); //may pass pos_limit instead of -1
1431 av_log(s, AV_LOG_DEBUG, "%"PRId64" %"PRId64" %"PRId64" / %"PRId64" %"PRId64" %"PRId64" target:%"PRId64" limit:%"PRId64" start:%"PRId64" noc:%d\n", pos_min, pos, pos_max, ts_min, ts, ts_max, target_ts, pos_limit, start_pos, no_change);
1433 if(ts == AV_NOPTS_VALUE){
1434 av_log(s, AV_LOG_ERROR, "read_timestamp() failed in the middle\n");
1437 assert(ts != AV_NOPTS_VALUE);
1438 if (target_ts <= ts) {
1439 pos_limit = start_pos - 1;
1443 if (target_ts >= ts) {
1449 pos = (flags & AVSEEK_FLAG_BACKWARD) ? pos_min : pos_max;
1450 ts = (flags & AVSEEK_FLAG_BACKWARD) ? ts_min : ts_max;
1453 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1455 ts_max = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1456 av_log(s, AV_LOG_DEBUG, "pos=0x%"PRIx64" %"PRId64"<=%"PRId64"<=%"PRId64"\n",
1457 pos, ts_min, target_ts, ts_max);
1463 static int av_seek_frame_byte(AVFormatContext *s, int stream_index, int64_t pos, int flags){
1464 int64_t pos_min, pos_max;
1468 if (stream_index < 0)
1471 st= s->streams[stream_index];
1474 pos_min = s->data_offset;
1475 pos_max = url_fsize(s->pb) - 1;
1477 if (pos < pos_min) pos= pos_min;
1478 else if(pos > pos_max) pos= pos_max;
1480 url_fseek(s->pb, pos, SEEK_SET);
1483 av_update_cur_dts(s, st, ts);
1488 static int av_seek_frame_generic(AVFormatContext *s,
1489 int stream_index, int64_t timestamp, int flags)
1495 st = s->streams[stream_index];
1497 index = av_index_search_timestamp(st, timestamp, flags);
1499 if(index < 0 || index==st->nb_index_entries-1){
1503 if(st->nb_index_entries){
1504 assert(st->index_entries);
1505 ie= &st->index_entries[st->nb_index_entries-1];
1506 if ((ret = url_fseek(s->pb, ie->pos, SEEK_SET)) < 0)
1508 av_update_cur_dts(s, st, ie->timestamp);
1510 if ((ret = url_fseek(s->pb, 0, SEEK_SET)) < 0)
1514 int ret = av_read_frame(s, &pkt);
1517 av_free_packet(&pkt);
1518 if(stream_index == pkt.stream_index){
1519 if((pkt.flags & PKT_FLAG_KEY) && pkt.dts > timestamp)
1523 index = av_index_search_timestamp(st, timestamp, flags);
1528 av_read_frame_flush(s);
1529 if (s->iformat->read_seek){
1530 if(s->iformat->read_seek(s, stream_index, timestamp, flags) >= 0)
1533 ie = &st->index_entries[index];
1534 if ((ret = url_fseek(s->pb, ie->pos, SEEK_SET)) < 0)
1536 av_update_cur_dts(s, st, ie->timestamp);
1541 int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
1546 av_read_frame_flush(s);
1548 if(flags & AVSEEK_FLAG_BYTE)
1549 return av_seek_frame_byte(s, stream_index, timestamp, flags);
1551 if(stream_index < 0){
1552 stream_index= av_find_default_stream_index(s);
1553 if(stream_index < 0)
1556 st= s->streams[stream_index];
1557 /* timestamp for default must be expressed in AV_TIME_BASE units */
1558 timestamp = av_rescale(timestamp, st->time_base.den, AV_TIME_BASE * (int64_t)st->time_base.num);
1561 /* first, we try the format specific seek */
1562 if (s->iformat->read_seek)
1563 ret = s->iformat->read_seek(s, stream_index, timestamp, flags);
1570 if(s->iformat->read_timestamp)
1571 return av_seek_frame_binary(s, stream_index, timestamp, flags);
1573 return av_seek_frame_generic(s, stream_index, timestamp, flags);
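/* Hypothetical caller sketch: seek 10 seconds into the file. With a negative
 * stream_index the timestamp is given in AV_TIME_BASE units and rescaled to
 * the default stream's time base by av_seek_frame() itself.
 *
 *     if (av_seek_frame(ic, -1, 10 * (int64_t)AV_TIME_BASE,
 *                       AVSEEK_FLAG_BACKWARD) < 0)
 *         fprintf(stderr, "seek failed\n");
 */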
1576 /*******************************************************/
1579 * Returns TRUE if at least one stream in the file has an accurate duration.
1581 * @return TRUE if the stream has accurate duration for at least one component.
1583 static int av_has_duration(AVFormatContext *ic)
1588 for(i = 0;i < ic->nb_streams; i++) {
1589 st = ic->streams[i];
1590 if (st->duration != AV_NOPTS_VALUE)
1597 * Estimate the file-level timings from those of the individual streams.
1599 * Also computes the global bitrate if possible.
1601 static void av_update_stream_timings(AVFormatContext *ic)
1603 int64_t start_time, start_time1, end_time, end_time1;
1604 int64_t duration, duration1;
1608 start_time = INT64_MAX;
1609 end_time = INT64_MIN;
1610 duration = INT64_MIN;
1611 for(i = 0;i < ic->nb_streams; i++) {
1612 st = ic->streams[i];
1613 if (st->start_time != AV_NOPTS_VALUE && st->time_base.den) {
1614 start_time1= av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q);
1615 if (start_time1 < start_time)
1616 start_time = start_time1;
1617 if (st->duration != AV_NOPTS_VALUE) {
1618 end_time1 = start_time1
1619 + av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
1620 if (end_time1 > end_time)
1621 end_time = end_time1;
1624 if (st->duration != AV_NOPTS_VALUE) {
1625 duration1 = av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
1626 if (duration1 > duration)
1627 duration = duration1;
1630 if (start_time != INT64_MAX) {
1631 ic->start_time = start_time;
1632 if (end_time != INT64_MIN) {
1633 if (end_time - start_time > duration)
1634 duration = end_time - start_time;
1637 if (duration != INT64_MIN) {
1638 ic->duration = duration;
1639 if (ic->file_size > 0) {
1640 /* compute the bitrate */
1641 ic->bit_rate = (double)ic->file_size * 8.0 * AV_TIME_BASE /
1642 (double)ic->duration;
1647 static void fill_all_stream_timings(AVFormatContext *ic)
1652 av_update_stream_timings(ic);
1653 for(i = 0;i < ic->nb_streams; i++) {
1654 st = ic->streams[i];
1655 if (st->start_time == AV_NOPTS_VALUE) {
1656 if(ic->start_time != AV_NOPTS_VALUE)
1657 st->start_time = av_rescale_q(ic->start_time, AV_TIME_BASE_Q, st->time_base);
1658 if(ic->duration != AV_NOPTS_VALUE)
1659 st->duration = av_rescale_q(ic->duration, AV_TIME_BASE_Q, st->time_base);
1664 static void av_estimate_timings_from_bit_rate(AVFormatContext *ic)
1666 int64_t filesize, duration;
1670 /* if bit_rate is already set, we believe it */
1671 if (ic->bit_rate == 0) {
1673 for(i=0;i<ic->nb_streams;i++) {
1674 st = ic->streams[i];
1675 bit_rate += st->codec->bit_rate;
1677 ic->bit_rate = bit_rate;
1680 /* if duration is already set, we believe it */
1681 if (ic->duration == AV_NOPTS_VALUE &&
1682 ic->bit_rate != 0 &&
1683 ic->file_size != 0) {
1684 filesize = ic->file_size;
1686 for(i = 0; i < ic->nb_streams; i++) {
1687 st = ic->streams[i];
1688 duration= av_rescale(8*filesize, st->time_base.den, ic->bit_rate*(int64_t)st->time_base.num);
1689 if (st->duration == AV_NOPTS_VALUE)
1690 st->duration = duration;
1696 #define DURATION_MAX_READ_SIZE 250000
1698 /* only usable for MPEG-PS streams */
1699 static void av_estimate_timings_from_pts(AVFormatContext *ic, offset_t old_offset)
1701 AVPacket pkt1, *pkt = &pkt1;
1703 int read_size, i, ret;
1705 int64_t filesize, offset, duration;
1707 /* free previous packet */
1708 if (ic->cur_st && ic->cur_st->parser)
1709 av_free_packet(&ic->cur_pkt);
1712 /* flush packet queue */
1713 flush_packet_queue(ic);
1715 for(i=0;i<ic->nb_streams;i++) {
1716 st = ic->streams[i];
1718 av_parser_close(st->parser);
1723 /* we read the first packets to get the first PTS (not fully
1724 accurate, but it is enough for now) */
1725 url_fseek(ic->pb, 0, SEEK_SET);
1728 if (read_size >= DURATION_MAX_READ_SIZE)
1730 /* if all info is available, we can stop */
1731 for(i = 0;i < ic->nb_streams; i++) {
1732 st = ic->streams[i];
1733 if (st->start_time == AV_NOPTS_VALUE)
1736 if (i == ic->nb_streams)
1739 ret = av_read_packet(ic, pkt);
1742 read_size += pkt->size;
1743 st = ic->streams[pkt->stream_index];
1744 if (pkt->pts != AV_NOPTS_VALUE) {
1745 if (st->start_time == AV_NOPTS_VALUE)
1746 st->start_time = pkt->pts;
1748 av_free_packet(pkt);
1751 /* estimate the end time (duration) */
1752 /* XXX: may need to support wrapping */
1753 filesize = ic->file_size;
1754 offset = filesize - DURATION_MAX_READ_SIZE;
1758 url_fseek(ic->pb, offset, SEEK_SET);
1761 if (read_size >= DURATION_MAX_READ_SIZE)
1764 ret = av_read_packet(ic, pkt);
1767 read_size += pkt->size;
1768 st = ic->streams[pkt->stream_index];
1769 if (pkt->pts != AV_NOPTS_VALUE &&
1770 st->start_time != AV_NOPTS_VALUE) {
1771 end_time = pkt->pts;
1772 duration = end_time - st->start_time;
1774 if (st->duration == AV_NOPTS_VALUE ||
1775 st->duration < duration)
1776 st->duration = duration;
1779 av_free_packet(pkt);
1782 fill_all_stream_timings(ic);
1784 url_fseek(ic->pb, old_offset, SEEK_SET);
1785 for(i=0; i<ic->nb_streams; i++){
1787 st->cur_dts= st->first_dts;
1788 st->last_IP_pts = AV_NOPTS_VALUE;
1792 static void av_estimate_timings(AVFormatContext *ic, offset_t old_offset)
1796 /* get the file size, if possible */
1797 if (ic->iformat->flags & AVFMT_NOFILE) {
1800 file_size = url_fsize(ic->pb);
1804 ic->file_size = file_size;
1806 if ((!strcmp(ic->iformat->name, "mpeg") ||
1807 !strcmp(ic->iformat->name, "mpegts")) &&
1808 file_size && !url_is_streamed(ic->pb)) {
1809 /* get accurate estimate from the PTSes */
1810 av_estimate_timings_from_pts(ic, old_offset);
1811 } else if (av_has_duration(ic)) {
1812 /* at least one component has timings - we use them for all the components */
1814 fill_all_stream_timings(ic);
1816 /* less precise: use bitrate info */
1817 av_estimate_timings_from_bit_rate(ic);
1819 av_update_stream_timings(ic);
1825 for(i = 0;i < ic->nb_streams; i++) {
1826 st = ic->streams[i];
1827 printf("%d: start_time: %0.3f duration: %0.3f\n",
1828 i, (double)st->start_time / AV_TIME_BASE,
1829 (double)st->duration / AV_TIME_BASE);
1831 printf("stream: start_time: %0.3f duration: %0.3f bitrate=%d kb/s\n",
1832 (double)ic->start_time / AV_TIME_BASE,
1833 (double)ic->duration / AV_TIME_BASE,
1834 ic->bit_rate / 1000);
1839 static int has_codec_parameters(AVCodecContext *enc)
1842 switch(enc->codec_type) {
1843 case CODEC_TYPE_AUDIO:
1844 val = enc->sample_rate && enc->channels;
1845 if(!enc->frame_size &&
1846 (enc->codec_id == CODEC_ID_VORBIS ||
1847 enc->codec_id == CODEC_ID_AAC))
1850 case CODEC_TYPE_VIDEO:
1851 val = enc->width && enc->pix_fmt != PIX_FMT_NONE;
1857 return enc->codec_id != CODEC_ID_NONE && val != 0;
1860 static int try_decode_frame(AVStream *st, const uint8_t *data, int size)
1864 int got_picture, data_size, ret=0;
1867 if(!st->codec->codec){
1868 codec = avcodec_find_decoder(st->codec->codec_id);
1871 ret = avcodec_open(st->codec, codec);
1876 if(!has_codec_parameters(st->codec)){
1877 switch(st->codec->codec_type) {
1878 case CODEC_TYPE_VIDEO:
1879 ret = avcodec_decode_video(st->codec, &picture,
1880 &got_picture, data, size);
1882 case CODEC_TYPE_AUDIO:
1883 data_size = FFMAX(size, AVCODEC_MAX_AUDIO_FRAME_SIZE);
1884 samples = av_malloc(data_size);
1887 ret = avcodec_decode_audio2(st->codec, samples,
1888 &data_size, data, size);
1899 unsigned int codec_get_tag(const AVCodecTag *tags, int id)
1901 while (tags->id != CODEC_ID_NONE) {
1909 enum CodecID codec_get_id(const AVCodecTag *tags, unsigned int tag)
1912 for(i=0; tags[i].id != CODEC_ID_NONE;i++) {
1913 if(tag == tags[i].tag)
1916 for(i=0; tags[i].id != CODEC_ID_NONE; i++) {
1917 if( toupper((tag >> 0)&0xFF) == toupper((tags[i].tag >> 0)&0xFF)
1918 && toupper((tag >> 8)&0xFF) == toupper((tags[i].tag >> 8)&0xFF)
1919 && toupper((tag >>16)&0xFF) == toupper((tags[i].tag >>16)&0xFF)
1920 && toupper((tag >>24)&0xFF) == toupper((tags[i].tag >>24)&0xFF))
1923 return CODEC_ID_NONE;
1926 unsigned int av_codec_get_tag(const AVCodecTag *tags[4], enum CodecID id)
1929 for(i=0; tags && tags[i]; i++){
1930 int tag= codec_get_tag(tags[i], id);
1936 enum CodecID av_codec_get_id(const AVCodecTag *tags[4], unsigned int tag)
1939 for(i=0; tags && tags[i]; i++){
1940 enum CodecID id= codec_get_id(tags[i], tag);
1941 if(id!=CODEC_ID_NONE) return id;
1943 return CODEC_ID_NONE;
1946 static void compute_chapters_end(AVFormatContext *s)
1950 for (i=0; i+1<s->nb_chapters; i++)
1951 if (s->chapters[i]->end == AV_NOPTS_VALUE) {
1952 assert(s->chapters[i]->start <= s->chapters[i+1]->start);
1953 assert(!av_cmp_q(s->chapters[i]->time_base, s->chapters[i+1]->time_base));
1954 s->chapters[i]->end = s->chapters[i+1]->start;
1957 if (s->nb_chapters && s->chapters[i]->end == AV_NOPTS_VALUE) {
1958 assert(s->start_time != AV_NOPTS_VALUE);
1959 assert(s->duration > 0);
1960 s->chapters[i]->end = av_rescale_q(s->start_time + s->duration,
1962 s->chapters[i]->time_base);
1966 /* absolute maximum size we read until we abort */
1967 #define MAX_READ_SIZE 5000000
1969 #define MAX_STD_TIMEBASES (60*12+5)
1970 static int get_std_framerate(int i){
1971 if(i<60*12) return i*1001;
1972 else return ((const int[]){24,30,60,12,15})[i-60*12]*1000*12;
1976 * Is the time base unreliable?
1977 * This is a heuristic to balance between quick acceptance of the values in
1978 * the headers vs. some extra checks.
1979 * Old DivX and Xvid often have nonsense timebases like 1fps or 2fps.
1980 * MPEG-2 commonly misuses field repeat flags to store different framerates.
1981 * And there are "variable" fps files this needs to detect as well.
1983 static int tb_unreliable(AVCodecContext *c){
1984 if( c->time_base.den >= 101L*c->time_base.num
1985 || c->time_base.den < 5L*c->time_base.num
1986 /* || c->codec_tag == ff_get_fourcc("DIVX")
1987 || c->codec_tag == ff_get_fourcc("XVID")*/
1988 || c->codec_id == CODEC_ID_MPEG2VIDEO)
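/* Illustrative examples of what the heuristic above rejects or accepts:
 *     time_base 1/1    -> den < 5*num    -> unreliable ("1 fps" DivX/Xvid header)
 *     time_base 1/1000 -> den >= 101*num -> unreliable (millisecond tick rate)
 *     time_base 1/25   -> neither bound, not MPEG-2 -> trusted as 25 fps
 */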
1993 int av_find_stream_info(AVFormatContext *ic)
1995 int i, count, ret, read_size, j;
1997 AVPacket pkt1, *pkt;
1998 int64_t last_dts[MAX_STREAMS];
1999 int duration_count[MAX_STREAMS]={0};
2000 double (*duration_error)[MAX_STD_TIMEBASES];
2001 offset_t old_offset = url_ftell(ic->pb);
2002 int64_t codec_info_duration[MAX_STREAMS]={0};
2003 int codec_info_nb_frames[MAX_STREAMS]={0};
2005 duration_error = av_mallocz(MAX_STREAMS * sizeof(*duration_error));
2006 if (!duration_error) return AVERROR(ENOMEM);
2008 for(i=0;i<ic->nb_streams;i++) {
2009 st = ic->streams[i];
2010 if(st->codec->codec_type == CODEC_TYPE_VIDEO){
2011 /* if(!st->time_base.num)
2013 if(!st->codec->time_base.num)
2014 st->codec->time_base= st->time_base;
2016 //only for the split stuff
2018 st->parser = av_parser_init(st->codec->codec_id);
2019 if(st->need_parsing == AVSTREAM_PARSE_HEADERS && st->parser){
2020 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
2025 for(i=0;i<MAX_STREAMS;i++){
2026 last_dts[i]= AV_NOPTS_VALUE;
2032 /* check if one codec still needs to be handled */
2033 for(i=0;i<ic->nb_streams;i++) {
2034 st = ic->streams[i];
2035 if (!has_codec_parameters(st->codec))
2037 /* variable fps and no guess at the real fps */
2038 if( tb_unreliable(st->codec)
2039 && duration_count[i]<20 && st->codec->codec_type == CODEC_TYPE_VIDEO)
2041 if(st->parser && st->parser->parser->split && !st->codec->extradata)
2043 if(st->first_dts == AV_NOPTS_VALUE)
2046 if (i == ic->nb_streams) {
2047 /* NOTE: if the format has no header, then we need to read
2048 some packets to get most of the streams, so we cannot stop here */
2050 if (!(ic->ctx_flags & AVFMTCTX_NOHEADER)) {
2051 /* if we found the info for all the codecs, we can stop */
2056 /* we did not get all the codec info, but we read too much data */
2057 if (read_size >= MAX_READ_SIZE) {
2062 /* NOTE: a new stream can be added there if no header in file
2063 (AVFMTCTX_NOHEADER) */
2064 ret = av_read_frame_internal(ic, &pkt1);
2067 ret = -1; /* we could not get all the codec parameters before EOF */
2068 for(i=0;i<ic->nb_streams;i++) {
2069 st = ic->streams[i];
2070 if (!has_codec_parameters(st->codec)){
2072 avcodec_string(buf, sizeof(buf), st->codec, 0);
2073 av_log(ic, AV_LOG_INFO, "Could not find codec parameters (%s)\n", buf);
2081 pkt= add_to_pktbuf(&ic->packet_buffer, &pkt1, &ic->packet_buffer_end);
2082 if(av_dup_packet(pkt) < 0) {
2083 av_free(duration_error);
2084 return AVERROR(ENOMEM);
2087 read_size += pkt->size;
2089 st = ic->streams[pkt->stream_index];
2090 if(codec_info_nb_frames[st->index]>1)
2091 codec_info_duration[st->index] += pkt->duration;
2092 if (pkt->duration != 0)
2093 codec_info_nb_frames[st->index]++;
2096 int index= pkt->stream_index;
2097 int64_t last= last_dts[index];
2098 int64_t duration= pkt->dts - last;
2100 if(pkt->dts != AV_NOPTS_VALUE && last != AV_NOPTS_VALUE && duration>0){
2101 double dur= duration * av_q2d(st->time_base);
2103 // if(st->codec->codec_type == CODEC_TYPE_VIDEO)
2104 // av_log(NULL, AV_LOG_ERROR, "%f\n", dur);
2105 if(duration_count[index] < 2)
2106 memset(duration_error[index], 0, sizeof(*duration_error));
2107 for(i=1; i<MAX_STD_TIMEBASES; i++){
2108 int framerate= get_std_framerate(i);
2109 int ticks= lrintf(dur*framerate/(1001*12));
2110 double error= dur - ticks*1001*12/(double)framerate;
2111 duration_error[index][i] += error*error;
2113 duration_count[index]++;
2115 if(last == AV_NOPTS_VALUE || duration_count[index]<=1)
2116 last_dts[pkt->stream_index]= pkt->dts;
2118 if(st->parser && st->parser->parser->split && !st->codec->extradata){
2119 int i= st->parser->parser->split(st->codec, pkt->data, pkt->size);
2121 st->codec->extradata_size= i;
2122 st->codec->extradata= av_malloc(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
2123 memcpy(st->codec->extradata, pkt->data, st->codec->extradata_size);
2124 memset(st->codec->extradata + i, 0, FF_INPUT_BUFFER_PADDING_SIZE);
2128 /* if still no information, we try to open the codec and to
2129 decompress the frame. We try to avoid that in most cases as
2130 it takes longer and uses more memory. For MPEG-4, we need to
2131 decompress for QuickTime. */
2132 if (!has_codec_parameters(st->codec) /*&&
2133 (st->codec->codec_id == CODEC_ID_FLV1 ||
2134 st->codec->codec_id == CODEC_ID_H264 ||
2135 st->codec->codec_id == CODEC_ID_H263 ||
2136 st->codec->codec_id == CODEC_ID_H261 ||
2137 st->codec->codec_id == CODEC_ID_VORBIS ||
2138 st->codec->codec_id == CODEC_ID_MJPEG ||
2139 st->codec->codec_id == CODEC_ID_PNG ||
2140 st->codec->codec_id == CODEC_ID_PAM ||
2141 st->codec->codec_id == CODEC_ID_PGM ||
2142 st->codec->codec_id == CODEC_ID_PGMYUV ||
2143 st->codec->codec_id == CODEC_ID_PBM ||
2144 st->codec->codec_id == CODEC_ID_PPM ||
2145 st->codec->codec_id == CODEC_ID_SHORTEN ||
2146 (st->codec->codec_id == CODEC_ID_MPEG4 && !st->need_parsing))*/)
2147 try_decode_frame(st, pkt->data, pkt->size);
2149 if (st->time_base.den > 0 && av_rescale_q(codec_info_duration[st->index], st->time_base, AV_TIME_BASE_Q) >= ic->max_analyze_duration) {
2155 // close codecs which were opened in try_decode_frame()
2156 for(i=0;i<ic->nb_streams;i++) {
2157 st = ic->streams[i];
2158 if(st->codec->codec)
2159 avcodec_close(st->codec);
2161 for(i=0;i<ic->nb_streams;i++) {
2162 st = ic->streams[i];
2163 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
2164 if(st->codec->codec_id == CODEC_ID_RAWVIDEO && !st->codec->codec_tag && !st->codec->bits_per_sample)
2165 st->codec->codec_tag= avcodec_pix_fmt_to_codec_tag(st->codec->pix_fmt);
2167 if(duration_count[i]
2168 && tb_unreliable(st->codec) /*&&
2169 //FIXME we should not special-case MPEG-2, but this needs testing with non-MPEG-2 ...
2170 st->time_base.num*duration_sum[i]/duration_count[i]*101LL > st->time_base.den*/){
2171 double best_error= 2*av_q2d(st->time_base);
2172 best_error= best_error*best_error*duration_count[i]*1000*12*30;
2174 for(j=1; j<MAX_STD_TIMEBASES; j++){
2175 double error= duration_error[i][j] * get_std_framerate(j);
2176 // if(st->codec->codec_type == CODEC_TYPE_VIDEO)
2177 // av_log(NULL, AV_LOG_ERROR, "%f %f\n", get_std_framerate(j) / 12.0/1001, error);
2178 if(error < best_error){
2180 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, get_std_framerate(j), 12*1001, INT_MAX);
2185 if (!st->r_frame_rate.num){
2186 if( st->codec->time_base.den * (int64_t)st->time_base.num
2187 <= st->codec->time_base.num * (int64_t)st->time_base.den){
2188 st->r_frame_rate.num = st->codec->time_base.den;
2189 st->r_frame_rate.den = st->codec->time_base.num;
2191 st->r_frame_rate.num = st->time_base.den;
2192 st->r_frame_rate.den = st->time_base.num;
2195 }else if(st->codec->codec_type == CODEC_TYPE_AUDIO) {
2196 if(!st->codec->bits_per_sample)
2197 st->codec->bits_per_sample= av_get_bits_per_sample(st->codec->codec_id);
2201 av_estimate_timings(ic, old_offset);
2203 compute_chapters_end(ic);
2206 /* correct DTS for B-frame streams with no timestamps */
2207 for(i=0;i<ic->nb_streams;i++) {
2208 st = ic->streams[i];
2209 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
2211 ppktl = &ic->packet_buffer;
2213 if(ppkt1->stream_index != i)
2215 if(ppkt1->pkt->dts < 0)
2217 if(ppkt1->pkt->pts != AV_NOPTS_VALUE)
2219 ppkt1->pkt->dts -= delta;
2224 st->cur_dts -= delta;
2230 av_free(duration_error);
2235 /*******************************************************/
2237 int av_read_play(AVFormatContext *s)
2239 if (s->iformat->read_play)
2240 return s->iformat->read_play(s);
2242 return av_url_read_fpause(s->pb, 0);
2243 return AVERROR(ENOSYS);
2246 int av_read_pause(AVFormatContext *s)
2248 if (s->iformat->read_pause)
2249 return s->iformat->read_pause(s);
2251 return av_url_read_fpause(s->pb, 1);
2252 return AVERROR(ENOSYS);
2255 void av_close_input_stream(AVFormatContext *s)
2260 /* free previous packet */
2261 if (s->cur_st && s->cur_st->parser)
2262 av_free_packet(&s->cur_pkt);
2264 if (s->iformat->read_close)
2265 s->iformat->read_close(s);
2266 for(i=0;i<s->nb_streams;i++) {
2267 /* free all data in a stream component */
2270 av_parser_close(st->parser);
2272 av_free(st->index_entries);
2273 av_free(st->codec->extradata);
2275 av_free(st->filename);
2276 av_free(st->priv_data);
2279 for(i=s->nb_programs-1; i>=0; i--) {
2280 av_freep(&s->programs[i]->provider_name);
2281 av_freep(&s->programs[i]->name);
2282 av_freep(&s->programs[i]->stream_index);
2283 av_freep(&s->programs[i]);
2285 av_freep(&s->programs);
2286 flush_packet_queue(s);
2287 av_freep(&s->priv_data);
2288 while(s->nb_chapters--) {
2289 av_free(s->chapters[s->nb_chapters]->title);
2290 av_free(s->chapters[s->nb_chapters]);
2292 av_freep(&s->chapters);
2296 void av_close_input_file(AVFormatContext *s)
2298 ByteIOContext *pb = s->iformat->flags & AVFMT_NOFILE ? NULL : s->pb;
2299 av_close_input_stream(s);
2304 AVStream *av_new_stream(AVFormatContext *s, int id)
2309 if (s->nb_streams >= MAX_STREAMS)
2312 st = av_mallocz(sizeof(AVStream));
2316 st->codec= avcodec_alloc_context();
2318 /* no default bitrate if decoding */
2319 st->codec->bit_rate = 0;
2321 st->index = s->nb_streams;
2323 st->start_time = AV_NOPTS_VALUE;
2324 st->duration = AV_NOPTS_VALUE;
2325 /* We set the current DTS to 0 so that formats that carry only durations
2326 still get timestamps; formats with some unknown timestamps have their
2327 first few packets buffered and the timestamps corrected before they are
2328 returned to the user */
2330 st->first_dts = AV_NOPTS_VALUE;
2332 /* default pts setting is MPEG-like */
2333 av_set_pts_info(st, 33, 1, 90000);
2334 st->last_IP_pts = AV_NOPTS_VALUE;
2335 for(i=0; i<MAX_REORDER_DELAY+1; i++)
2336 st->pts_buffer[i]= AV_NOPTS_VALUE;
2338 st->sample_aspect_ratio = (AVRational){0,1};
2340 s->streams[s->nb_streams++] = st;
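/* Typical demuxer usage (sketch, error handling shortened; codec values are
 * example assumptions): create the stream, describe its codec and declare a
 * time base before any packet is returned.
 *
 *     AVStream *st = av_new_stream(s, 0);
 *     if (!st)
 *         return AVERROR(ENOMEM);
 *     st->codec->codec_type = CODEC_TYPE_AUDIO;
 *     st->codec->codec_id   = CODEC_ID_MP3;
 *     av_set_pts_info(st, 64, 1, 90000);   // pts_wrap_bits, then the time base
 */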
2344 AVProgram *av_new_program(AVFormatContext *ac, int id)
2346 AVProgram *program=NULL;
2350 av_log(ac, AV_LOG_DEBUG, "new_program: id=0x%04x\n", id);
2353 for(i=0; i<ac->nb_programs; i++)
2354 if(ac->programs[i]->id == id)
2355 program = ac->programs[i];
2358 program = av_mallocz(sizeof(AVProgram));
2361 dynarray_add(&ac->programs, &ac->nb_programs, program);
2362 program->discard = AVDISCARD_NONE;
2369 void av_set_program_name(AVProgram *program, char *provider_name, char *name)
2371 assert(!provider_name == !name);
2373 av_free(program->provider_name);
2374 av_free(program-> name);
2375 program->provider_name = av_strdup(provider_name);
2376 program-> name = av_strdup( name);
2380 AVChapter *ff_new_chapter(AVFormatContext *s, int id, AVRational time_base, int64_t start, int64_t end, const char *title)
2382 AVChapter *chapter = NULL;
2385 for(i=0; i<s->nb_chapters; i++)
2386 if(s->chapters[i]->id == id)
2387 chapter = s->chapters[i];
2390 chapter= av_mallocz(sizeof(AVChapter));
2393 dynarray_add(&s->chapters, &s->nb_chapters, chapter);
2395 av_free(chapter->title);
2396 chapter->title = av_strdup(title);
2398 chapter->time_base= time_base;
2399 chapter->start = start;
2405 /************************************************************/
2406 /* output media file */
2408 int av_set_parameters(AVFormatContext *s, AVFormatParameters *ap)
2412 if (s->oformat->priv_data_size > 0) {
2413 s->priv_data = av_mallocz(s->oformat->priv_data_size);
2415 return AVERROR(ENOMEM);
2417 s->priv_data = NULL;
2419 if (s->oformat->set_parameters) {
2420 ret = s->oformat->set_parameters(s, ap);
2427 int av_write_header(AVFormatContext *s)
2432 // some sanity checks
2433 for(i=0;i<s->nb_streams;i++) {
2436 switch (st->codec->codec_type) {
2437 case CODEC_TYPE_AUDIO:
2438 if(st->codec->sample_rate<=0){
2439 av_log(s, AV_LOG_ERROR, "sample rate not set\n");
2443 case CODEC_TYPE_VIDEO:
2444 if(st->codec->time_base.num<=0 || st->codec->time_base.den<=0){ //FIXME audio too?
2445 av_log(s, AV_LOG_ERROR, "time base not set\n");
2448 if(st->codec->width<=0 || st->codec->height<=0){
2449 av_log(s, AV_LOG_ERROR, "dimensions not set\n");
2455 if(s->oformat->codec_tag){
2456 if(st->codec->codec_tag){
2458 //check that tag + id is in the table
2459 //if neither is in the table -> OK
2460 //if tag is in the table with another id -> FAIL
2461 //if id is in the table with another tag -> FAIL unless strict < ?
2463 st->codec->codec_tag= av_codec_get_tag(s->oformat->codec_tag, st->codec->codec_id);
2467 if (!s->priv_data && s->oformat->priv_data_size > 0) {
2468 s->priv_data = av_mallocz(s->oformat->priv_data_size);
2470 return AVERROR(ENOMEM);
2473 if(s->oformat->write_header){
2474 ret = s->oformat->write_header(s);
2479 /* init PTS generation */
2480 for(i=0;i<s->nb_streams;i++) {
2481 int64_t den = AV_NOPTS_VALUE;
2484 switch (st->codec->codec_type) {
2485 case CODEC_TYPE_AUDIO:
2486 den = (int64_t)st->time_base.num * st->codec->sample_rate;
2488 case CODEC_TYPE_VIDEO:
2489 den = (int64_t)st->time_base.num * st->codec->time_base.den;
2494 if (den != AV_NOPTS_VALUE) {
2496 return AVERROR_INVALIDDATA;
2497 av_frac_init(&st->pts, 0, 0, den);
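/* Worked example (illustrative, not in the original file): with the MPEG-like
 * defaults from av_new_stream() the stream time base is 1/90000; for a 25 fps
 * video stream codec->time_base is 1/25, so den = 1 * 25 = 25.  Each later
 * call to av_frac_add() adds time_base.den * codec->time_base.num = 90000,
 * and 90000 / 25 = 3600 ticks, i.e. the pts advances by exactly one frame
 * duration expressed in the 90 kHz time base.
 */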
2503 //FIXME merge with compute_pkt_fields
2504 static int compute_pkt_fields2(AVStream *st, AVPacket *pkt){
2505 int delay = FFMAX(st->codec->has_b_frames, !!st->codec->max_b_frames);
2506 int num, den, frame_size, i;
2508 // av_log(st->codec, AV_LOG_DEBUG, "av_write_frame: pts:%"PRId64" dts:%"PRId64" cur_dts:%"PRId64" b:%d size:%d st:%d\n", pkt->pts, pkt->dts, st->cur_dts, delay, pkt->size, pkt->stream_index);
2510 /* if(pkt->pts == AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE)
2513 /* duration field */
2514 if (pkt->duration == 0) {
2515 compute_frame_duration(&num, &den, st, NULL, pkt);
2517 pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num);
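/* Worked example (illustrative, assuming compute_frame_duration() reports the
 * frame length as the fraction num/den seconds): for 1536-sample AC-3 frames
 * at 48 kHz with a 1/48000 stream time base, num/den = 1536/48000, so
 * pkt->duration = av_rescale(1, 1536 * 48000, 48000 * 1) = 1536 ticks.
 */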
2521 if(pkt->pts == AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && delay==0)
2524 //XXX/FIXME this is a temporary hack until all encoders output pts
2525 if((pkt->pts == 0 || pkt->pts == AV_NOPTS_VALUE) && pkt->dts == AV_NOPTS_VALUE && !delay){
2527 // pkt->pts= st->cur_dts;
2528 pkt->pts= st->pts.val;
2531 //calculate dts from pts
2532 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY){
2533 st->pts_buffer[0]= pkt->pts;
2534 for(i=1; i<delay+1 && st->pts_buffer[i] == AV_NOPTS_VALUE; i++)
2535 st->pts_buffer[i]= (i-delay-1) * pkt->duration;
2536 for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
2537 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
2539 pkt->dts= st->pts_buffer[0];
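/* Worked example (illustrative): with delay == 1 and pkt->duration == 1, pts
 * values arriving in coded order as 1, 0, 3, 2 produce dts of -1, 0, 1, 2:
 * the single bubble pass keeps the smallest buffered pts in slot 0, so the
 * emitted dts never exceeds the corresponding pts and stays monotonic.
 */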
2542 if(st->cur_dts && st->cur_dts != AV_NOPTS_VALUE && st->cur_dts >= pkt->dts){
2543 av_log(st->codec, AV_LOG_ERROR, "error, non-monotone timestamps %"PRId64" >= %"PRId64"\n", st->cur_dts, pkt->dts);
2546 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts < pkt->dts){
2547 av_log(st->codec, AV_LOG_ERROR, "error, pts < dts\n");
2551 // av_log(NULL, AV_LOG_DEBUG, "av_write_frame: pts2:%"PRId64" dts2:%"PRId64"\n", pkt->pts, pkt->dts);
2552 st->cur_dts= pkt->dts;
2553 st->pts.val= pkt->dts;
2556 switch (st->codec->codec_type) {
2557 case CODEC_TYPE_AUDIO:
2558 frame_size = get_audio_frame_size(st->codec, pkt->size);
2560 /* HACK/FIXME, we skip the initial zero-sized packets as they most
2561 likely correspond to the encoder delay, but it would be better if we
2562 had the real timestamps from the encoder */
2563 if (frame_size >= 0 && (pkt->size || st->pts.num!=st->pts.den>>1 || st->pts.val)) {
2564 av_frac_add(&st->pts, (int64_t)st->time_base.den * frame_size);
2567 case CODEC_TYPE_VIDEO:
2568 av_frac_add(&st->pts, (int64_t)st->time_base.den * st->codec->time_base.num);
2576 static void truncate_ts(AVStream *st, AVPacket *pkt){
2577 int64_t pts_mask = (2LL << (st->pts_wrap_bits-1)) - 1;
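/* Example (illustrative): with the default pts_wrap_bits of 33 set in
 * av_new_stream(), pts_mask = (2LL << 32) - 1 = 0x1FFFFFFFF, i.e. timestamps
 * are wrapped to the 33 bits that MPEG-PS/TS timestamps actually carry.
 */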
2580 // pkt->dts= 0; //this happens for low_delay=0 and B-frames, FIXME, needs further investigation about what we should do here
2582 if (pkt->pts != AV_NOPTS_VALUE)
2583 pkt->pts &= pts_mask;
2584 if (pkt->dts != AV_NOPTS_VALUE)
2585 pkt->dts &= pts_mask;
2588 int av_write_frame(AVFormatContext *s, AVPacket *pkt)
2590 int ret = compute_pkt_fields2(s->streams[pkt->stream_index], pkt);
2592 if(ret<0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
2595 truncate_ts(s->streams[pkt->stream_index], pkt);
2597 ret= s->oformat->write_packet(s, pkt);
2599 ret= url_ferror(s->pb);
2603 int av_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out, AVPacket *pkt, int flush){
2604 AVPacketList *pktl, **next_point, *this_pktl;
2606 int streams[MAX_STREAMS];
2609 AVStream *st= s->streams[ pkt->stream_index];
2611 // assert(pkt->destruct != av_destruct_packet); //FIXME
2613 this_pktl = av_mallocz(sizeof(AVPacketList));
2614 this_pktl->pkt= *pkt;
2615 if(pkt->destruct == av_destruct_packet)
2616 pkt->destruct= NULL; // not shared -> must keep original from being freed
2618 av_dup_packet(&this_pktl->pkt); //shared -> must dup
2620 next_point = &s->packet_buffer;
2622 AVStream *st2= s->streams[ (*next_point)->pkt.stream_index];
2623 int64_t left= st2->time_base.num * (int64_t)st ->time_base.den;
2624 int64_t right= st ->time_base.num * (int64_t)st2->time_base.den;
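/* The cross-multiplication compares the two dts values without converting to
 * floating point: dts2*left > dts*right is equivalent to comparing the two
 * timestamps in seconds.  Example (illustrative): dts2 = 900 in a 1/90000
 * time base and dts = 441 in a 1/44100 time base both equal 0.01 s, so
 * 900*44100 == 441*90000 and the new packet is inserted after its peer.
 */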
2625 if((*next_point)->pkt.dts * left > pkt->dts * right) //FIXME this can overflow
2627 next_point= &(*next_point)->next;
2629 this_pktl->next= *next_point;
2630 *next_point= this_pktl;
2633 memset(streams, 0, sizeof(streams));
2634 pktl= s->packet_buffer;
2636 //av_log(s, AV_LOG_DEBUG, "show st:%d dts:%"PRId64"\n", pktl->pkt.stream_index, pktl->pkt.dts);
2637 if(streams[ pktl->pkt.stream_index ] == 0)
2639 streams[ pktl->pkt.stream_index ]++;
2643 if(stream_count && (s->nb_streams == stream_count || flush)){
2644 pktl= s->packet_buffer;
2647 s->packet_buffer= pktl->next;
2651 av_init_packet(out);
2657 * Interleaves an AVPacket correctly so it can be muxed.
2658 * @param out the interleaved packet will be output here
2659 * @param in the input packet
2660 * @param flush 1 if no further packets are available as input and all
2661 * remaining packets should be output
2662 * @return 1 if a packet was output, 0 if no packet could be output,
2663 * < 0 if an error occurred
2665 static int av_interleave_packet(AVFormatContext *s, AVPacket *out, AVPacket *in, int flush){
2666 if(s->oformat->interleave_packet)
2667 return s->oformat->interleave_packet(s, out, in, flush);
2669 return av_interleave_packet_per_dts(s, out, in, flush);
2672 int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt){
2673 AVStream *st= s->streams[ pkt->stream_index];
2675 //FIXME/XXX/HACK drop zero sized packets
2676 if(st->codec->codec_type == CODEC_TYPE_AUDIO && pkt->size==0)
2679 //av_log(NULL, AV_LOG_DEBUG, "av_interleaved_write_frame %d %"PRId64" %"PRId64"\n", pkt->size, pkt->dts, pkt->pts);
2680 if(compute_pkt_fields2(st, pkt) < 0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
2683 if(pkt->dts == AV_NOPTS_VALUE)
2688 int ret= av_interleave_packet(s, &opkt, pkt, 0);
2689 if(ret<=0) //FIXME cleanup needed for ret<0 ?
2692 truncate_ts(s->streams[opkt.stream_index], &opkt);
2693 ret= s->oformat->write_packet(s, &opkt);
2695 av_free_packet(&opkt);
2700 if(url_ferror(s->pb))
2701 return url_ferror(s->pb);
2705 int av_write_trailer(AVFormatContext *s)
2711 ret= av_interleave_packet(s, &pkt, NULL, 1);
2712 if(ret<0) //FIXME cleanup needed for ret<0 ?
2717 truncate_ts(s->streams[pkt.stream_index], &pkt);
2718 ret= s->oformat->write_packet(s, &pkt);
2720 av_free_packet(&pkt);
2724 if(url_ferror(s->pb))
2728 if(s->oformat->write_trailer)
2729 ret = s->oformat->write_trailer(s);
2732 ret=url_ferror(s->pb);
2733 for(i=0;i<s->nb_streams;i++)
2734 av_freep(&s->streams[i]->priv_data);
2735 av_freep(&s->priv_data);
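/* Putting the pieces together (illustrative sketch, not part of this file):
 * a minimal muxing loop built on the API above, assuming 'oc' is a fully
 * configured AVFormatContext and next_encoded_packet() is a hypothetical
 * helper supplied by the application:
 *
 *     if (av_write_header(oc) < 0)
 *         return -1;
 *     AVPacket pkt;
 *     while (next_encoded_packet(&pkt) == 0) {
 *         if (av_interleaved_write_frame(oc, &pkt) < 0)
 *             break;
 *     }
 *     av_write_trailer(oc);
 */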
2739 void av_program_add_stream_index(AVFormatContext *ac, int progid, unsigned int idx)
2742 AVProgram *program=NULL;
2745 for(i=0; i<ac->nb_programs; i++){
2746 if(ac->programs[i]->id != progid)
2748 program = ac->programs[i];
2749 for(j=0; j<program->nb_stream_indexes; j++)
2750 if(program->stream_index[j] == idx)
2753 tmp = av_realloc(program->stream_index, sizeof(unsigned int)*(program->nb_stream_indexes+1));
2756 program->stream_index = tmp;
2757 program->stream_index[program->nb_stream_indexes++] = idx;
2762 /* "user interface" functions */
2763 static void dump_stream_format(AVFormatContext *ic, int i, int index, int is_output)
2766 int flags = (is_output ? ic->oformat->flags : ic->iformat->flags);
2767 AVStream *st = ic->streams[i];
2768 int g = ff_gcd(st->time_base.num, st->time_base.den);
2769 avcodec_string(buf, sizeof(buf), st->codec, is_output);
2770 av_log(NULL, AV_LOG_INFO, " Stream #%d.%d", index, i);
2771 /* the pid is important information, so we display it */
2772 /* XXX: add a generic system */
2773 if (flags & AVFMT_SHOW_IDS)
2774 av_log(NULL, AV_LOG_INFO, "[0x%x]", st->id);
2775 if (strlen(st->language) > 0)
2776 av_log(NULL, AV_LOG_INFO, "(%s)", st->language);
2777 av_log(NULL, AV_LOG_DEBUG, ", %d/%d", st->time_base.num/g, st->time_base.den/g);
2778 av_log(NULL, AV_LOG_INFO, ": %s", buf);
2779 if(st->codec->codec_type == CODEC_TYPE_VIDEO){
2780 if(st->r_frame_rate.den && st->r_frame_rate.num)
2781 av_log(NULL, AV_LOG_INFO, ", %5.2f tb(r)", av_q2d(st->r_frame_rate));
2782 /* else if(st->time_base.den && st->time_base.num)
2783 av_log(NULL, AV_LOG_INFO, ", %5.2f tb(m)", 1/av_q2d(st->time_base));*/
2785 av_log(NULL, AV_LOG_INFO, ", %5.2f tb(c)", 1/av_q2d(st->codec->time_base));
2787 av_log(NULL, AV_LOG_INFO, "\n");
2790 void dump_format(AVFormatContext *ic,
2797 av_log(NULL, AV_LOG_INFO, "%s #%d, %s, %s '%s':\n",
2798 is_output ? "Output" : "Input",
2800 is_output ? ic->oformat->name : ic->iformat->name,
2801 is_output ? "to" : "from", url);
2803 av_log(NULL, AV_LOG_INFO, " Duration: ");
2804 if (ic->duration != AV_NOPTS_VALUE) {
2805 int hours, mins, secs, us;
2806 secs = ic->duration / AV_TIME_BASE;
2807 us = ic->duration % AV_TIME_BASE;
2812 av_log(NULL, AV_LOG_INFO, "%02d:%02d:%02d.%02d", hours, mins, secs,
2813 (100 * us) / AV_TIME_BASE);
2815 av_log(NULL, AV_LOG_INFO, "N/A");
2817 if (ic->start_time != AV_NOPTS_VALUE) {
2819 av_log(NULL, AV_LOG_INFO, ", start: ");
2820 secs = ic->start_time / AV_TIME_BASE;
2821 us = ic->start_time % AV_TIME_BASE;
2822 av_log(NULL, AV_LOG_INFO, "%d.%06d",
2823 secs, (int)av_rescale(us, 1000000, AV_TIME_BASE));
2825 av_log(NULL, AV_LOG_INFO, ", bitrate: ");
2827 av_log(NULL, AV_LOG_INFO,"%d kb/s", ic->bit_rate / 1000);
2829 av_log(NULL, AV_LOG_INFO, "N/A");
2831 av_log(NULL, AV_LOG_INFO, "\n");
2833 if(ic->nb_programs) {
2835 for(j=0; j<ic->nb_programs; j++) {
2836 av_log(NULL, AV_LOG_INFO, " Program %d %s\n", ic->programs[j]->id,
2837 ic->programs[j]->name ? ic->programs[j]->name : "");
2838 for(k=0; k<ic->programs[j]->nb_stream_indexes; k++)
2839 dump_stream_format(ic, ic->programs[j]->stream_index[k], index, is_output);
2842 for(i=0;i<ic->nb_streams;i++)
2843 dump_stream_format(ic, i, index, is_output);
2846 int parse_image_size(int *width_ptr, int *height_ptr, const char *str)
2848 return av_parse_video_frame_size(width_ptr, height_ptr, str);
2851 int parse_frame_rate(int *frame_rate_num, int *frame_rate_den, const char *arg)
2853 AVRational frame_rate;
2854 int ret = av_parse_video_frame_rate(&frame_rate, arg);
2855 *frame_rate_num= frame_rate.num;
2856 *frame_rate_den= frame_rate.den;
2861 * Gets the current time in microseconds.
2863 int64_t av_gettime(void)
2866 gettimeofday(&tv,NULL);
2867 return (int64_t)tv.tv_sec * 1000000 + tv.tv_usec;
2870 int64_t parse_date(const char *datestr, int duration)
2876 static const char * const date_fmt[] = {
2880 static const char * const time_fmt[] = {
2890 time_t now = time(0);
2892 len = strlen(datestr);
2894 lastch = datestr[len - 1];
2897 is_utc = (lastch == 'z' || lastch == 'Z');
2899 memset(&dt, 0, sizeof(dt));
2904 /* parse the year-month-day part */
2905 for (i = 0; i < sizeof(date_fmt) / sizeof(date_fmt[0]); i++) {
2906 q = small_strptime(p, date_fmt[i], &dt);
2912 /* if the year-month-day part is missing, then take the
2913 * current year, month and day */
2918 dt = *localtime(&now);
2920 dt.tm_hour = dt.tm_min = dt.tm_sec = 0;
2925 if (*p == 'T' || *p == 't' || *p == ' ')
2928 /* parse the hour-minute-second part */
2929 for (i = 0; i < sizeof(time_fmt) / sizeof(time_fmt[0]); i++) {
2930 q = small_strptime(p, time_fmt[i], &dt);
2936 /* parse datestr as a duration */
2941 /* parse datestr as HH:MM:SS */
2942 q = small_strptime(p, time_fmt[0], &dt);
2944 /* parse datestr as S+ */
2945 dt.tm_sec = strtol(p, (char **)&q, 10);
2947 /* the parsing didn't succeed */
2954 /* Now we have all the fields that we can get */
2960 t = dt.tm_hour * 3600 + dt.tm_min * 60 + dt.tm_sec;
2962 dt.tm_isdst = -1; /* unknown */
2972 /* parse the .m... part */
2976 for (val = 0, n = 100000; n >= 1; n /= 10, q++) {
2979 val += n * (*q - '0');
2983 return negative ? -t : t;
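/* Examples (illustrative): with duration == 0 the string is an absolute date,
 * e.g. "2008-12-31 23:59:59" or "20081231T235959Z"; with duration != 0 it is
 * parsed as a length of time, so parse_date("00:00:01.5", 1) yields 1500000
 * and parse_date("12.5", 1) yields 12500000, both in microseconds.
 */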
2986 int find_info_tag(char *arg, int arg_size, const char *tag1, const char *info)
2996 while (*p != '\0' && *p != '=' && *p != '&') {
2997 if ((q - tag) < sizeof(tag) - 1)
3005 while (*p != '&' && *p != '\0') {
3006 if ((q - arg) < arg_size - 1) {
3016 if (!strcmp(tag, tag1))
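/* Illustrative usage (assumption, not part of the original file): with an
 * option string in "tag=value&tag2=value2" form,
 *
 *     char buf[64];
 *     find_info_tag(buf, sizeof(buf), "codec", "codec=mpeg4&rate=25");
 *
 * returns 1 and copies "mpeg4" into buf.
 */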
3025 int av_get_frame_filename(char *buf, int buf_size,
3026 const char *path, int number)
3029 char *q, buf1[20], c;
3030 int nd, len, percentd_found;
3042 while (isdigit(*p)) {
3043 nd = nd * 10 + *p++ - '0';
3046 } while (isdigit(c));
3055 snprintf(buf1, sizeof(buf1), "%0*d", nd, number);
3057 if ((q - buf + len) > buf_size - 1)
3059 memcpy(q, buf1, len);
3067 if ((q - buf) < buf_size - 1)
3071 if (!percentd_found)
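/* Illustrative usage: av_get_frame_filename(buf, sizeof(buf), "img-%03d.jpeg", 7)
 * writes "img-007.jpeg" into buf and returns 0; a path without any %d-style
 * sequence makes the call fail because of the percentd_found check above.
 */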
3080 static void hex_dump_internal(void *avcl, FILE *f, int level, uint8_t *buf, int size)
3083 #define PRINT(...) do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0)
3085 for(i=0;i<size;i+=16) {
3092 PRINT(" %02x", buf[i+j]);
3097 for(j=0;j<len;j++) {
3099 if (c < ' ' || c > '~')
3108 void av_hex_dump(FILE *f, uint8_t *buf, int size)
3110 hex_dump_internal(NULL, f, 0, buf, size);
3113 void av_hex_dump_log(void *avcl, int level, uint8_t *buf, int size)
3115 hex_dump_internal(avcl, NULL, level, buf, size);
3118 //FIXME needs to know the time_base
3119 static void pkt_dump_internal(void *avcl, FILE *f, int level, AVPacket *pkt, int dump_payload)
3121 #define PRINT(...) do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0)
3122 PRINT("stream #%d:\n", pkt->stream_index);
3123 PRINT(" keyframe=%d\n", ((pkt->flags & PKT_FLAG_KEY) != 0));
3124 PRINT(" duration=%0.3f\n", (double)pkt->duration / AV_TIME_BASE);
3125 /* DTS is _always_ valid after av_read_frame() */
3127 if (pkt->dts == AV_NOPTS_VALUE)
3130 PRINT("%0.3f", (double)pkt->dts / AV_TIME_BASE);
3131 /* PTS may not be known if B-frames are present. */
3133 if (pkt->pts == AV_NOPTS_VALUE)
3136 PRINT("%0.3f", (double)pkt->pts / AV_TIME_BASE);
3138 PRINT(" size=%d\n", pkt->size);
3141 av_hex_dump(f, pkt->data, pkt->size);
3144 void av_pkt_dump(FILE *f, AVPacket *pkt, int dump_payload)
3146 pkt_dump_internal(NULL, f, 0, pkt, dump_payload);
3149 void av_pkt_dump_log(void *avcl, int level, AVPacket *pkt, int dump_payload)
3151 pkt_dump_internal(avcl, NULL, level, pkt, dump_payload);
3154 void url_split(char *proto, int proto_size,
3155 char *authorization, int authorization_size,
3156 char *hostname, int hostname_size,
3158 char *path, int path_size,
3161 const char *p, *ls, *at, *col, *brk;
3163 if (port_ptr) *port_ptr = -1;
3164 if (proto_size > 0) proto[0] = 0;
3165 if (authorization_size > 0) authorization[0] = 0;
3166 if (hostname_size > 0) hostname[0] = 0;
3167 if (path_size > 0) path[0] = 0;
3169 /* parse protocol */
3170 if ((p = strchr(url, ':'))) {
3171 av_strlcpy(proto, url, FFMIN(proto_size, p + 1 - url));
3176 /* no protocol means plain filename */
3177 av_strlcpy(path, url, path_size);
3181 /* separate path from hostname */
3182 ls = strchr(p, '/');
3184 ls = strchr(p, '?');
3186 av_strlcpy(path, ls, path_size);
3188 ls = &p[strlen(p)]; // XXX
3190 /* the rest is hostname, use that to parse auth/port */
3192 /* authorization (user[:pass]@hostname) */
3193 if ((at = strchr(p, '@')) && at < ls) {
3194 av_strlcpy(authorization, p,
3195 FFMIN(authorization_size, at + 1 - p));
3196 p = at + 1; /* skip '@' */
3199 if (*p == '[' && (brk = strchr(p, ']')) && brk < ls) {
3201 av_strlcpy(hostname, p + 1,
3202 FFMIN(hostname_size, brk - p));
3203 if (brk[1] == ':' && port_ptr)
3204 *port_ptr = atoi(brk + 2);
3205 } else if ((col = strchr(p, ':')) && col < ls) {
3206 av_strlcpy(hostname, p,
3207 FFMIN(col + 1 - p, hostname_size));
3208 if (port_ptr) *port_ptr = atoi(col + 1);
3210 av_strlcpy(hostname, p,
3211 FFMIN(ls + 1 - p, hostname_size));
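/* Illustrative example: url_split() applied to
 * "http://user:pass@example.com:8080/dir/file?x=1" yields proto "http",
 * authorization "user:pass", hostname "example.com", *port_ptr == 8080 and
 * path "/dir/file?x=1" (the query string stays attached to the path).
 */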
3215 void av_set_pts_info(AVStream *s, int pts_wrap_bits,
3216 int pts_num, int pts_den)
3218 unsigned int gcd= ff_gcd(pts_num, pts_den);
3219 s->pts_wrap_bits = pts_wrap_bits;
3220 s->time_base.num = pts_num/gcd;
3221 s->time_base.den = pts_den/gcd;
3224 av_log(NULL, AV_LOG_DEBUG, "st:%d removing common factor %d from timebase\n", s->index, gcd);
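/* Example (illustrative): av_set_pts_info(st, 33, 1, 90000) gives the MPEG
 * default of a 1/90000 time base with 33-bit timestamp wrapping, while
 * av_set_pts_info(st, 64, 2, 50) is reduced by the common factor 2 to a
 * 1/25 time base, triggering the debug message above.
 */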