2 * various utility functions for use within Libav
3 * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
5 * This file is part of Libav.
7 * Libav is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
12 * Libav is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with Libav; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
29 #include "libavutil/avassert.h"
30 #include "libavutil/avstring.h"
31 #include "libavutil/dict.h"
32 #include "libavutil/internal.h"
33 #include "libavutil/mathematics.h"
34 #include "libavutil/opt.h"
35 #include "libavutil/parseutils.h"
36 #include "libavutil/pixdesc.h"
37 #include "libavutil/time.h"
39 #include "libavcodec/bytestream.h"
40 #include "libavcodec/internal.h"
42 #include "audiointerleave.h"
55 * various utility functions for use within Libav
58 unsigned avformat_version(void)
60 return LIBAVFORMAT_VERSION_INT;
63 const char *avformat_configuration(void)
65 return LIBAV_CONFIGURATION;
68 const char *avformat_license(void)
70 #define LICENSE_PREFIX "libavformat license: "
71 return LICENSE_PREFIX LIBAV_LICENSE + sizeof(LICENSE_PREFIX) - 1;
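/* Usage sketch (illustrative only, not part of the library): querying the build
 * information exposed by the three functions above. Assumes only the public
 * libavformat header.
 *
 * @code
 * #include <stdio.h>
 * #include <libavformat/avformat.h>
 *
 * static void print_lavf_build_info(void)
 * {
 *     unsigned v = avformat_version();
 *     printf("lavf %u.%u.%u\n", v >> 16, (v >> 8) & 0xff, v & 0xff);
 *     printf("configuration: %s\n", avformat_configuration());
 *     printf("license: %s\n", avformat_license());
 * }
 * @endcode
 */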
74 /* an arbitrarily chosen "sane" max packet size -- 50M */
75 #define SANE_CHUNK_SIZE (50000000)
77 /* Read the data in sane-sized chunks and append to pkt.
78 * Return the number of bytes read or an error. */
79 static int append_packet_chunked(AVIOContext *s, AVPacket *pkt, int size)
81 int64_t chunk_size = size;
82 int64_t orig_pos = pkt->pos; // av_grow_packet might reset pos
83 int orig_size = pkt->size;
87 int prev_size = pkt->size;
90 /* When the caller requests a lot of data, limit it to the amount
91 * left in file or SANE_CHUNK_SIZE when it is not known. */
92 if (size > SANE_CHUNK_SIZE) {
93 int64_t filesize = avio_size(s) - avio_tell(s);
94 chunk_size = FFMAX(filesize, SANE_CHUNK_SIZE);
96 read_size = FFMIN(size, chunk_size);
98 ret = av_grow_packet(pkt, read_size);
102 ret = avio_read(s, pkt->data + prev_size, read_size);
103 if (ret != read_size) {
104 av_shrink_packet(pkt, prev_size + FFMAX(ret, 0));
114 return pkt->size > orig_size ? pkt->size - orig_size : ret;
117 int av_get_packet(AVIOContext *s, AVPacket *pkt, int size)
122 pkt->pos = avio_tell(s);
124 return append_packet_chunked(s, pkt, size);
127 int av_append_packet(AVIOContext *s, AVPacket *pkt, int size)
130 return av_get_packet(s, pkt, size);
131 return append_packet_chunked(s, pkt, size);
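/* Usage sketch (illustrative only, not part of the library): pulling the raw
 * bytes of a file into one packet with av_get_packet(). The chunked reader
 * above keeps even a very large request from being allocated up front. "url"
 * is a caller-supplied path; the caller releases the packet with
 * av_free_packet() when done.
 *
 * @code
 * #include <libavformat/avio.h>
 * #include <libavcodec/avcodec.h>
 *
 * static int slurp(const char *url, AVPacket *pkt)
 * {
 *     AVIOContext *pb = NULL;
 *     int ret = avio_open(&pb, url, AVIO_FLAG_READ);
 *     if (ret < 0)
 *         return ret;
 *     // av_get_packet() initializes *pkt before filling it
 *     ret = av_get_packet(pb, pkt, (int) avio_size(pb)); // assumes the size fits in an int
 *     avio_close(pb);
 *     return ret;                                        // bytes read or an error code
 * }
 * @endcode
 */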
134 int av_filename_number_test(const char *filename)
138 (av_get_frame_filename(buf, sizeof(buf), filename, 1) >= 0);
141 static int set_codec_from_probe_data(AVFormatContext *s, AVStream *st,
142 AVProbeData *pd, int score)
144 static const struct {
147 enum AVMediaType type;
149 { "aac", AV_CODEC_ID_AAC, AVMEDIA_TYPE_AUDIO },
150 { "ac3", AV_CODEC_ID_AC3, AVMEDIA_TYPE_AUDIO },
151 { "dts", AV_CODEC_ID_DTS, AVMEDIA_TYPE_AUDIO },
152 { "eac3", AV_CODEC_ID_EAC3, AVMEDIA_TYPE_AUDIO },
153 { "h264", AV_CODEC_ID_H264, AVMEDIA_TYPE_VIDEO },
154 { "latm", AV_CODEC_ID_AAC_LATM, AVMEDIA_TYPE_AUDIO },
155 { "m4v", AV_CODEC_ID_MPEG4, AVMEDIA_TYPE_VIDEO },
156 { "mp3", AV_CODEC_ID_MP3, AVMEDIA_TYPE_AUDIO },
157 { "mpegvideo", AV_CODEC_ID_MPEG2VIDEO, AVMEDIA_TYPE_VIDEO },
160 AVInputFormat *fmt = av_probe_input_format2(pd, 1, &score);
164 av_log(s, AV_LOG_DEBUG,
165 "Probe with size=%d, packets=%d detected %s with score=%d\n",
166 pd->buf_size, MAX_PROBE_PACKETS - st->probe_packets,
168 for (i = 0; fmt_id_type[i].name; i++) {
169 if (!strcmp(fmt->name, fmt_id_type[i].name)) {
170 st->codec->codec_id = fmt_id_type[i].id;
171 st->codec->codec_type = fmt_id_type[i].type;
179 /************************************************************/
180 /* input media file */
182 /* Open input file and probe the format if necessary. */
183 static int init_input(AVFormatContext *s, const char *filename,
184 AVDictionary **options)
187 AVProbeData pd = { filename, NULL, 0 };
190 s->flags |= AVFMT_FLAG_CUSTOM_IO;
192 return av_probe_input_buffer(s->pb, &s->iformat, filename,
194 else if (s->iformat->flags & AVFMT_NOFILE)
195 return AVERROR(EINVAL);
199 if ((s->iformat && s->iformat->flags & AVFMT_NOFILE) ||
200 (!s->iformat && (s->iformat = av_probe_input_format(&pd, 0))))
203 if ((ret = avio_open2(&s->pb, filename, AVIO_FLAG_READ,
204 &s->interrupt_callback, options)) < 0)
208 return av_probe_input_buffer(s->pb, &s->iformat, filename,
212 static AVPacket *add_to_pktbuf(AVPacketList **packet_buffer, AVPacket *pkt,
213 AVPacketList **plast_pktl)
215 AVPacketList *pktl = av_mallocz(sizeof(AVPacketList));
220 (*plast_pktl)->next = pktl;
222 *packet_buffer = pktl;
224 /* Add the packet to the buffered packet list. */
230 static int queue_attached_pictures(AVFormatContext *s)
233 for (i = 0; i < s->nb_streams; i++)
234 if (s->streams[i]->disposition & AV_DISPOSITION_ATTACHED_PIC &&
235 s->streams[i]->discard < AVDISCARD_ALL) {
236 AVPacket copy = s->streams[i]->attached_pic;
237 copy.buf = av_buffer_ref(copy.buf);
239 return AVERROR(ENOMEM);
241 add_to_pktbuf(&s->internal->raw_packet_buffer, &copy,
242 &s->internal->raw_packet_buffer_end);
247 int avformat_open_input(AVFormatContext **ps, const char *filename,
248 AVInputFormat *fmt, AVDictionary **options)
250 AVFormatContext *s = *ps;
252 AVDictionary *tmp = NULL;
253 ID3v2ExtraMeta *id3v2_extra_meta = NULL;
255 if (!s && !(s = avformat_alloc_context()))
256 return AVERROR(ENOMEM);
261 av_dict_copy(&tmp, *options, 0);
263 if ((ret = av_opt_set_dict(s, &tmp)) < 0)
266 if ((ret = init_input(s, filename, &tmp)) < 0)
269 /* Check filename in case an image number is expected. */
270 if (s->iformat->flags & AVFMT_NEEDNUMBER) {
271 if (!av_filename_number_test(filename)) {
272 ret = AVERROR(EINVAL);
277 s->duration = s->start_time = AV_NOPTS_VALUE;
278 av_strlcpy(s->filename, filename ? filename : "", sizeof(s->filename));
280 /* Allocate private data. */
281 if (s->iformat->priv_data_size > 0) {
282 if (!(s->priv_data = av_mallocz(s->iformat->priv_data_size))) {
283 ret = AVERROR(ENOMEM);
286 if (s->iformat->priv_class) {
287 *(const AVClass **) s->priv_data = s->iformat->priv_class;
288 av_opt_set_defaults(s->priv_data);
289 if ((ret = av_opt_set_dict(s->priv_data, &tmp)) < 0)
294 /* e.g. AVFMT_NOFILE formats will not have an AVIOContext */
296 ff_id3v2_read(s, ID3v2_DEFAULT_MAGIC, &id3v2_extra_meta);
298 if (s->iformat->read_header)
299 if ((ret = s->iformat->read_header(s)) < 0)
302 if (id3v2_extra_meta &&
303 (ret = ff_id3v2_parse_apic(s, &id3v2_extra_meta)) < 0)
305 ff_id3v2_free_extra_meta(&id3v2_extra_meta);
307 if ((ret = queue_attached_pictures(s)) < 0)
310 if (s->pb && !s->internal->data_offset)
311 s->internal->data_offset = avio_tell(s->pb);
313 s->internal->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
316 av_dict_free(options);
323 ff_id3v2_free_extra_meta(&id3v2_extra_meta);
325 if (s->pb && !(s->flags & AVFMT_FLAG_CUSTOM_IO))
327 avformat_free_context(s);
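/* Usage sketch (illustrative only, not part of the library): the intended
 * calling pattern for avformat_open_input(). Assumes av_register_all() has
 * already been called by the application; "url" is a caller-supplied path.
 *
 * @code
 * #include <libavformat/avformat.h>
 *
 * static int open_and_close(const char *url)
 * {
 *     AVFormatContext *fmt = NULL;        // let lavf allocate the context
 *     int ret = avformat_open_input(&fmt, url, NULL, NULL);
 *     if (ret < 0)
 *         return ret;                     // the context is freed on failure (see above)
 *     // ... read packets, find stream info, etc. ...
 *     avformat_close_input(&fmt);
 *     return 0;
 * }
 * @endcode
 */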
332 /*******************************************************/
334 static int probe_codec(AVFormatContext *s, AVStream *st, const AVPacket *pkt)
336 if (st->codec->codec_id == AV_CODEC_ID_PROBE) {
337 AVProbeData *pd = &st->probe_data;
338 av_log(s, AV_LOG_DEBUG, "probing stream %d\n", st->index);
343 if ((err = av_reallocp(&pd->buf, pd->buf_size + pkt->size +
344 AVPROBE_PADDING_SIZE)) < 0)
346 memcpy(pd->buf + pd->buf_size, pkt->data, pkt->size);
347 pd->buf_size += pkt->size;
348 memset(pd->buf + pd->buf_size, 0, AVPROBE_PADDING_SIZE);
350 st->probe_packets = 0;
352 av_log(s, AV_LOG_ERROR,
353 "nothing to probe for stream %d\n", st->index);
358 if (!st->probe_packets ||
359 av_log2(pd->buf_size) != av_log2(pd->buf_size - pkt->size)) {
360 set_codec_from_probe_data(s, st, pd, st->probe_packets > 0
361 ? AVPROBE_SCORE_MAX / 4 : 0);
362 if (st->codec->codec_id != AV_CODEC_ID_PROBE) {
365 av_log(s, AV_LOG_DEBUG, "probed stream %d\n", st->index);
372 int ff_read_packet(AVFormatContext *s, AVPacket *pkt)
378 AVPacketList *pktl = s->internal->raw_packet_buffer;
382 st = s->streams[pkt->stream_index];
383 if (st->codec->codec_id != AV_CODEC_ID_PROBE ||
384 !st->probe_packets ||
385 s->internal->raw_packet_buffer_remaining_size < pkt->size) {
387 if (st->probe_packets)
388 if ((err = probe_codec(s, st, NULL)) < 0)
390 pd = &st->probe_data;
393 s->internal->raw_packet_buffer = pktl->next;
394 s->internal->raw_packet_buffer_remaining_size += pkt->size;
403 ret = s->iformat->read_packet(s, pkt);
405 if (!pktl || ret == AVERROR(EAGAIN))
407 for (i = 0; i < s->nb_streams; i++) {
409 if (st->probe_packets)
410 if ((err = probe_codec(s, st, NULL)) < 0)
416 if ((s->flags & AVFMT_FLAG_DISCARD_CORRUPT) &&
417 (pkt->flags & AV_PKT_FLAG_CORRUPT)) {
418 av_log(s, AV_LOG_WARNING,
419 "Dropped corrupted packet (stream = %d)\n",
425 st = s->streams[pkt->stream_index];
427 switch (st->codec->codec_type) {
428 case AVMEDIA_TYPE_VIDEO:
429 if (s->video_codec_id)
430 st->codec->codec_id = s->video_codec_id;
432 case AVMEDIA_TYPE_AUDIO:
433 if (s->audio_codec_id)
434 st->codec->codec_id = s->audio_codec_id;
436 case AVMEDIA_TYPE_SUBTITLE:
437 if (s->subtitle_codec_id)
438 st->codec->codec_id = s->subtitle_codec_id;
442 if (!pktl && (st->codec->codec_id != AV_CODEC_ID_PROBE ||
446 add_to_pktbuf(&s->internal->raw_packet_buffer, pkt,
447 &s->internal->raw_packet_buffer_end);
448 s->internal->raw_packet_buffer_remaining_size -= pkt->size;
450 if ((err = probe_codec(s, st, pkt)) < 0)
455 /**********************************************************/
458 * Compute the frame duration as the fraction *pnum / *pden, in seconds. Set both to 0 if not available.
460 void ff_compute_frame_duration(AVFormatContext *s, int *pnum, int *pden, AVStream *st,
461 AVCodecParserContext *pc, AVPacket *pkt)
463 AVRational codec_framerate = s->iformat ? st->codec->framerate :
464 av_inv_q(st->codec->time_base);
469 switch (st->codec->codec_type) {
470 case AVMEDIA_TYPE_VIDEO:
471 if (st->avg_frame_rate.num) {
472 *pnum = st->avg_frame_rate.den;
473 *pden = st->avg_frame_rate.num;
474 } else if (st->time_base.num * 1000LL > st->time_base.den) {
475 *pnum = st->time_base.num;
476 *pden = st->time_base.den;
477 } else if (codec_framerate.den * 1000LL > codec_framerate.num) {
478 *pnum = codec_framerate.den;
479 *pden = codec_framerate.num;
480 if (pc && pc->repeat_pict) {
481 if (*pnum > INT_MAX / (1 + pc->repeat_pict))
482 *pden /= 1 + pc->repeat_pict;
484 *pnum *= 1 + pc->repeat_pict;
486 /* If this codec can be interlaced or progressive then we need
487 * a parser to compute the duration of a packet. If we have no
488 * parser in that case, leave the duration undefined. */
489 if (st->codec->ticks_per_frame > 1 && !pc)
493 case AVMEDIA_TYPE_AUDIO:
494 frame_size = av_get_audio_frame_duration(st->codec, pkt->size);
495 if (frame_size <= 0 || st->codec->sample_rate <= 0)
498 *pden = st->codec->sample_rate;
505 static int is_intra_only(enum AVCodecID id)
507 const AVCodecDescriptor *d = avcodec_descriptor_get(id);
510 if (d->type == AVMEDIA_TYPE_VIDEO && !(d->props & AV_CODEC_PROP_INTRA_ONLY))
515 static void update_initial_timestamps(AVFormatContext *s, int stream_index,
516 int64_t dts, int64_t pts)
518 AVStream *st = s->streams[stream_index];
519 AVPacketList *pktl = s->internal->packet_buffer;
521 if (st->first_dts != AV_NOPTS_VALUE ||
522 dts == AV_NOPTS_VALUE ||
523 st->cur_dts == AV_NOPTS_VALUE)
526 st->first_dts = dts - st->cur_dts;
529 for (; pktl; pktl = pktl->next) {
530 if (pktl->pkt.stream_index != stream_index)
532 // FIXME: think more about this check
533 if (pktl->pkt.pts != AV_NOPTS_VALUE && pktl->pkt.pts == pktl->pkt.dts)
534 pktl->pkt.pts += st->first_dts;
536 if (pktl->pkt.dts != AV_NOPTS_VALUE)
537 pktl->pkt.dts += st->first_dts;
539 if (st->start_time == AV_NOPTS_VALUE && pktl->pkt.pts != AV_NOPTS_VALUE)
540 st->start_time = pktl->pkt.pts;
542 if (st->start_time == AV_NOPTS_VALUE)
543 st->start_time = pts;
546 static void update_initial_durations(AVFormatContext *s, AVStream *st,
547 int stream_index, int duration)
549 AVPacketList *pktl = s->internal->packet_buffer;
552 if (st->first_dts != AV_NOPTS_VALUE) {
553 cur_dts = st->first_dts;
554 for (; pktl; pktl = pktl->next) {
555 if (pktl->pkt.stream_index == stream_index) {
556 if (pktl->pkt.pts != pktl->pkt.dts ||
557 pktl->pkt.dts != AV_NOPTS_VALUE ||
563 pktl = s->internal->packet_buffer;
564 st->first_dts = cur_dts;
565 } else if (st->cur_dts)
568 for (; pktl; pktl = pktl->next) {
569 if (pktl->pkt.stream_index != stream_index)
571 if (pktl->pkt.pts == pktl->pkt.dts &&
572 pktl->pkt.dts == AV_NOPTS_VALUE &&
573 !pktl->pkt.duration) {
574 pktl->pkt.dts = cur_dts;
575 if (!st->codec->has_b_frames)
576 pktl->pkt.pts = cur_dts;
578 if (st->codec->codec_type != AVMEDIA_TYPE_AUDIO)
579 pktl->pkt.duration = duration;
583 if (st->first_dts == AV_NOPTS_VALUE)
584 st->cur_dts = cur_dts;
587 static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
588 AVCodecParserContext *pc, AVPacket *pkt)
590 int num, den, presentation_delayed, delay, i;
593 if (s->flags & AVFMT_FLAG_NOFILLIN)
596 if ((s->flags & AVFMT_FLAG_IGNDTS) && pkt->pts != AV_NOPTS_VALUE)
597 pkt->dts = AV_NOPTS_VALUE;
599 /* do we have a video B-frame ? */
600 delay = st->codec->has_b_frames;
601 presentation_delayed = 0;
603 /* XXX: need has_b_frames, but cannot get it if the codec is
606 pc && pc->pict_type != AV_PICTURE_TYPE_B)
607 presentation_delayed = 1;
609 if (pkt->pts != AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE &&
610 st->pts_wrap_bits < 63 &&
611 pkt->dts - (1LL << (st->pts_wrap_bits - 1)) > pkt->pts) {
612 pkt->dts -= 1LL << st->pts_wrap_bits;
615 /* Some MPEG-2 in MPEG-PS lack dts (issue #171 / input_file.mpg).
616 * We take the conservative approach and discard both.
617 * Note: If this is misbehaving for an H.264 file, then possibly
618 * presentation_delayed is not set correctly. */
619 if (delay == 1 && pkt->dts == pkt->pts &&
620 pkt->dts != AV_NOPTS_VALUE && presentation_delayed) {
621 av_log(s, AV_LOG_DEBUG, "invalid dts/pts combination\n");
622 pkt->dts = AV_NOPTS_VALUE;
625 if (pkt->duration == 0 && st->codec->codec_type != AVMEDIA_TYPE_AUDIO) {
626 ff_compute_frame_duration(s, &num, &den, st, pc, pkt);
628 pkt->duration = av_rescale_rnd(1, num * (int64_t) st->time_base.den,
629 den * (int64_t) st->time_base.num,
632 if (pkt->duration != 0 && s->internal->packet_buffer)
633 update_initial_durations(s, st, pkt->stream_index,
638 /* Correct timestamps with byte offset if demuxers only have timestamps
639 * on packet boundaries */
640 if (pc && st->need_parsing == AVSTREAM_PARSE_TIMESTAMPS && pkt->size) {
641 /* this will estimate bitrate based on this frame's duration and size */
642 offset = av_rescale(pc->offset, pkt->duration, pkt->size);
643 if (pkt->pts != AV_NOPTS_VALUE)
645 if (pkt->dts != AV_NOPTS_VALUE)
649 /* This may be redundant, but it should not hurt. */
650 if (pkt->dts != AV_NOPTS_VALUE &&
651 pkt->pts != AV_NOPTS_VALUE &&
653 presentation_delayed = 1;
655 av_log(NULL, AV_LOG_TRACE,
656 "IN delayed:%d pts:%"PRId64", dts:%"PRId64" "
657 "cur_dts:%"PRId64" st:%d pc:%p\n",
658 presentation_delayed, pkt->pts, pkt->dts, st->cur_dts,
659 pkt->stream_index, pc);
660 /* Interpolate PTS and DTS if they are not present. We skip H.264
661 * currently because delay and has_b_frames are not reliably set. */
662 if ((delay == 0 || (delay == 1 && pc)) &&
663 st->codec->codec_id != AV_CODEC_ID_H264) {
664 if (presentation_delayed) {
665 /* DTS = decompression timestamp */
666 /* PTS = presentation timestamp */
667 if (pkt->dts == AV_NOPTS_VALUE)
668 pkt->dts = st->last_IP_pts;
669 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts);
670 if (pkt->dts == AV_NOPTS_VALUE)
671 pkt->dts = st->cur_dts;
673 /* This is tricky: the dts must be incremented by the duration
674 * of the frame we are displaying, i.e. the last I- or P-frame. */
675 if (st->last_IP_duration == 0)
676 st->last_IP_duration = pkt->duration;
677 if (pkt->dts != AV_NOPTS_VALUE)
678 st->cur_dts = pkt->dts + st->last_IP_duration;
679 st->last_IP_duration = pkt->duration;
680 st->last_IP_pts = pkt->pts;
681 /* Cannot compute PTS if not present (we can compute it only
682 * by knowing the future). */
683 } else if (pkt->pts != AV_NOPTS_VALUE ||
684 pkt->dts != AV_NOPTS_VALUE ||
686 st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
687 int duration = pkt->duration;
688 if (!duration && st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
689 ff_compute_frame_duration(s, &num, &den, st, pc, pkt);
691 duration = av_rescale_rnd(1,
692 num * (int64_t) st->time_base.den,
693 den * (int64_t) st->time_base.num,
695 if (duration != 0 && s->internal->packet_buffer)
696 update_initial_durations(s, st, pkt->stream_index,
701 if (pkt->pts != AV_NOPTS_VALUE || pkt->dts != AV_NOPTS_VALUE ||
703 /* presentation is not delayed: PTS and DTS are the same */
704 if (pkt->pts == AV_NOPTS_VALUE)
706 update_initial_timestamps(s, pkt->stream_index, pkt->pts,
708 if (pkt->pts == AV_NOPTS_VALUE)
709 pkt->pts = st->cur_dts;
711 if (pkt->pts != AV_NOPTS_VALUE)
712 st->cur_dts = pkt->pts + duration;
717 if (pkt->pts != AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY) {
718 st->pts_buffer[0] = pkt->pts;
719 for (i = 0; i<delay && st->pts_buffer[i] > st->pts_buffer[i + 1]; i++)
720 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i + 1]);
721 if (pkt->dts == AV_NOPTS_VALUE)
722 pkt->dts = st->pts_buffer[0];
723 // We skipped it above so we try here.
724 if (st->codec->codec_id == AV_CODEC_ID_H264)
725 // This should happen on the first packet
726 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts);
727 if (pkt->dts > st->cur_dts)
728 st->cur_dts = pkt->dts;
731 av_log(NULL, AV_LOG_TRACE,
732 "OUTdelayed:%d/%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64"\n",
733 presentation_delayed, delay, pkt->pts, pkt->dts, st->cur_dts);
736 if (is_intra_only(st->codec->codec_id))
737 pkt->flags |= AV_PKT_FLAG_KEY;
739 pkt->convergence_duration = pc->convergence_duration;
742 static void free_packet_buffer(AVPacketList **pkt_buf, AVPacketList **pkt_buf_end)
745 AVPacketList *pktl = *pkt_buf;
746 *pkt_buf = pktl->next;
747 av_free_packet(&pktl->pkt);
754 * Parse a packet, add all split parts to parse_queue.
756 * @param pkt Packet to parse, NULL when flushing the parser at end of stream.
758 static int parse_packet(AVFormatContext *s, AVPacket *pkt, int stream_index)
760 AVPacket out_pkt = { 0 }, flush_pkt = { 0 };
761 AVStream *st = s->streams[stream_index];
762 uint8_t *data = pkt ? pkt->data : NULL;
763 int size = pkt ? pkt->size : 0;
764 int ret = 0, got_output = 0;
767 av_init_packet(&flush_pkt);
772 while (size > 0 || (pkt == &flush_pkt && got_output)) {
775 av_init_packet(&out_pkt);
776 len = av_parser_parse2(st->parser, st->codec,
777 &out_pkt.data, &out_pkt.size, data, size,
778 pkt->pts, pkt->dts, pkt->pos);
780 pkt->pts = pkt->dts = AV_NOPTS_VALUE;
781 /* increment read pointer */
785 got_output = !!out_pkt.size;
790 if (pkt->side_data) {
791 out_pkt.side_data = pkt->side_data;
792 out_pkt.side_data_elems = pkt->side_data_elems;
793 pkt->side_data = NULL;
794 pkt->side_data_elems = 0;
797 /* set the duration */
798 out_pkt.duration = 0;
799 if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
800 if (st->codec->sample_rate > 0) {
802 av_rescale_q_rnd(st->parser->duration,
803 (AVRational) { 1, st->codec->sample_rate },
809 out_pkt.stream_index = st->index;
810 out_pkt.pts = st->parser->pts;
811 out_pkt.dts = st->parser->dts;
812 out_pkt.pos = st->parser->pos;
814 if (st->parser->key_frame == 1 ||
815 (st->parser->key_frame == -1 &&
816 st->parser->pict_type == AV_PICTURE_TYPE_I))
817 out_pkt.flags |= AV_PKT_FLAG_KEY;
819 compute_pkt_fields(s, st, st->parser, &out_pkt);
821 if ((s->iformat->flags & AVFMT_GENERIC_INDEX) &&
822 out_pkt.flags & AV_PKT_FLAG_KEY) {
823 ff_reduce_index(s, st->index);
824 av_add_index_entry(st, st->parser->frame_offset, out_pkt.dts,
825 0, 0, AVINDEX_KEYFRAME);
828 if (out_pkt.data == pkt->data && out_pkt.size == pkt->size) {
829 out_pkt.buf = pkt->buf;
832 if ((ret = av_dup_packet(&out_pkt)) < 0)
835 if (!add_to_pktbuf(&s->internal->parse_queue, &out_pkt, &s->internal->parse_queue_end)) {
836 av_free_packet(&out_pkt);
837 ret = AVERROR(ENOMEM);
842 /* end of the stream => close and free the parser */
843 if (pkt == &flush_pkt) {
844 av_parser_close(st->parser);
853 static int read_from_packet_buffer(AVPacketList **pkt_buffer,
854 AVPacketList **pkt_buffer_end,
858 av_assert0(*pkt_buffer);
861 *pkt_buffer = pktl->next;
863 *pkt_buffer_end = NULL;
868 static int read_frame_internal(AVFormatContext *s, AVPacket *pkt)
870 int ret = 0, i, got_packet = 0;
871 AVDictionary *metadata = NULL;
875 while (!got_packet && !s->internal->parse_queue) {
879 /* read next packet */
880 ret = ff_read_packet(s, &cur_pkt);
882 if (ret == AVERROR(EAGAIN))
884 /* flush the parsers */
885 for (i = 0; i < s->nb_streams; i++) {
887 if (st->parser && st->need_parsing)
888 parse_packet(s, NULL, st->index);
890 /* all remaining packets are now in parse_queue =>
891 * really terminate parsing */
895 st = s->streams[cur_pkt.stream_index];
897 if (cur_pkt.pts != AV_NOPTS_VALUE &&
898 cur_pkt.dts != AV_NOPTS_VALUE &&
899 cur_pkt.pts < cur_pkt.dts) {
900 av_log(s, AV_LOG_WARNING,
901 "Invalid timestamps stream=%d, pts=%"PRId64", "
902 "dts=%"PRId64", size=%d\n",
903 cur_pkt.stream_index, cur_pkt.pts,
904 cur_pkt.dts, cur_pkt.size);
906 if (s->debug & FF_FDEBUG_TS)
907 av_log(s, AV_LOG_DEBUG,
908 "ff_read_packet stream=%d, pts=%"PRId64", dts=%"PRId64", "
909 "size=%d, duration=%d, flags=%d\n",
910 cur_pkt.stream_index, cur_pkt.pts, cur_pkt.dts,
911 cur_pkt.size, cur_pkt.duration, cur_pkt.flags);
913 if (st->need_parsing && !st->parser && !(s->flags & AVFMT_FLAG_NOPARSE)) {
914 st->parser = av_parser_init(st->codec->codec_id);
916 /* no parser available: just output the raw packets */
917 st->need_parsing = AVSTREAM_PARSE_NONE;
918 else if (st->need_parsing == AVSTREAM_PARSE_HEADERS)
919 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
920 else if (st->need_parsing == AVSTREAM_PARSE_FULL_ONCE)
921 st->parser->flags |= PARSER_FLAG_ONCE;
924 if (!st->need_parsing || !st->parser) {
925 /* no parsing needed: we just output the packet as is */
927 compute_pkt_fields(s, st, NULL, pkt);
928 if ((s->iformat->flags & AVFMT_GENERIC_INDEX) &&
929 (pkt->flags & AV_PKT_FLAG_KEY) && pkt->dts != AV_NOPTS_VALUE) {
930 ff_reduce_index(s, st->index);
931 av_add_index_entry(st, pkt->pos, pkt->dts,
932 0, 0, AVINDEX_KEYFRAME);
935 } else if (st->discard < AVDISCARD_ALL) {
936 if ((ret = parse_packet(s, &cur_pkt, cur_pkt.stream_index)) < 0)
940 av_free_packet(&cur_pkt);
944 if (!got_packet && s->internal->parse_queue)
945 ret = read_from_packet_buffer(&s->internal->parse_queue, &s->internal->parse_queue_end, pkt);
947 av_opt_get_dict_val(s, "metadata", AV_OPT_SEARCH_CHILDREN, &metadata);
949 s->event_flags |= AVFMT_EVENT_FLAG_METADATA_UPDATED;
950 av_dict_copy(&s->metadata, metadata, 0);
951 av_dict_free(&metadata);
952 av_opt_set_dict_val(s, "metadata", NULL, AV_OPT_SEARCH_CHILDREN);
955 if (s->debug & FF_FDEBUG_TS)
956 av_log(s, AV_LOG_DEBUG,
957 "read_frame_internal stream=%d, pts=%"PRId64", dts=%"PRId64", "
958 "size=%d, duration=%d, flags=%d\n",
959 pkt->stream_index, pkt->pts, pkt->dts,
960 pkt->size, pkt->duration, pkt->flags);
965 int av_read_frame(AVFormatContext *s, AVPacket *pkt)
967 const int genpts = s->flags & AVFMT_FLAG_GENPTS;
971 return s->internal->packet_buffer
972 ? read_from_packet_buffer(&s->internal->packet_buffer,
973 &s->internal->packet_buffer_end, pkt)
974 : read_frame_internal(s, pkt);
978 AVPacketList *pktl = s->internal->packet_buffer;
981 AVPacket *next_pkt = &pktl->pkt;
983 if (next_pkt->dts != AV_NOPTS_VALUE) {
984 int wrap_bits = s->streams[next_pkt->stream_index]->pts_wrap_bits;
985 while (pktl && next_pkt->pts == AV_NOPTS_VALUE) {
986 if (pktl->pkt.stream_index == next_pkt->stream_index &&
987 (av_compare_mod(next_pkt->dts, pktl->pkt.dts, 2LL << (wrap_bits - 1)) < 0) &&
988 av_compare_mod(pktl->pkt.pts, pktl->pkt.dts, 2LL << (wrap_bits - 1))) {
990 next_pkt->pts = pktl->pkt.dts;
994 pktl = s->internal->packet_buffer;
997 /* read packet from packet buffer, if there is data */
998 if (!(next_pkt->pts == AV_NOPTS_VALUE &&
999 next_pkt->dts != AV_NOPTS_VALUE && !eof))
1000 return read_from_packet_buffer(&s->internal->packet_buffer,
1001 &s->internal->packet_buffer_end, pkt);
1004 ret = read_frame_internal(s, pkt);
1006 if (pktl && ret != AVERROR(EAGAIN)) {
1013 if (av_dup_packet(add_to_pktbuf(&s->internal->packet_buffer, pkt,
1014 &s->internal->packet_buffer_end)) < 0)
1015 return AVERROR(ENOMEM);
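/* Usage sketch (illustrative only, not part of the library): a minimal
 * demuxing loop over av_read_frame(). Every successfully returned packet must
 * be freed by the caller; with AVFMT_FLAG_GENPTS set, missing PTS values are
 * filled in from the buffered packets as implemented above.
 *
 * @code
 * #include <libavformat/avformat.h>
 *
 * static int count_video_packets(AVFormatContext *fmt, int video_index)
 * {
 *     AVPacket pkt;
 *     int count = 0;
 *     while (av_read_frame(fmt, &pkt) >= 0) {
 *         if (pkt.stream_index == video_index)
 *             count++;
 *         av_free_packet(&pkt);            // release the packet's buffers
 *     }
 *     return count;                        // the loop ends on EOF or error
 * }
 * @endcode
 */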
1019 /* XXX: free the packet queue */
1020 static void flush_packet_queue(AVFormatContext *s)
1022 free_packet_buffer(&s->internal->parse_queue, &s->internal->parse_queue_end);
1023 free_packet_buffer(&s->internal->packet_buffer, &s->internal->packet_buffer_end);
1024 free_packet_buffer(&s->internal->raw_packet_buffer, &s->internal->raw_packet_buffer_end);
1026 s->internal->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
1029 /*******************************************************/
1032 int av_find_default_stream_index(AVFormatContext *s)
1034 int first_audio_index = -1;
1038 if (s->nb_streams <= 0)
1040 for (i = 0; i < s->nb_streams; i++) {
1042 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
1043 !(st->disposition & AV_DISPOSITION_ATTACHED_PIC)) {
1046 if (first_audio_index < 0 &&
1047 st->codec->codec_type == AVMEDIA_TYPE_AUDIO)
1048 first_audio_index = i;
1050 return first_audio_index >= 0 ? first_audio_index : 0;
1053 /** Flush the frame reader. */
1054 void ff_read_frame_flush(AVFormatContext *s)
1059 flush_packet_queue(s);
1061 /* Reset read state for each stream. */
1062 for (i = 0; i < s->nb_streams; i++) {
1066 av_parser_close(st->parser);
1069 st->last_IP_pts = AV_NOPTS_VALUE;
1070 /* We set the current DTS to an unspecified origin. */
1071 st->cur_dts = AV_NOPTS_VALUE;
1073 st->probe_packets = MAX_PROBE_PACKETS;
1075 for (j = 0; j < MAX_REORDER_DELAY + 1; j++)
1076 st->pts_buffer[j] = AV_NOPTS_VALUE;
1080 void ff_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp)
1084 for (i = 0; i < s->nb_streams; i++) {
1085 AVStream *st = s->streams[i];
1088 av_rescale(timestamp,
1089 st->time_base.den * (int64_t) ref_st->time_base.num,
1090 st->time_base.num * (int64_t) ref_st->time_base.den);
1094 void ff_reduce_index(AVFormatContext *s, int stream_index)
1096 AVStream *st = s->streams[stream_index];
1097 unsigned int max_entries = s->max_index_size / sizeof(AVIndexEntry);
1099 if ((unsigned) st->nb_index_entries >= max_entries) {
1101 for (i = 0; 2 * i < st->nb_index_entries; i++)
1102 st->index_entries[i] = st->index_entries[2 * i];
1103 st->nb_index_entries = i;
1107 int ff_add_index_entry(AVIndexEntry **index_entries,
1108 int *nb_index_entries,
1109 unsigned int *index_entries_allocated_size,
1110 int64_t pos, int64_t timestamp,
1111 int size, int distance, int flags)
1113 AVIndexEntry *entries, *ie;
1116 if ((unsigned) *nb_index_entries + 1 >= UINT_MAX / sizeof(AVIndexEntry))
1119 entries = av_fast_realloc(*index_entries,
1120 index_entries_allocated_size,
1121 (*nb_index_entries + 1) *
1122 sizeof(AVIndexEntry));
1126 *index_entries = entries;
1128 index = ff_index_search_timestamp(*index_entries, *nb_index_entries,
1129 timestamp, AVSEEK_FLAG_ANY);
1132 index = (*nb_index_entries)++;
1133 ie = &entries[index];
1134 assert(index == 0 || ie[-1].timestamp < timestamp);
1136 ie = &entries[index];
1137 if (ie->timestamp != timestamp) {
1138 if (ie->timestamp <= timestamp)
1140 memmove(entries + index + 1, entries + index,
1141 sizeof(AVIndexEntry) * (*nb_index_entries - index));
1142 (*nb_index_entries)++;
1143 } else if (ie->pos == pos && distance < ie->min_distance)
1144 // do not reduce the distance
1145 distance = ie->min_distance;
1149 ie->timestamp = timestamp;
1150 ie->min_distance = distance;
1157 int av_add_index_entry(AVStream *st, int64_t pos, int64_t timestamp,
1158 int size, int distance, int flags)
1160 return ff_add_index_entry(&st->index_entries, &st->nb_index_entries,
1161 &st->index_entries_allocated_size, pos,
1162 timestamp, size, distance, flags);
1165 int ff_index_search_timestamp(const AVIndexEntry *entries, int nb_entries,
1166 int64_t wanted_timestamp, int flags)
1174 // Optimize appending index entries at the end.
1175 if (b && entries[b - 1].timestamp < wanted_timestamp)
1180 timestamp = entries[m].timestamp;
1181 if (timestamp >= wanted_timestamp)
1183 if (timestamp <= wanted_timestamp)
1186 m = (flags & AVSEEK_FLAG_BACKWARD) ? a : b;
1188 if (!(flags & AVSEEK_FLAG_ANY))
1189 while (m >= 0 && m < nb_entries &&
1190 !(entries[m].flags & AVINDEX_KEYFRAME))
1191 m += (flags & AVSEEK_FLAG_BACKWARD) ? -1 : 1;
1193 if (m == nb_entries)
1198 int av_index_search_timestamp(AVStream *st, int64_t wanted_timestamp, int flags)
1200 return ff_index_search_timestamp(st->index_entries, st->nb_index_entries,
1201 wanted_timestamp, flags);
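/* Usage sketch (illustrative only, not part of the library): how a demuxer
 * might populate and query the index maintained by the helpers above.
 * Timestamps are in the stream's own time base; the values here are made up.
 *
 * @code
 * #include <libavformat/avformat.h>
 *
 * static int64_t nearest_keyframe_pos(AVStream *st, int64_t ts)
 * {
 *     // record a keyframe at byte offset 4096 with timestamp 900
 *     av_add_index_entry(st, 4096, 900, 0, 0, AVINDEX_KEYFRAME);
 *
 *     // find the last indexed keyframe at or before ts
 *     int idx = av_index_search_timestamp(st, ts, AVSEEK_FLAG_BACKWARD);
 *     return idx < 0 ? -1 : st->index_entries[idx].pos;
 * }
 * @endcode
 */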
1204 int ff_seek_frame_binary(AVFormatContext *s, int stream_index,
1205 int64_t target_ts, int flags)
1207 AVInputFormat *avif = s->iformat;
1208 int64_t av_uninit(pos_min), av_uninit(pos_max), pos, pos_limit;
1209 int64_t ts_min, ts_max, ts;
1214 if (stream_index < 0)
1217 av_log(s, AV_LOG_TRACE, "read_seek: %d %"PRId64"\n", stream_index, target_ts);
1220 ts_min = AV_NOPTS_VALUE;
1221 pos_limit = -1; // GCC falsely says it may be uninitialized.
1223 st = s->streams[stream_index];
1224 if (st->index_entries) {
1227 /* FIXME: Whole function must be checked for non-keyframe entries in
1228 * index case, especially read_timestamp(). */
1229 index = av_index_search_timestamp(st, target_ts,
1230 flags | AVSEEK_FLAG_BACKWARD);
1231 index = FFMAX(index, 0);
1232 e = &st->index_entries[index];
1234 if (e->timestamp <= target_ts || e->pos == e->min_distance) {
1236 ts_min = e->timestamp;
1237 av_log(s, AV_LOG_TRACE, "using cached pos_min=0x%"PRIx64" dts_min=%"PRId64"\n",
1243 index = av_index_search_timestamp(st, target_ts,
1244 flags & ~AVSEEK_FLAG_BACKWARD);
1245 assert(index < st->nb_index_entries);
1247 e = &st->index_entries[index];
1248 assert(e->timestamp >= target_ts);
1250 ts_max = e->timestamp;
1251 pos_limit = pos_max - e->min_distance;
1252 av_log(s, AV_LOG_TRACE, "using cached pos_max=0x%"PRIx64" pos_limit=0x%"PRIx64
1253 " dts_max=%"PRId64"\n", pos_max, pos_limit, ts_max);
1257 pos = ff_gen_search(s, stream_index, target_ts, pos_min, pos_max, pos_limit,
1258 ts_min, ts_max, flags, &ts, avif->read_timestamp);
1263 if ((ret = avio_seek(s->pb, pos, SEEK_SET)) < 0)
1266 ff_update_cur_dts(s, st, ts);
1271 int64_t ff_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts,
1272 int64_t pos_min, int64_t pos_max, int64_t pos_limit,
1273 int64_t ts_min, int64_t ts_max,
1274 int flags, int64_t *ts_ret,
1275 int64_t (*read_timestamp)(struct AVFormatContext *, int,
1276 int64_t *, int64_t))
1279 int64_t start_pos, filesize;
1282 av_log(s, AV_LOG_TRACE, "gen_seek: %d %"PRId64"\n", stream_index, target_ts);
1284 if (ts_min == AV_NOPTS_VALUE) {
1285 pos_min = s->internal->data_offset;
1286 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1287 if (ts_min == AV_NOPTS_VALUE)
1291 if (ts_max == AV_NOPTS_VALUE) {
1293 filesize = avio_size(s->pb);
1294 pos_max = filesize - 1;
1297 ts_max = read_timestamp(s, stream_index, &pos_max,
1300 } while (ts_max == AV_NOPTS_VALUE && pos_max >= step);
1301 if (ts_max == AV_NOPTS_VALUE)
1305 int64_t tmp_pos = pos_max + 1;
1306 int64_t tmp_ts = read_timestamp(s, stream_index,
1307 &tmp_pos, INT64_MAX);
1308 if (tmp_ts == AV_NOPTS_VALUE)
1312 if (tmp_pos >= filesize)
1315 pos_limit = pos_max;
1318 if (ts_min > ts_max)
1320 else if (ts_min == ts_max)
1321 pos_limit = pos_min;
1324 while (pos_min < pos_limit) {
1325 av_log(s, AV_LOG_TRACE, "pos_min=0x%"PRIx64" pos_max=0x%"PRIx64" dts_min=%"PRId64
1326 " dts_max=%"PRId64"\n", pos_min, pos_max, ts_min, ts_max);
1327 assert(pos_limit <= pos_max);
1329 if (no_change == 0) {
1330 int64_t approximate_keyframe_distance = pos_max - pos_limit;
1331 // interpolate position (better than dichotomy)
1332 pos = av_rescale(target_ts - ts_min, pos_max - pos_min,
1334 pos_min - approximate_keyframe_distance;
1335 } else if (no_change == 1) {
1336 // bisection if interpolation did not change min / max pos last time
1337 pos = (pos_min + pos_limit) >> 1;
1339 /* linear search if bisection failed, can only happen if there
1340 * are very few or no keyframes between min/max */
1345 else if (pos > pos_limit)
1349 // May pass pos_limit instead of -1.
1350 ts = read_timestamp(s, stream_index, &pos, INT64_MAX);
1355 av_log(s, AV_LOG_TRACE, "%"PRId64" %"PRId64" %"PRId64" / %"PRId64" %"PRId64" %"PRId64
1356 " target:%"PRId64" limit:%"PRId64" start:%"PRId64" noc:%d\n",
1357 pos_min, pos, pos_max, ts_min, ts, ts_max, target_ts,
1358 pos_limit, start_pos, no_change);
1359 if (ts == AV_NOPTS_VALUE) {
1360 av_log(s, AV_LOG_ERROR, "read_timestamp() failed in the middle\n");
1363 assert(ts != AV_NOPTS_VALUE);
1364 if (target_ts <= ts) {
1365 pos_limit = start_pos - 1;
1369 if (target_ts >= ts) {
1375 pos = (flags & AVSEEK_FLAG_BACKWARD) ? pos_min : pos_max;
1376 ts = (flags & AVSEEK_FLAG_BACKWARD) ? ts_min : ts_max;
1378 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1380 ts_max = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1381 av_log(s, AV_LOG_TRACE, "pos=0x%"PRIx64" %"PRId64"<=%"PRId64"<=%"PRId64"\n",
1382 pos, ts_min, target_ts, ts_max);
1387 static int seek_frame_byte(AVFormatContext *s, int stream_index,
1388 int64_t pos, int flags)
1390 int64_t pos_min, pos_max;
1392 pos_min = s->internal->data_offset;
1393 pos_max = avio_size(s->pb) - 1;
1397 else if (pos > pos_max)
1400 avio_seek(s->pb, pos, SEEK_SET);
1405 static int seek_frame_generic(AVFormatContext *s, int stream_index,
1406 int64_t timestamp, int flags)
1413 st = s->streams[stream_index];
1415 index = av_index_search_timestamp(st, timestamp, flags);
1417 if (index < 0 && st->nb_index_entries &&
1418 timestamp < st->index_entries[0].timestamp)
1421 if (index < 0 || index == st->nb_index_entries - 1) {
1424 if (st->nb_index_entries) {
1425 assert(st->index_entries);
1426 ie = &st->index_entries[st->nb_index_entries - 1];
1427 if ((ret = avio_seek(s->pb, ie->pos, SEEK_SET)) < 0)
1429 ff_update_cur_dts(s, st, ie->timestamp);
1431 if ((ret = avio_seek(s->pb, s->internal->data_offset, SEEK_SET)) < 0)
1437 read_status = av_read_frame(s, &pkt);
1438 } while (read_status == AVERROR(EAGAIN));
1439 if (read_status < 0)
1441 av_free_packet(&pkt);
1442 if (stream_index == pkt.stream_index)
1443 if ((pkt.flags & AV_PKT_FLAG_KEY) && pkt.dts > timestamp)
1446 index = av_index_search_timestamp(st, timestamp, flags);
1451 ff_read_frame_flush(s);
1452 if (s->iformat->read_seek)
1453 if (s->iformat->read_seek(s, stream_index, timestamp, flags) >= 0)
1455 ie = &st->index_entries[index];
1456 if ((ret = avio_seek(s->pb, ie->pos, SEEK_SET)) < 0)
1458 ff_update_cur_dts(s, st, ie->timestamp);
1463 static int seek_frame_internal(AVFormatContext *s, int stream_index,
1464 int64_t timestamp, int flags)
1469 if (flags & AVSEEK_FLAG_BYTE) {
1470 if (s->iformat->flags & AVFMT_NO_BYTE_SEEK)
1472 ff_read_frame_flush(s);
1473 return seek_frame_byte(s, stream_index, timestamp, flags);
1476 if (stream_index < 0) {
1477 stream_index = av_find_default_stream_index(s);
1478 if (stream_index < 0)
1481 st = s->streams[stream_index];
1482 /* timestamp for default must be expressed in AV_TIME_BASE units */
1483 timestamp = av_rescale(timestamp, st->time_base.den,
1484 AV_TIME_BASE * (int64_t) st->time_base.num);
1487 /* first, we try the format specific seek */
1488 if (s->iformat->read_seek) {
1489 ff_read_frame_flush(s);
1490 ret = s->iformat->read_seek(s, stream_index, timestamp, flags);
1496 if (s->iformat->read_timestamp &&
1497 !(s->iformat->flags & AVFMT_NOBINSEARCH)) {
1498 ff_read_frame_flush(s);
1499 return ff_seek_frame_binary(s, stream_index, timestamp, flags);
1500 } else if (!(s->iformat->flags & AVFMT_NOGENSEARCH)) {
1501 ff_read_frame_flush(s);
1502 return seek_frame_generic(s, stream_index, timestamp, flags);
1507 int av_seek_frame(AVFormatContext *s, int stream_index,
1508 int64_t timestamp, int flags)
1510 int ret = seek_frame_internal(s, stream_index, timestamp, flags);
1513 ret = queue_attached_pictures(s);
1518 int avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts,
1519 int64_t ts, int64_t max_ts, int flags)
1521 if (min_ts > ts || max_ts < ts)
1524 if (s->iformat->read_seek2) {
1526 ff_read_frame_flush(s);
1527 ret = s->iformat->read_seek2(s, stream_index, min_ts,
1531 ret = queue_attached_pictures(s);
1535 if (s->iformat->read_timestamp) {
1536 // try to seek via read_timestamp()
1539 // Fall back on old API if new is not implemented but old is.
1540 // Note the old API has somewhat different semantics.
1541 if (s->iformat->read_seek || 1)
1542 return av_seek_frame(s, stream_index, ts,
1543 flags | ((uint64_t) ts - min_ts >
1544 (uint64_t) max_ts - ts
1545 ? AVSEEK_FLAG_BACKWARD : 0));
1547 // try some generic seek like seek_frame_generic() but with new ts semantics
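/* Usage sketch (illustrative only, not part of the library): seeking to an
 * absolute time with the newer API, which falls back to av_seek_frame()
 * semantics internally as described above. "seconds" is a caller-supplied
 * target.
 *
 * @code
 * #include <stdint.h>
 * #include <libavformat/avformat.h>
 *
 * static int seek_to_seconds(AVFormatContext *fmt, double seconds)
 * {
 *     // stream_index -1 means the timestamp is in AV_TIME_BASE units
 *     int64_t ts = (int64_t)(seconds * AV_TIME_BASE);
 *     return avformat_seek_file(fmt, -1, INT64_MIN, ts, ts, 0);
 * }
 * @endcode
 * After a successful seek the caller normally flushes its decoders
 * (avcodec_flush_buffers()) before continuing to read frames.
 */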
1550 /*******************************************************/
1553 * Return TRUE if the container has an accurate duration in any stream.
1555 * @return TRUE if at least one stream has an accurate duration.
1557 static int has_duration(AVFormatContext *ic)
1562 for (i = 0; i < ic->nb_streams; i++) {
1563 st = ic->streams[i];
1564 if (st->duration != AV_NOPTS_VALUE)
1567 if (ic->duration != AV_NOPTS_VALUE)
1573 * Estimate the stream timings from those of each component.
1575 * Also computes the global bitrate if possible.
1577 static void update_stream_timings(AVFormatContext *ic)
1579 int64_t start_time, start_time1, end_time, end_time1;
1580 int64_t duration, duration1, filesize;
1584 start_time = INT64_MAX;
1585 end_time = INT64_MIN;
1586 duration = INT64_MIN;
1587 for (i = 0; i < ic->nb_streams; i++) {
1588 st = ic->streams[i];
1589 if (st->start_time != AV_NOPTS_VALUE && st->time_base.den) {
1590 start_time1 = av_rescale_q(st->start_time, st->time_base,
1592 start_time = FFMIN(start_time, start_time1);
1593 if (st->duration != AV_NOPTS_VALUE) {
1594 end_time1 = start_time1 +
1595 av_rescale_q(st->duration, st->time_base,
1597 end_time = FFMAX(end_time, end_time1);
1600 if (st->duration != AV_NOPTS_VALUE) {
1601 duration1 = av_rescale_q(st->duration, st->time_base,
1603 duration = FFMAX(duration, duration1);
1606 if (start_time != INT64_MAX) {
1607 ic->start_time = start_time;
1608 if (end_time != INT64_MIN)
1609 duration = FFMAX(duration, end_time - start_time);
1611 if (duration != INT64_MIN) {
1612 ic->duration = duration;
1613 if (ic->pb && (filesize = avio_size(ic->pb)) > 0)
1614 /* compute the bitrate */
1615 ic->bit_rate = (double) filesize * 8.0 * AV_TIME_BASE /
1616 (double) ic->duration;
1620 static void fill_all_stream_timings(AVFormatContext *ic)
1625 update_stream_timings(ic);
1626 for (i = 0; i < ic->nb_streams; i++) {
1627 st = ic->streams[i];
1628 if (st->start_time == AV_NOPTS_VALUE) {
1629 if (ic->start_time != AV_NOPTS_VALUE)
1630 st->start_time = av_rescale_q(ic->start_time, AV_TIME_BASE_Q,
1632 if (ic->duration != AV_NOPTS_VALUE)
1633 st->duration = av_rescale_q(ic->duration, AV_TIME_BASE_Q,
1639 static void estimate_timings_from_bit_rate(AVFormatContext *ic)
1641 int64_t filesize, duration;
1645 /* if bit_rate is already set, we believe it */
1646 if (ic->bit_rate <= 0) {
1648 for (i = 0; i < ic->nb_streams; i++) {
1649 st = ic->streams[i];
1650 if (st->codec->bit_rate > 0) {
1651 if (INT_MAX - st->codec->bit_rate < bit_rate) {
1655 bit_rate += st->codec->bit_rate;
1658 ic->bit_rate = bit_rate;
1661 /* if duration is already set, we believe it */
1662 if (ic->duration == AV_NOPTS_VALUE &&
1663 ic->bit_rate != 0) {
1664 filesize = ic->pb ? avio_size(ic->pb) : 0;
1666 for (i = 0; i < ic->nb_streams; i++) {
1667 st = ic->streams[i];
1668 duration = av_rescale(8 * filesize, st->time_base.den,
1670 (int64_t) st->time_base.num);
1671 if (st->duration == AV_NOPTS_VALUE)
1672 st->duration = duration;
1678 #define DURATION_MAX_READ_SIZE 250000
1679 #define DURATION_MAX_RETRY 3
1681 /* only usable for MPEG-PS streams */
1682 static void estimate_timings_from_pts(AVFormatContext *ic, int64_t old_offset)
1684 AVPacket pkt1, *pkt = &pkt1;
1686 int read_size, i, ret;
1688 int64_t filesize, offset, duration;
1691 /* flush packet queue */
1692 flush_packet_queue(ic);
1694 for (i = 0; i < ic->nb_streams; i++) {
1695 st = ic->streams[i];
1696 if (st->start_time == AV_NOPTS_VALUE && st->first_dts == AV_NOPTS_VALUE)
1697 av_log(st->codec, AV_LOG_WARNING,
1698 "start time is not set in estimate_timings_from_pts\n");
1701 av_parser_close(st->parser);
1706 /* estimate the end time (duration) */
1707 /* XXX: may need to support wrapping */
1708 filesize = ic->pb ? avio_size(ic->pb) : 0;
1709 end_time = AV_NOPTS_VALUE;
1711 offset = filesize - (DURATION_MAX_READ_SIZE << retry);
1715 avio_seek(ic->pb, offset, SEEK_SET);
1718 if (read_size >= DURATION_MAX_READ_SIZE << (FFMAX(retry - 1, 0)))
1722 ret = ff_read_packet(ic, pkt);
1723 } while (ret == AVERROR(EAGAIN));
1726 read_size += pkt->size;
1727 st = ic->streams[pkt->stream_index];
1728 if (pkt->pts != AV_NOPTS_VALUE &&
1729 (st->start_time != AV_NOPTS_VALUE ||
1730 st->first_dts != AV_NOPTS_VALUE)) {
1731 duration = end_time = pkt->pts;
1732 if (st->start_time != AV_NOPTS_VALUE)
1733 duration -= st->start_time;
1735 duration -= st->first_dts;
1737 duration += 1LL << st->pts_wrap_bits;
1739 if (st->duration == AV_NOPTS_VALUE || st->duration < duration)
1740 st->duration = duration;
1743 av_free_packet(pkt);
1745 } while (end_time == AV_NOPTS_VALUE &&
1746 filesize > (DURATION_MAX_READ_SIZE << retry) &&
1747 ++retry <= DURATION_MAX_RETRY);
1749 fill_all_stream_timings(ic);
1751 avio_seek(ic->pb, old_offset, SEEK_SET);
1752 for (i = 0; i < ic->nb_streams; i++) {
1753 st = ic->streams[i];
1754 st->cur_dts = st->first_dts;
1755 st->last_IP_pts = AV_NOPTS_VALUE;
1759 static void estimate_timings(AVFormatContext *ic, int64_t old_offset)
1763 /* get the file size, if possible */
1764 if (ic->iformat->flags & AVFMT_NOFILE) {
1767 file_size = avio_size(ic->pb);
1768 file_size = FFMAX(0, file_size);
1771 if ((!strcmp(ic->iformat->name, "mpeg") ||
1772 !strcmp(ic->iformat->name, "mpegts")) &&
1773 file_size && ic->pb->seekable) {
1774 /* get accurate estimate from the PTSes */
1775 estimate_timings_from_pts(ic, old_offset);
1776 } else if (has_duration(ic)) {
1777 /* at least one component has timings - we use them for all
1779 fill_all_stream_timings(ic);
1781 av_log(ic, AV_LOG_WARNING,
1782 "Estimating duration from bitrate, this may be inaccurate\n");
1783 /* less precise: use bitrate info */
1784 estimate_timings_from_bit_rate(ic);
1786 update_stream_timings(ic);
1790 AVStream av_unused *st;
1791 for (i = 0; i < ic->nb_streams; i++) {
1792 st = ic->streams[i];
1793 av_log(ic, AV_LOG_TRACE, "%d: start_time: %0.3f duration: %0.3f\n", i,
1794 (double) st->start_time / AV_TIME_BASE,
1795 (double) st->duration / AV_TIME_BASE);
1797 av_log(ic, AV_LOG_TRACE,
1798 "stream: start_time: %0.3f duration: %0.3f bitrate=%d kb/s\n",
1799 (double) ic->start_time / AV_TIME_BASE,
1800 (double) ic->duration / AV_TIME_BASE,
1801 ic->bit_rate / 1000);
1805 static int has_codec_parameters(AVStream *st)
1807 AVCodecContext *avctx = st->codec;
1810 switch (avctx->codec_type) {
1811 case AVMEDIA_TYPE_AUDIO:
1812 val = avctx->sample_rate && avctx->channels;
1813 if (st->info->found_decoder >= 0 &&
1814 avctx->sample_fmt == AV_SAMPLE_FMT_NONE)
1817 case AVMEDIA_TYPE_VIDEO:
1819 if (st->info->found_decoder >= 0 && avctx->pix_fmt == AV_PIX_FMT_NONE)
1826 return avctx->codec_id != AV_CODEC_ID_NONE && val != 0;
1829 static int has_decode_delay_been_guessed(AVStream *st)
1831 return st->codec->codec_id != AV_CODEC_ID_H264 ||
1832 st->info->nb_decoded_frames >= 6;
1835 /* Return 1 if decoded data was returned, 0 if not, or a negative error code. */
1836 static int try_decode_frame(AVStream *st, AVPacket *avpkt,
1837 AVDictionary **options)
1839 const AVCodec *codec;
1840 int got_picture = 1, ret = 0;
1841 AVFrame *frame = av_frame_alloc();
1842 AVPacket pkt = *avpkt;
1845 return AVERROR(ENOMEM);
1847 if (!avcodec_is_open(st->codec) && !st->info->found_decoder) {
1848 AVDictionary *thread_opt = NULL;
1850 codec = st->codec->codec ? st->codec->codec
1851 : avcodec_find_decoder(st->codec->codec_id);
1854 st->info->found_decoder = -1;
1859 /* Force thread count to 1 since the H.264 decoder will not extract
1860 * SPS and PPS to extradata during multi-threaded decoding. */
1861 av_dict_set(options ? options : &thread_opt, "threads", "1", 0);
1862 ret = avcodec_open2(st->codec, codec, options ? options : &thread_opt);
1864 av_dict_free(&thread_opt);
1866 st->info->found_decoder = -1;
1869 st->info->found_decoder = 1;
1870 } else if (!st->info->found_decoder)
1871 st->info->found_decoder = 1;
1873 if (st->info->found_decoder < 0) {
1878 while ((pkt.size > 0 || (!pkt.data && got_picture)) &&
1880 (!has_codec_parameters(st) || !has_decode_delay_been_guessed(st) ||
1881 (!st->codec_info_nb_frames &&
1882 (st->codec->codec->capabilities & AV_CODEC_CAP_CHANNEL_CONF)))) {
1884 switch (st->codec->codec_type) {
1885 case AVMEDIA_TYPE_VIDEO:
1886 ret = avcodec_decode_video2(st->codec, frame,
1887 &got_picture, &pkt);
1889 case AVMEDIA_TYPE_AUDIO:
1890 ret = avcodec_decode_audio4(st->codec, frame, &got_picture, &pkt);
1897 st->info->nb_decoded_frames++;
1905 av_frame_free(&frame);
1909 unsigned int ff_codec_get_tag(const AVCodecTag *tags, enum AVCodecID id)
1911 while (tags->id != AV_CODEC_ID_NONE) {
1919 enum AVCodecID ff_codec_get_id(const AVCodecTag *tags, unsigned int tag)
1922 for (i = 0; tags[i].id != AV_CODEC_ID_NONE; i++)
1923 if (tag == tags[i].tag)
1925 for (i = 0; tags[i].id != AV_CODEC_ID_NONE; i++)
1926 if (avpriv_toupper4(tag) == avpriv_toupper4(tags[i].tag))
1928 return AV_CODEC_ID_NONE;
1931 enum AVCodecID ff_get_pcm_codec_id(int bps, int flt, int be, int sflags)
1936 return be ? AV_CODEC_ID_PCM_F32BE : AV_CODEC_ID_PCM_F32LE;
1938 return be ? AV_CODEC_ID_PCM_F64BE : AV_CODEC_ID_PCM_F64LE;
1940 return AV_CODEC_ID_NONE;
1944 if (sflags & (1 << (bps - 1))) {
1947 return AV_CODEC_ID_PCM_S8;
1949 return be ? AV_CODEC_ID_PCM_S16BE : AV_CODEC_ID_PCM_S16LE;
1951 return be ? AV_CODEC_ID_PCM_S24BE : AV_CODEC_ID_PCM_S24LE;
1953 return be ? AV_CODEC_ID_PCM_S32BE : AV_CODEC_ID_PCM_S32LE;
1955 return AV_CODEC_ID_NONE;
1960 return AV_CODEC_ID_PCM_U8;
1962 return be ? AV_CODEC_ID_PCM_U16BE : AV_CODEC_ID_PCM_U16LE;
1964 return be ? AV_CODEC_ID_PCM_U24BE : AV_CODEC_ID_PCM_U24LE;
1966 return be ? AV_CODEC_ID_PCM_U32BE : AV_CODEC_ID_PCM_U32LE;
1968 return AV_CODEC_ID_NONE;
1974 unsigned int av_codec_get_tag(const AVCodecTag *const *tags, enum AVCodecID id)
1977 for (i = 0; tags && tags[i]; i++) {
1978 int tag = ff_codec_get_tag(tags[i], id);
1985 enum AVCodecID av_codec_get_id(const AVCodecTag *const *tags, unsigned int tag)
1988 for (i = 0; tags && tags[i]; i++) {
1989 enum AVCodecID id = ff_codec_get_id(tags[i], tag);
1990 if (id != AV_CODEC_ID_NONE)
1993 return AV_CODEC_ID_NONE;
1996 static void compute_chapters_end(AVFormatContext *s)
1999 int64_t max_time = s->duration +
2000 ((s->start_time == AV_NOPTS_VALUE) ? 0 : s->start_time);
2002 for (i = 0; i < s->nb_chapters; i++)
2003 if (s->chapters[i]->end == AV_NOPTS_VALUE) {
2004 AVChapter *ch = s->chapters[i];
2005 int64_t end = max_time ? av_rescale_q(max_time, AV_TIME_BASE_Q,
2009 for (j = 0; j < s->nb_chapters; j++) {
2010 AVChapter *ch1 = s->chapters[j];
2011 int64_t next_start = av_rescale_q(ch1->start, ch1->time_base,
2013 if (j != i && next_start > ch->start && next_start < end)
2016 ch->end = (end == INT64_MAX) ? ch->start : end;
2020 static int get_std_framerate(int i)
2023 return (i + 1) * 1001;
2025 return ((const int[]) { 24, 30, 60, 12, 15 })[i - 60 * 12] * 1000 * 12;
2028 int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
2030 int i, count, ret, read_size, j;
2032 AVPacket pkt1, *pkt;
2033 int64_t old_offset = avio_tell(ic->pb);
2034 // new streams might appear, no options for those
2035 int orig_nb_streams = ic->nb_streams;
2037 for (i = 0; i < ic->nb_streams; i++) {
2038 const AVCodec *codec;
2039 AVDictionary *thread_opt = NULL;
2040 st = ic->streams[i];
2042 // only for the parser->split() extradata extraction below
2043 if (!st->parser && !(ic->flags & AVFMT_FLAG_NOPARSE)) {
2044 st->parser = av_parser_init(st->codec->codec_id);
2045 if (st->need_parsing == AVSTREAM_PARSE_HEADERS && st->parser)
2046 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
2048 codec = st->codec->codec ? st->codec->codec
2049 : avcodec_find_decoder(st->codec->codec_id);
2051 /* Force thread count to 1 since the H.264 decoder will not extract
2052 * SPS and PPS to extradata during multi-threaded decoding. */
2053 av_dict_set(options ? &options[i] : &thread_opt, "threads", "1", 0);
2055 /* Ensure that subtitle_header is properly set. */
2056 if (st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE
2057 && codec && !st->codec->codec)
2058 avcodec_open2(st->codec, codec,
2059 options ? &options[i] : &thread_opt);
2061 // Try to just open decoders, in case this is enough to get parameters.
2062 if (!has_codec_parameters(st)) {
2063 if (codec && !st->codec->codec)
2064 avcodec_open2(st->codec, codec,
2065 options ? &options[i] : &thread_opt);
2068 av_dict_free(&thread_opt);
2071 for (i = 0; i < ic->nb_streams; i++) {
2072 ic->streams[i]->info->fps_first_dts = AV_NOPTS_VALUE;
2073 ic->streams[i]->info->fps_last_dts = AV_NOPTS_VALUE;
2079 if (ff_check_interrupt(&ic->interrupt_callback)) {
2081 av_log(ic, AV_LOG_DEBUG, "interrupted\n");
2085 /* check if one codec still needs to be handled */
2086 for (i = 0; i < ic->nb_streams; i++) {
2087 int fps_analyze_framecount = 20;
2089 st = ic->streams[i];
2090 if (!has_codec_parameters(st))
2092 /* If the timebase is coarse (like the usual millisecond precision
2093 * of mkv), we need to analyze more frames to reliably arrive at
2094 * the correct fps. */
2095 if (av_q2d(st->time_base) > 0.0005)
2096 fps_analyze_framecount *= 2;
2097 if (ic->fps_probe_size >= 0)
2098 fps_analyze_framecount = ic->fps_probe_size;
2099 /* variable fps and no guess at the real fps */
2100 if (!st->avg_frame_rate.num &&
2101 st->codec_info_nb_frames < fps_analyze_framecount &&
2102 st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
2104 if (st->parser && st->parser->parser->split &&
2105 !st->codec->extradata)
2107 if (st->first_dts == AV_NOPTS_VALUE &&
2108 st->codec_info_nb_frames < ic->max_ts_probe &&
2109 (st->codec->codec_type == AVMEDIA_TYPE_VIDEO ||
2110 st->codec->codec_type == AVMEDIA_TYPE_AUDIO))
2113 if (i == ic->nb_streams) {
2114 /* NOTE: If the format has no header, then we need to read some
2115 * packets to get most of the streams, so we cannot stop here. */
2116 if (!(ic->ctx_flags & AVFMTCTX_NOHEADER)) {
2117 /* If we found the info for all the codecs, we can stop. */
2119 av_log(ic, AV_LOG_DEBUG, "All info found\n");
2123 /* We did not get all the codec info, but we read too much data. */
2124 if (read_size >= ic->probesize) {
2126 av_log(ic, AV_LOG_DEBUG,
2127 "Probe buffer size limit %d reached\n", ic->probesize);
2131 /* NOTE: A new stream can be added here if the file has no header
2132 * (AVFMTCTX_NOHEADER). */
2133 ret = read_frame_internal(ic, &pkt1);
2134 if (ret == AVERROR(EAGAIN))
2139 AVPacket empty_pkt = { 0 };
2141 av_init_packet(&empty_pkt);
2143 /* We could not get all the codec parameters before EOF. */
2145 for (i = 0; i < ic->nb_streams; i++) {
2146 st = ic->streams[i];
2148 /* flush the decoders */
2149 if (st->info->found_decoder == 1) {
2151 err = try_decode_frame(st, &empty_pkt,
2152 (options && i < orig_nb_streams)
2153 ? &options[i] : NULL);
2154 } while (err > 0 && !has_codec_parameters(st));
2158 av_log(ic, AV_LOG_WARNING,
2159 "decoding for stream %d failed\n", st->index);
2160 } else if (!has_codec_parameters(st)) {
2162 avcodec_string(buf, sizeof(buf), st->codec, 0);
2163 av_log(ic, AV_LOG_WARNING,
2164 "Could not find codec parameters (%s)\n", buf);
2172 if (ic->flags & AVFMT_FLAG_NOBUFFER) {
2175 pkt = add_to_pktbuf(&ic->internal->packet_buffer, &pkt1,
2176 &ic->internal->packet_buffer_end);
2177 if ((ret = av_dup_packet(pkt)) < 0)
2178 goto find_stream_info_err;
2181 read_size += pkt->size;
2183 st = ic->streams[pkt->stream_index];
2184 if (pkt->dts != AV_NOPTS_VALUE && st->codec_info_nb_frames > 1) {
2185 /* check for non-increasing dts */
2186 if (st->info->fps_last_dts != AV_NOPTS_VALUE &&
2187 st->info->fps_last_dts >= pkt->dts) {
2188 av_log(ic, AV_LOG_WARNING,
2189 "Non-increasing DTS in stream %d: packet %d with DTS "
2190 "%"PRId64", packet %d with DTS %"PRId64"\n",
2191 st->index, st->info->fps_last_dts_idx,
2192 st->info->fps_last_dts, st->codec_info_nb_frames,
2194 st->info->fps_first_dts =
2195 st->info->fps_last_dts = AV_NOPTS_VALUE;
2197 /* Check for a discontinuity in dts. If the difference in dts
2198 * is more than 1000 times the average packet duration in the
2199 * sequence, we treat it as a discontinuity. */
2200 if (st->info->fps_last_dts != AV_NOPTS_VALUE &&
2201 st->info->fps_last_dts_idx > st->info->fps_first_dts_idx &&
2202 (pkt->dts - st->info->fps_last_dts) / 1000 >
2203 (st->info->fps_last_dts - st->info->fps_first_dts) /
2204 (st->info->fps_last_dts_idx - st->info->fps_first_dts_idx)) {
2205 av_log(ic, AV_LOG_WARNING,
2206 "DTS discontinuity in stream %d: packet %d with DTS "
2207 "%"PRId64", packet %d with DTS %"PRId64"\n",
2208 st->index, st->info->fps_last_dts_idx,
2209 st->info->fps_last_dts, st->codec_info_nb_frames,
2211 st->info->fps_first_dts =
2212 st->info->fps_last_dts = AV_NOPTS_VALUE;
2215 /* update stored dts values */
2216 if (st->info->fps_first_dts == AV_NOPTS_VALUE) {
2217 st->info->fps_first_dts = pkt->dts;
2218 st->info->fps_first_dts_idx = st->codec_info_nb_frames;
2220 st->info->fps_last_dts = pkt->dts;
2221 st->info->fps_last_dts_idx = st->codec_info_nb_frames;
2223 /* check max_analyze_duration */
2224 if (av_rescale_q(pkt->dts - st->info->fps_first_dts, st->time_base,
2225 AV_TIME_BASE_Q) >= ic->max_analyze_duration) {
2226 av_log(ic, AV_LOG_WARNING, "max_analyze_duration %d reached\n",
2227 ic->max_analyze_duration);
2228 if (ic->flags & AVFMT_FLAG_NOBUFFER)
2229 av_packet_unref(pkt);
2233 if (st->parser && st->parser->parser->split && !st->codec->extradata) {
2234 int i = st->parser->parser->split(st->codec, pkt->data, pkt->size);
2235 if (i > 0 && i < FF_MAX_EXTRADATA_SIZE) {
2236 st->codec->extradata_size = i;
2237 st->codec->extradata = av_mallocz(st->codec->extradata_size +
2238 AV_INPUT_BUFFER_PADDING_SIZE);
2239 if (!st->codec->extradata)
2240 return AVERROR(ENOMEM);
2241 memcpy(st->codec->extradata, pkt->data,
2242 st->codec->extradata_size);
2246 /* If still no information, we try to open the codec and to
2247 * decompress the frame. We try to avoid that in most cases as
2248 * it takes longer and uses more memory. For MPEG-4, we need to
2249 * decompress for QuickTime.
2251 * If AV_CODEC_CAP_CHANNEL_CONF is set, this will force decoding of at
2252 * least one frame of codec data; this makes sure the codec initializes
2253 * the channel configuration and does not only trust the values from
2255 try_decode_frame(st, pkt,
2256 (options && i < orig_nb_streams) ? &options[i] : NULL);
2258 if (ic->flags & AVFMT_FLAG_NOBUFFER)
2259 av_packet_unref(pkt);
2261 st->codec_info_nb_frames++;
2265 // close codecs which were opened in try_decode_frame()
2266 for (i = 0; i < ic->nb_streams; i++) {
2267 st = ic->streams[i];
2268 avcodec_close(st->codec);
2270 for (i = 0; i < ic->nb_streams; i++) {
2271 st = ic->streams[i];
2272 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
2273 /* estimate average framerate if not set by demuxer */
2274 if (!st->avg_frame_rate.num &&
2275 st->info->fps_last_dts != st->info->fps_first_dts) {
2276 int64_t delta_dts = st->info->fps_last_dts -
2277 st->info->fps_first_dts;
2278 int delta_packets = st->info->fps_last_dts_idx -
2279 st->info->fps_first_dts_idx;
2281 double best_error = 0.01;
2283 if (delta_dts >= INT64_MAX / st->time_base.num ||
2284 delta_packets >= INT64_MAX / st->time_base.den ||
2287 av_reduce(&st->avg_frame_rate.num, &st->avg_frame_rate.den,
2288 delta_packets * (int64_t) st->time_base.den,
2289 delta_dts * (int64_t) st->time_base.num, 60000);
2291 /* Round guessed framerate to a "standard" framerate if it's
2292 * within 1% of the original estimate. */
2293 for (j = 0; j < MAX_STD_TIMEBASES; j++) {
2294 AVRational std_fps = { get_std_framerate(j), 12 * 1001 };
2295 double error = fabs(av_q2d(st->avg_frame_rate) /
2296 av_q2d(std_fps) - 1);
2298 if (error < best_error) {
2299 best_error = error;
2300 best_fps = std_fps.num;
2301 }
2302 }
2303 if (best_fps)
2304 av_reduce(&st->avg_frame_rate.num, &st->avg_frame_rate.den,
2305 best_fps, 12 * 1001, INT_MAX);
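/* Worked example (editorial addition): with a 1/90000 time base and packets
 * 3003 ticks apart, 250 packet intervals give delta_dts = 750750, so
 * av_reduce(..., 250 * 90000, 750750, 60000) yields 30000/1001 (~29.97 fps).
 * That is within 1% of the 30000/1001 entry produced by get_std_framerate()
 * (the 30 * 1000 * 12 table value over 12 * 1001), so avg_frame_rate is
 * snapped to exactly 30000/1001. */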
2307 } else if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
2308 if (!st->codec->bits_per_coded_sample)
2309 st->codec->bits_per_coded_sample =
2310 av_get_bits_per_sample(st->codec->codec_id);
2311 // set stream disposition based on audio service type
2312 switch (st->codec->audio_service_type) {
2313 case AV_AUDIO_SERVICE_TYPE_EFFECTS:
2314 st->disposition = AV_DISPOSITION_CLEAN_EFFECTS;
2315 break;
2316 case AV_AUDIO_SERVICE_TYPE_VISUALLY_IMPAIRED:
2317 st->disposition = AV_DISPOSITION_VISUAL_IMPAIRED;
2318 break;
2319 case AV_AUDIO_SERVICE_TYPE_HEARING_IMPAIRED:
2320 st->disposition = AV_DISPOSITION_HEARING_IMPAIRED;
2321 break;
2322 case AV_AUDIO_SERVICE_TYPE_COMMENTARY:
2323 st->disposition = AV_DISPOSITION_COMMENT;
2324 break;
2325 case AV_AUDIO_SERVICE_TYPE_KARAOKE:
2326 st->disposition = AV_DISPOSITION_KARAOKE;
2327 break;
2328 }
2329 }
2330 }
2332 estimate_timings(ic, old_offset);
2334 compute_chapters_end(ic);
2336 find_stream_info_err:
2337 for (i = 0; i < ic->nb_streams; i++) {
2338 ic->streams[i]->codec->thread_count = 0;
2339 av_freep(&ic->streams[i]->info);
2340 }
2341 return ret;
2342 }
2344 static AVProgram *find_program_from_stream(AVFormatContext *ic, int s)
2345 {
2346 int i, j;
2348 for (i = 0; i < ic->nb_programs; i++)
2349 for (j = 0; j < ic->programs[i]->nb_stream_indexes; j++)
2350 if (ic->programs[i]->stream_index[j] == s)
2351 return ic->programs[i];
2352 return NULL;
2353 }
2355 int av_find_best_stream(AVFormatContext *ic, enum AVMediaType type,
2356 int wanted_stream_nb, int related_stream,
2357 AVCodec **decoder_ret, int flags)
2359 int i, nb_streams = ic->nb_streams;
2360 int ret = AVERROR_STREAM_NOT_FOUND, best_count = -1;
2361 unsigned *program = NULL;
2362 AVCodec *decoder = NULL, *best_decoder = NULL;
2364 if (related_stream >= 0 && wanted_stream_nb < 0) {
2365 AVProgram *p = find_program_from_stream(ic, related_stream);
2367 program = p->stream_index;
2368 nb_streams = p->nb_stream_indexes;
2371 for (i = 0; i < nb_streams; i++) {
2372 int real_stream_index = program ? program[i] : i;
2373 AVStream *st = ic->streams[real_stream_index];
2374 AVCodecContext *avctx = st->codec;
2375 if (avctx->codec_type != type)
2376 continue;
2377 if (wanted_stream_nb >= 0 && real_stream_index != wanted_stream_nb)
2378 continue;
2379 if (st->disposition & (AV_DISPOSITION_HEARING_IMPAIRED |
2380 AV_DISPOSITION_VISUAL_IMPAIRED))
2381 continue;
2382 if (decoder_ret) {
2383 decoder = avcodec_find_decoder(st->codec->codec_id);
2384 if (!decoder) {
2385 if (ret < 0)
2386 ret = AVERROR_DECODER_NOT_FOUND;
2387 continue;
2388 }
2389 }
2390 if (best_count >= st->codec_info_nb_frames)
2391 continue;
2392 best_count = st->codec_info_nb_frames;
2393 ret = real_stream_index;
2394 best_decoder = decoder;
2395 if (program && i == nb_streams - 1 && ret < 0) {
2396 program = NULL;
2397 nb_streams = ic->nb_streams;
2398 /* no related stream found, try again with everything */
2399 i = 0;
2400 }
2401 }
2402 if (decoder_ret)
2403 *decoder_ret = best_decoder;
2404 return ret;
2405 }
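/* Usage sketch (editorial addition, not part of the original file):
 *
 *     AVCodec *dec = NULL;
 *     int idx = av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO, -1, -1, &dec, 0);
 *     if (idx >= 0) {
 *         // ic->streams[idx] is the chosen stream and dec a matching decoder;
 *         // negative values are AVERROR_STREAM_NOT_FOUND or
 *         // AVERROR_DECODER_NOT_FOUND.
 *     }
 *
 * Passing -1 for wanted_stream_nb and related_stream lets the function pick
 * the stream with the most codec_info_nb_frames that is not flagged as a
 * hearing/visually impaired rendition. */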
2407 /*******************************************************/
2409 int av_read_play(AVFormatContext *s)
2410 {
2411 if (s->iformat->read_play)
2412 return s->iformat->read_play(s);
2413 if (s->pb)
2414 return avio_pause(s->pb, 0);
2415 return AVERROR(ENOSYS);
2416 }
2418 int av_read_pause(AVFormatContext *s)
2419 {
2420 if (s->iformat->read_pause)
2421 return s->iformat->read_pause(s);
2422 if (s->pb)
2423 return avio_pause(s->pb, 1);
2424 return AVERROR(ENOSYS);
2425 }
2427 void avformat_free_context(AVFormatContext *s)
2428 {
2429 int i, j;
2436 if (s->iformat && s->iformat->priv_class && s->priv_data)
2437 av_opt_free(s->priv_data);
2439 for (i = 0; i < s->nb_streams; i++) {
2440 /* free all data in a stream component */
2441 AVStream *st = s->streams[i];
2443 for (j = 0; j < st->nb_side_data; j++)
2444 av_freep(&st->side_data[j].data);
2445 av_freep(&st->side_data);
2446 st->nb_side_data = 0;
2448 if (st->parser) {
2449 av_parser_close(st->parser);
2450 }
2451 if (st->attached_pic.data)
2452 av_free_packet(&st->attached_pic);
2453 av_dict_free(&st->metadata);
2454 av_freep(&st->probe_data.buf);
2455 av_free(st->index_entries);
2456 av_free(st->codec->extradata);
2457 av_free(st->codec->subtitle_header);
2458 av_free(st->codec);
2459 av_free(st->priv_data);
2460 av_free(st->info);
2461 av_free(st);
2462 }
2463 for (i = s->nb_programs - 1; i >= 0; i--) {
2464 av_dict_free(&s->programs[i]->metadata);
2465 av_freep(&s->programs[i]->stream_index);
2466 av_freep(&s->programs[i]);
2467 }
2468 av_freep(&s->programs);
2469 av_freep(&s->priv_data);
2470 while (s->nb_chapters--) {
2471 av_dict_free(&s->chapters[s->nb_chapters]->metadata);
2472 av_free(s->chapters[s->nb_chapters]);
2473 }
2474 av_freep(&s->chapters);
2475 av_dict_free(&s->metadata);
2476 av_freep(&s->streams);
2477 av_freep(&s->internal);
2478 av_free(s);
2479 }
2481 void avformat_close_input(AVFormatContext **ps)
2482 {
2483 AVFormatContext *s = *ps;
2484 AVIOContext *pb = s->pb;
2486 if ((s->iformat && s->iformat->flags & AVFMT_NOFILE) ||
2487 (s->flags & AVFMT_FLAG_CUSTOM_IO))
2488 pb = NULL;
2490 flush_packet_queue(s);
2492 if (s->iformat)
2493 if (s->iformat->read_close)
2494 s->iformat->read_close(s);
2496 avformat_free_context(s);
2498 *ps = NULL;
2500 avio_close(pb);
2501 }
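/* Usage sketch (editorial addition): the matching teardown for the usual
 * demuxing sequence; avformat_close_input() both closes the AVIOContext
 * (unless AVFMT_NOFILE or AVFMT_FLAG_CUSTOM_IO applies) and frees the context:
 *
 *     AVFormatContext *ic = NULL;
 *     if (avformat_open_input(&ic, "input.mkv", NULL, NULL) == 0) {
 *         avformat_find_stream_info(ic, NULL);
 *         // ... av_read_frame() loop ...
 *         avformat_close_input(&ic);   // ic is set to NULL here
 *     }
 */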
2503 AVStream *avformat_new_stream(AVFormatContext *s, const AVCodec *c)
2504 {
2505 AVStream *st;
2506 int i;
2508 if (av_reallocp_array(&s->streams, s->nb_streams + 1,
2509 sizeof(*s->streams)) < 0) {
2510 s->nb_streams = 0;
2511 return NULL;
2512 }
2514 st = av_mallocz(sizeof(AVStream));
2515 if (!st)
2516 return NULL;
2517 if (!(st->info = av_mallocz(sizeof(*st->info)))) {
2518 av_free(st);
2519 return NULL;
2520 }
2522 st->codec = avcodec_alloc_context3(c);
2529 /* no default bitrate if decoding */
2530 st->codec->bit_rate = 0;
2532 /* default pts setting is MPEG-like */
2533 avpriv_set_pts_info(st, 33, 1, 90000);
2536 st->index = s->nb_streams;
2537 st->start_time = AV_NOPTS_VALUE;
2538 st->duration = AV_NOPTS_VALUE;
2539 /* we set the current DTS to 0 so that formats without any timestamps
2540 * but durations get some timestamps; formats with some unknown
2541 * timestamps have their first few packets buffered and the
2542 * timestamps corrected before they are returned to the user */
2543 st->cur_dts = 0;
2544 st->first_dts = AV_NOPTS_VALUE;
2545 st->probe_packets = MAX_PROBE_PACKETS;
2547 st->last_IP_pts = AV_NOPTS_VALUE;
2548 for (i = 0; i < MAX_REORDER_DELAY + 1; i++)
2549 st->pts_buffer[i] = AV_NOPTS_VALUE;
2551 st->sample_aspect_ratio = (AVRational) { 0, 1 };
2553 st->info->fps_first_dts = AV_NOPTS_VALUE;
2554 st->info->fps_last_dts = AV_NOPTS_VALUE;
2556 s->streams[s->nb_streams++] = st;
2558 return st;
2559 }
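/* Usage sketch (editorial addition): a muxer typically adds streams before
 * writing the header; error handling trimmed and codec values illustrative:
 *
 *     AVStream *st = avformat_new_stream(oc, NULL);
 *     if (!st)
 *         return AVERROR(ENOMEM);
 *     st->codec->codec_id  = AV_CODEC_ID_H264;
 *     st->codec->time_base = (AVRational){ 1, 25 };
 *
 * Demuxers call the same function from read_header(); the
 * avpriv_set_pts_info(st, 33, 1, 90000) call above gives them an MPEG-style
 * 90 kHz default until they declare their own time base. */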
2560 AVProgram *av_new_program(AVFormatContext *ac, int id)
2561 {
2562 AVProgram *program = NULL;
2563 int i;
2565 av_log(ac, AV_LOG_TRACE, "new_program: id=0x%04x\n", id);
2567 for (i = 0; i < ac->nb_programs; i++)
2568 if (ac->programs[i]->id == id)
2569 program = ac->programs[i];
2571 if (!program) {
2572 program = av_mallocz(sizeof(AVProgram));
2573 if (!program)
2574 return NULL;
2575 dynarray_add(&ac->programs, &ac->nb_programs, program);
2576 program->discard = AVDISCARD_NONE;
2577 }
2578 program->id = id;
2580 return program;
2581 }
2583 AVChapter *avpriv_new_chapter(AVFormatContext *s, int id, AVRational time_base,
2584 int64_t start, int64_t end, const char *title)
2585 {
2586 AVChapter *chapter = NULL;
2587 int i;
2589 for (i = 0; i < s->nb_chapters; i++)
2590 if (s->chapters[i]->id == id)
2591 chapter = s->chapters[i];
2593 if (!chapter) {
2594 chapter = av_mallocz(sizeof(AVChapter));
2595 if (!chapter)
2596 return NULL;
2597 dynarray_add(&s->chapters, &s->nb_chapters, chapter);
2598 }
2599 av_dict_set(&chapter->metadata, "title", title, 0);
2600 chapter->id = id;
2601 chapter->time_base = time_base;
2602 chapter->start = start;
2603 chapter->end = end;
2605 return chapter;
2606 }
2608 void ff_program_add_stream_index(AVFormatContext *ac, int progid, unsigned idx)
2609 {
2610 int i, j;
2611 AVProgram *program = NULL;
2613 if (idx >= ac->nb_streams) {
2614 av_log(ac, AV_LOG_ERROR, "stream index %d is not valid\n", idx);
2615 return;
2616 }
2618 for (i = 0; i < ac->nb_programs; i++) {
2619 if (ac->programs[i]->id != progid)
2620 continue;
2621 program = ac->programs[i];
2622 for (j = 0; j < program->nb_stream_indexes; j++)
2623 if (program->stream_index[j] == idx)
2624 return;
2626 if (av_reallocp_array(&program->stream_index,
2627 program->nb_stream_indexes + 1,
2628 sizeof(*program->stream_index)) < 0) {
2629 program->nb_stream_indexes = 0;
2630 return;
2631 }
2632 program->stream_index[program->nb_stream_indexes++] = idx;
2633 return;
2634 }
2635 }
2637 uint64_t ff_ntp_time(void)
2638 {
2639 return (av_gettime() / 1000) * 1000 + NTP_OFFSET_US;
2640 }
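/* Editorial note: NTP_OFFSET_US is the number of microseconds between the
 * NTP epoch (1900-01-01) and the Unix epoch (1970-01-01), i.e. 2208988800
 * seconds, so ff_ntp_time() returns the current wallclock time expressed on
 * the NTP timescale with millisecond granularity. */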
2642 int av_get_frame_filename(char *buf, int buf_size, const char *path, int number)
2645 char *q, buf1[20], c;
2646 int nd, len, percentd_found;
2658 while (av_isdigit(*p))
2659 nd = nd * 10 + *p++ - '0';
2661 } while (av_isdigit(c));
2670 snprintf(buf1, sizeof(buf1), "%0*d", nd, number);
2672 if ((q - buf + len) > buf_size - 1)
2674 memcpy(q, buf1, len);
2682 if ((q - buf) < buf_size - 1)
2686 if (!percentd_found)
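/* Usage sketch (editorial addition):
 *
 *     char name[1024];
 *     if (av_get_frame_filename(name, sizeof(name), "frame-%04d.png", 7) >= 0)
 *         ;   // name now holds "frame-0007.png"
 *
 * A path without exactly one %d-style sequence number makes the function
 * return a negative error, which is what av_filename_number_test() relies on
 * when probing image-sequence outputs. */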
2695 void av_url_split(char *proto, int proto_size,
2696 char *authorization, int authorization_size,
2697 char *hostname, int hostname_size,
2698 int *port_ptr, char *path, int path_size, const char *url)
2699 {
2700 const char *p, *ls, *at, *col, *brk;
2706 if (authorization_size > 0)
2707 authorization[0] = 0;
2708 if (hostname_size > 0)
2709 hostname[0] = 0;
2710 if (path_size > 0)
2711 path[0] = 0;
2713 /* parse protocol */
2714 if ((p = strchr(url, ':'))) {
2715 av_strlcpy(proto, url, FFMIN(proto_size, p + 1 - url));
2716 p++; /* skip ':' */
2717 if (*p == '/')
2718 p++;
2719 if (*p == '/')
2720 p++;
2721 } else {
2722 /* no protocol means plain filename */
2723 av_strlcpy(path, url, path_size);
2724 return;
2725 }
2727 /* separate path from hostname */
2728 ls = strchr(p, '/');
2729 if (!ls)
2730 ls = strchr(p, '?');
2731 if (ls)
2732 av_strlcpy(path, ls, path_size);
2733 else
2734 ls = &p[strlen(p)]; // XXX
2736 /* the rest is hostname, use that to parse auth/port */
2738 /* authorization (user[:pass]@hostname) */
2739 if ((at = strchr(p, '@')) && at < ls) {
2740 av_strlcpy(authorization, p,
2741 FFMIN(authorization_size, at + 1 - p));
2742 p = at + 1; /* skip '@' */
2745 if (*p == '[' && (brk = strchr(p, ']')) && brk < ls) {
2747 av_strlcpy(hostname, p + 1,
2748 FFMIN(hostname_size, brk - p));
2749 if (brk[1] == ':' && port_ptr)
2750 *port_ptr = atoi(brk + 2);
2751 } else if ((col = strchr(p, ':')) && col < ls) {
2752 av_strlcpy(hostname, p,
2753 FFMIN(col + 1 - p, hostname_size));
2754 if (port_ptr)
2755 *port_ptr = atoi(col + 1);
2756 } else
2757 av_strlcpy(hostname, p,
2758 FFMIN(ls + 1 - p, hostname_size));
2759 }
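/* Usage sketch (editorial addition):
 *
 *     char proto[16], auth[256], host[256], path[1024];
 *     int port = -1;
 *     av_url_split(proto, sizeof(proto), auth, sizeof(auth),
 *                  host, sizeof(host), &port, path, sizeof(path),
 *                  "rtsp://user:pass@example.com:554/live/stream?foo=1");
 *     // proto="rtsp", auth="user:pass", host="example.com", port=554,
 *     // path="/live/stream?foo=1"
 *
 * Components the caller does not need can be requested with a size of 0 (or a
 * NULL port_ptr); port is left untouched when the URL carries no port. */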
2762 char *ff_data_to_hex(char *buff, const uint8_t *src, int s, int lowercase)
2763 {
2764 int i;
2765 static const char hex_table_uc[16] = { '0', '1', '2', '3',
2766 '4', '5', '6', '7',
2767 '8', '9', 'A', 'B',
2768 'C', 'D', 'E', 'F' };
2769 static const char hex_table_lc[16] = { '0', '1', '2', '3',
2770 '4', '5', '6', '7',
2771 '8', '9', 'a', 'b',
2772 'c', 'd', 'e', 'f' };
2773 const char *hex_table = lowercase ? hex_table_lc : hex_table_uc;
2775 for (i = 0; i < s; i++) {
2776 buff[i * 2] = hex_table[src[i] >> 4];
2777 buff[i * 2 + 1] = hex_table[src[i] & 0xF];
2778 }
2780 return buff;
2781 }
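/* Usage sketch (editorial addition): the caller provides a buffer of at least
 * 2 * s characters, plus room for a terminator if one is needed:
 *
 *     uint8_t digest[4] = { 0xde, 0xad, 0xbe, 0xef };
 *     char hex[2 * sizeof(digest) + 1];
 *     ff_data_to_hex(hex, digest, sizeof(digest), 1);
 *     hex[2 * sizeof(digest)] = '\0';   // hex == "deadbeef"
 */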
2783 int ff_hex_to_data(uint8_t *data, const char *p)
2790 p += strspn(p, SPACE_CHARS);
2793 c = av_toupper((unsigned char) *p++);
2794 if (c >= '0' && c <= '9')
2796 else if (c >= 'A' && c <= 'F')
2811 void avpriv_set_pts_info(AVStream *s, int pts_wrap_bits,
2812 unsigned int pts_num, unsigned int pts_den)
2813 {
2814 AVRational new_tb;
2815 if (av_reduce(&new_tb.num, &new_tb.den, pts_num, pts_den, INT_MAX)) {
2816 if (new_tb.num != pts_num)
2817 av_log(NULL, AV_LOG_DEBUG,
2818 "st:%d removing common factor %d from timebase\n",
2819 s->index, pts_num / new_tb.num);
2820 } else
2821 av_log(NULL, AV_LOG_WARNING,
2822 "st:%d has too large timebase, reducing\n", s->index);
2824 if (new_tb.num <= 0 || new_tb.den <= 0) {
2825 av_log(NULL, AV_LOG_ERROR,
2826 "Ignoring attempt to set invalid timebase for st:%d\n",
2827 s->index);
2828 return;
2829 }
2830 s->time_base = new_tb;
2831 s->pts_wrap_bits = pts_wrap_bits;
2832 }
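/* Usage sketch (editorial addition): demuxers call this to declare the unit
 * their timestamps are expressed in, e.g. an MPEG-TS-style 90 kHz clock with
 * 33-bit timestamps:
 *
 *     avpriv_set_pts_info(st, 33, 1, 90000);   // time_base becomes 1/90000
 *
 * A call such as avpriv_set_pts_info(st, 64, 10, 1000) would be reduced by
 * av_reduce() to 1/100 and logged as "removing common factor 10". */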
2834 void ff_parse_key_value(const char *str, ff_parse_key_val_cb callback_get_buf,
2835 void *context)
2836 {
2837 const char *ptr = str;
2839 /* Parse key=value pairs. */
2842 char *dest = NULL, *dest_end;
2843 int key_len, dest_len = 0;
2845 /* Skip whitespace and potential commas. */
2846 while (*ptr && (av_isspace(*ptr) || *ptr == ','))
2853 if (!(ptr = strchr(key, '=')))
2856 key_len = ptr - key;
2858 callback_get_buf(context, key, key_len, &dest, &dest_len);
2859 dest_end = dest + dest_len - 1;
2863 while (*ptr && *ptr != '\"') {
2867 if (dest && dest < dest_end)
2871 if (dest && dest < dest_end)
2879 for (; *ptr && !(av_isspace(*ptr) || *ptr == ','); ptr++)
2880 if (dest && dest < dest_end)
2888 int ff_find_stream_index(AVFormatContext *s, int id)
2889 {
2890 int i;
2891 for (i = 0; i < s->nb_streams; i++)
2892 if (s->streams[i]->id == id)
2893 return i;
2895 return -1;
2896 }
2897 int64_t ff_iso8601_to_unix_time(const char *datestr)
2898 {
2899 struct tm time1 = { 0 }, time2 = { 0 };
2900 const char *ret1, *ret2;
2901 ret1 = av_small_strptime(datestr, "%Y - %m - %d %T", &time1);
2902 ret2 = av_small_strptime(datestr, "%Y - %m - %dT%T", &time2);
2903 if (ret2 && !ret1)
2904 return av_timegm(&time2);
2905 else
2906 return av_timegm(&time1);
2907 }
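/* Editorial note: both accepted spellings, "2004-06-14T23:34:30" and
 * "2004-06-14 23:34:30", are treated as UTC and converted with av_timegm(),
 * so the result is seconds since the Unix epoch; e.g. "1970-01-01T00:01:00"
 * yields 60. */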
2909 int avformat_query_codec(const AVOutputFormat *ofmt, enum AVCodecID codec_id,
2910 int std_compliance)
2911 {
2912 if (ofmt) {
2913 if (ofmt->query_codec)
2914 return ofmt->query_codec(codec_id, std_compliance);
2915 else if (ofmt->codec_tag)
2916 return !!av_codec_get_tag(ofmt->codec_tag, codec_id);
2917 else if (codec_id == ofmt->video_codec ||
2918 codec_id == ofmt->audio_codec ||
2919 codec_id == ofmt->subtitle_codec)
2920 return 1;
2921 }
2922 return AVERROR_PATCHWELCOME;
2923 }
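/* Usage sketch (editorial addition): checking whether a codec can be stored
 * in a container before adding the stream:
 *
 *     AVOutputFormat *ofmt = av_guess_format(NULL, "out.mkv", NULL);
 *     if (ofmt &&
 *         avformat_query_codec(ofmt, AV_CODEC_ID_H264,
 *                              FF_COMPLIANCE_NORMAL) > 0) {
 *         // the muxer advertises support (query_codec() or its tag tables)
 *     }
 *
 * A return of 0 means not supported; AVERROR_PATCHWELCOME means the muxer
 * offers no way to tell. */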
2925 int avformat_network_init(void)
2929 ff_network_inited_globally = 1;
2930 if ((ret = ff_network_init()) < 0)
2937 int avformat_network_deinit(void)
2946 int ff_add_param_change(AVPacket *pkt, int32_t channels,
2947 uint64_t channel_layout, int32_t sample_rate,
2948 int32_t width, int32_t height)
2949 {
2950 uint32_t flags = 0;
2951 int size = 4;
2952 uint8_t *data;
2953 if (!pkt)
2954 return AVERROR(EINVAL);
2955 if (channels) {
2956 size += 4;
2957 flags |= AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT;
2958 }
2959 if (channel_layout) {
2960 size += 8;
2961 flags |= AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT;
2962 }
2963 if (sample_rate) {
2964 size += 4;
2965 flags |= AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE;
2966 }
2967 if (width || height) {
2968 size += 8;
2969 flags |= AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS;
2970 }
2971 data = av_packet_new_side_data(pkt, AV_PKT_DATA_PARAM_CHANGE, size);
2972 if (!data)
2973 return AVERROR(ENOMEM);
2974 bytestream_put_le32(&data, flags);
2975 if (channels)
2976 bytestream_put_le32(&data, channels);
2977 if (channel_layout)
2978 bytestream_put_le64(&data, channel_layout);
2979 if (sample_rate)
2980 bytestream_put_le32(&data, sample_rate);
2981 if (width || height) {
2982 bytestream_put_le32(&data, width);
2983 bytestream_put_le32(&data, height);
2984 }
2985 return 0;
2986 }
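/* Layout note (editorial addition): the AV_PKT_DATA_PARAM_CHANGE payload
 * written above is a little-endian sequence of
 *
 *     u32 flags
 *     u32 channel count      (if AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT)
 *     u64 channel layout     (if AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT)
 *     u32 sample rate        (if AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE)
 *     u32 width, u32 height  (if AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS)
 *
 * which matches the layout documented for AV_PKT_DATA_PARAM_CHANGE in
 * libavcodec. */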
2988 int ff_generate_avci_extradata(AVStream *st)
2990 static const uint8_t avci100_1080p_extradata[] = {
2992 0x00, 0x00, 0x00, 0x01, 0x67, 0x7a, 0x10, 0x29,
2993 0xb6, 0xd4, 0x20, 0x22, 0x33, 0x19, 0xc6, 0x63,
2994 0x23, 0x21, 0x01, 0x11, 0x98, 0xce, 0x33, 0x19,
2995 0x18, 0x21, 0x02, 0x56, 0xb9, 0x3d, 0x7d, 0x7e,
2996 0x4f, 0xe3, 0x3f, 0x11, 0xf1, 0x9e, 0x08, 0xb8,
2997 0x8c, 0x54, 0x43, 0xc0, 0x78, 0x02, 0x27, 0xe2,
2998 0x70, 0x1e, 0x30, 0x10, 0x10, 0x14, 0x00, 0x00,
2999 0x03, 0x00, 0x04, 0x00, 0x00, 0x03, 0x00, 0xca,
3000 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
3002 0x00, 0x00, 0x00, 0x01, 0x68, 0xce, 0x33, 0x48,
3005 static const uint8_t avci100_1080i_extradata[] = {
3007 0x00, 0x00, 0x00, 0x01, 0x67, 0x7a, 0x10, 0x29,
3008 0xb6, 0xd4, 0x20, 0x22, 0x33, 0x19, 0xc6, 0x63,
3009 0x23, 0x21, 0x01, 0x11, 0x98, 0xce, 0x33, 0x19,
3010 0x18, 0x21, 0x03, 0x3a, 0x46, 0x65, 0x6a, 0x65,
3011 0x24, 0xad, 0xe9, 0x12, 0x32, 0x14, 0x1a, 0x26,
3012 0x34, 0xad, 0xa4, 0x41, 0x82, 0x23, 0x01, 0x50,
3013 0x2b, 0x1a, 0x24, 0x69, 0x48, 0x30, 0x40, 0x2e,
3014 0x11, 0x12, 0x08, 0xc6, 0x8c, 0x04, 0x41, 0x28,
3015 0x4c, 0x34, 0xf0, 0x1e, 0x01, 0x13, 0xf2, 0xe0,
3016 0x3c, 0x60, 0x20, 0x20, 0x28, 0x00, 0x00, 0x03,
3017 0x00, 0x08, 0x00, 0x00, 0x03, 0x01, 0x94, 0x00,
3019 0x00, 0x00, 0x00, 0x01, 0x68, 0xce, 0x33, 0x48,
3022 static const uint8_t avci50_1080i_extradata[] = {
3024 0x00, 0x00, 0x00, 0x01, 0x67, 0x6e, 0x10, 0x28,
3025 0xa6, 0xd4, 0x20, 0x32, 0x33, 0x0c, 0x71, 0x18,
3026 0x88, 0x62, 0x10, 0x19, 0x19, 0x86, 0x38, 0x8c,
3027 0x44, 0x30, 0x21, 0x02, 0x56, 0x4e, 0x6e, 0x61,
3028 0x87, 0x3e, 0x73, 0x4d, 0x98, 0x0c, 0x03, 0x06,
3029 0x9c, 0x0b, 0x73, 0xe6, 0xc0, 0xb5, 0x18, 0x63,
3030 0x0d, 0x39, 0xe0, 0x5b, 0x02, 0xd4, 0xc6, 0x19,
3031 0x1a, 0x79, 0x8c, 0x32, 0x34, 0x24, 0xf0, 0x16,
3032 0x81, 0x13, 0xf7, 0xff, 0x80, 0x01, 0x80, 0x02,
3033 0x71, 0x80, 0x80, 0x80, 0xa0, 0x00, 0x00, 0x03,
3034 0x00, 0x20, 0x00, 0x00, 0x06, 0x50, 0x80, 0x00,
3036 0x00, 0x00, 0x00, 0x01, 0x68, 0xee, 0x31, 0x12,
3039 static const uint8_t avci100_720p_extradata[] = {
3041 0x00, 0x00, 0x00, 0x01, 0x67, 0x7a, 0x10, 0x29,
3042 0xb6, 0xd4, 0x20, 0x2a, 0x33, 0x1d, 0xc7, 0x62,
3043 0xa1, 0x08, 0x40, 0x54, 0x66, 0x3b, 0x8e, 0xc5,
3044 0x42, 0x02, 0x10, 0x25, 0x64, 0x2c, 0x89, 0xe8,
3045 0x85, 0xe4, 0x21, 0x4b, 0x90, 0x83, 0x06, 0x95,
3046 0xd1, 0x06, 0x46, 0x97, 0x20, 0xc8, 0xd7, 0x43,
3047 0x08, 0x11, 0xc2, 0x1e, 0x4c, 0x91, 0x0f, 0x01,
3048 0x40, 0x16, 0xec, 0x07, 0x8c, 0x04, 0x04, 0x05,
3049 0x00, 0x00, 0x03, 0x00, 0x01, 0x00, 0x00, 0x03,
3050 0x00, 0x64, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00,
3052 0x00, 0x00, 0x00, 0x01, 0x68, 0xce, 0x31, 0x12,
3056 const uint8_t *data = NULL;
3057 int size = 0;
3059 if (st->codec->width == 1920) {
3060 if (st->codec->field_order == AV_FIELD_PROGRESSIVE) {
3061 data = avci100_1080p_extradata;
3062 size = sizeof(avci100_1080p_extradata);
3063 } else {
3064 data = avci100_1080i_extradata;
3065 size = sizeof(avci100_1080i_extradata);
3066 }
3067 } else if (st->codec->width == 1440) {
3068 data = avci50_1080i_extradata;
3069 size = sizeof(avci50_1080i_extradata);
3070 } else if (st->codec->width == 1280) {
3071 data = avci100_720p_extradata;
3072 size = sizeof(avci100_720p_extradata);
3073 }
3075 if (!size)
3076 return 0;
3078 av_freep(&st->codec->extradata);
3079 st->codec->extradata_size = 0;
3080 st->codec->extradata = av_mallocz(size + AV_INPUT_BUFFER_PADDING_SIZE);
3081 if (!st->codec->extradata)
3082 return AVERROR(ENOMEM);
3084 memcpy(st->codec->extradata, data, size);
3085 st->codec->extradata_size = size;
3087 return 0;
3088 }
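/* Editorial note: the tables above are pre-built SPS/PPS headers for the
 * AVC-Intra 100/50 classes; demuxers for containers that do not carry the
 * headers in-band (e.g. the MXF demuxer) call ff_generate_avci_extradata()
 * so H.264 decoding can start from any frame. */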
3090 uint8_t *av_stream_get_side_data(AVStream *st, enum AVPacketSideDataType type,
3091 int *size)
3092 {
3093 int i;
3095 for (i = 0; i < st->nb_side_data; i++) {
3096 if (st->side_data[i].type == type) {
3097 if (size)
3098 *size = st->side_data[i].size;
3099 return st->side_data[i].data;
3100 }
3101 }
3103 return NULL;
3104 }
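/* Usage sketch (editorial addition): reading back per-stream side data, for
 * example replay-gain information attached by a demuxer (AVReplayGain from
 * libavutil/replaygain.h):
 *
 *     int sd_size = 0;
 *     uint8_t *sd = av_stream_get_side_data(st, AV_PKT_DATA_REPLAYGAIN,
 *                                           &sd_size);
 *     if (sd && sd_size >= sizeof(AVReplayGain)) {
 *         // interpret sd as an AVReplayGain structure
 *     }
 */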
3105 uint8_t *ff_stream_new_side_data(AVStream *st, enum AVPacketSideDataType type,
3106 int size)
3107 {
3108 AVPacketSideData *sd, *tmp;
3109 int i;
3110 uint8_t *data = av_malloc(size);
3111 if (!data)
3112 return NULL;
3115 for (i = 0; i < st->nb_side_data; i++) {
3116 sd = &st->side_data[i];
3118 if (sd->type == type) {
3119 av_freep(&sd->data);
3120 sd->data = data;
3121 sd->size = size;
3122 return sd->data;
3123 }
3124 }
3126 tmp = av_realloc_array(st->side_data, st->nb_side_data + 1, sizeof(*tmp));
3127 if (!tmp) {
3128 av_freep(&data);
3129 return NULL;
3130 }
3132 st->side_data = tmp;
3133 st->nb_side_data++;
3135 sd = &st->side_data[st->nb_side_data - 1];