2 * Various utilities for ffmpeg system
3 * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
5 * This library is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU Lesser General Public
7 * License as published by the Free Software Foundation; either
8 * version 2 of the License, or (at your option) any later version.
10 * This library is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * Lesser General Public License for more details.
15 * You should have received a copy of the GNU Lesser General Public
16 * License along with this library; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
24 AVInputFormat *first_iformat = NULL;
25 AVOutputFormat *first_oformat = NULL;
26 AVImageFormat *first_image_format = NULL;
28 void av_register_input_format(AVInputFormat *format)
32 while (*p != NULL) p = &(*p)->next;
37 void av_register_output_format(AVOutputFormat *format)
41 while (*p != NULL) p = &(*p)->next;
46 int match_ext(const char *filename, const char *extensions)
54 ext = strrchr(filename, '.');
60 while (*p != '\0' && *p != ',' && q-ext1<sizeof(ext1)-1)
63 if (!strcasecmp(ext1, ext))
73 AVOutputFormat *guess_format(const char *short_name, const char *filename,
74 const char *mime_type)
76 AVOutputFormat *fmt, *fmt_found;
79 /* specific test for image sequences */
80 if (!short_name && filename &&
81 filename_number_test(filename) >= 0 &&
82 av_guess_image2_codec(filename) != CODEC_ID_NONE) {
83 return guess_format("image2", NULL, NULL);
85 if (!short_name && filename &&
86 filename_number_test(filename) >= 0 &&
87 guess_image_format(filename)) {
88 return guess_format("image", NULL, NULL);
91 /* find the proper file type */
97 if (fmt->name && short_name && !strcmp(fmt->name, short_name))
99 if (fmt->mime_type && mime_type && !strcmp(fmt->mime_type, mime_type))
101 if (filename && fmt->extensions &&
102 match_ext(filename, fmt->extensions)) {
105 if (score > score_max) {
114 AVOutputFormat *guess_stream_format(const char *short_name, const char *filename,
115 const char *mime_type)
117 AVOutputFormat *fmt = guess_format(short_name, filename, mime_type);
120 AVOutputFormat *stream_fmt;
121 char stream_format_name[64];
123 snprintf(stream_format_name, sizeof(stream_format_name), "%s_stream", fmt->name);
124 stream_fmt = guess_format(stream_format_name, NULL, NULL);
134 * guesses the codec id based upon muxer and filename.
136 enum CodecID av_guess_codec(AVOutputFormat *fmt, const char *short_name,
137 const char *filename, const char *mime_type, enum CodecType type){
138 if(type == CODEC_TYPE_VIDEO){
139 enum CodecID codec_id= CODEC_ID_NONE;
141 if(!strcmp(fmt->name, "image2") || !strcmp(fmt->name, "image2pipe")){
142 codec_id= av_guess_image2_codec(filename);
144 if(codec_id == CODEC_ID_NONE)
145 codec_id= fmt->video_codec;
147 }else if(type == CODEC_TYPE_AUDIO)
148 return fmt->audio_codec;
150 return CODEC_ID_NONE;
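/* Illustrative sketch (not part of the library): how a muxing application
   might combine guess_format() and av_guess_codec() to pick an output
   format and a default video codec from a filename alone.  Error handling
   is minimal and the printf() calls are only for demonstration. */
#if 0
static void example_guess_output(const char *filename)
{
    AVOutputFormat *ofmt;
    enum CodecID vcodec;

    ofmt = guess_format(NULL, filename, NULL);
    if (!ofmt) {
        fprintf(stderr, "could not deduce output format from '%s'\n", filename);
        return;
    }
    vcodec = av_guess_codec(ofmt, NULL, filename, NULL, CODEC_TYPE_VIDEO);
    printf("format: %s, default video codec id: %d\n", ofmt->name, vcodec);
}
#endif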
153 AVInputFormat *av_find_input_format(const char *short_name)
156 for(fmt = first_iformat; fmt != NULL; fmt = fmt->next) {
157 if (!strcmp(fmt->name, short_name))
163 /* memory handling */
166 * Default packet destructor
168 static void av_destruct_packet(AVPacket *pkt)
171 pkt->data = NULL; pkt->size = 0;
175 * Allocate the payload of a packet and initialize its fields to default values.
178 * @param size wanted payload size
179 * @return 0 if OK. AVERROR_xxx otherwise.
181 int av_new_packet(AVPacket *pkt, int size)
184 if((unsigned)size > (unsigned)size + FF_INPUT_BUFFER_PADDING_SIZE)
185 return AVERROR_NOMEM;
186 data = av_malloc(size + FF_INPUT_BUFFER_PADDING_SIZE);
188 return AVERROR_NOMEM;
189 memset(data + size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
194 pkt->destruct = av_destruct_packet;
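/* Illustrative sketch (not part of the library): typical use of
   av_new_packet()/av_free_packet() by code that wants a padded, self-owned
   payload buffer.  'fill_payload' is a hypothetical helper, not an ffmpeg
   function. */
#if 0
static int example_make_packet(AVPacket *pkt, int payload_size)
{
    if (av_new_packet(pkt, payload_size) < 0)
        return AVERROR_NOMEM;
    /* pkt->data now points to payload_size bytes followed by
       FF_INPUT_BUFFER_PADDING_SIZE zeroed padding bytes */
    fill_payload(pkt->data, payload_size);  /* hypothetical */
    /* ... and once the packet is no longer needed: */
    av_free_packet(pkt);
    return 0;
}
#endif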
198 /* This is a hack - the packet memory allocation stuff is broken. The
199 packet is allocated if it was not really allocated */
200 int av_dup_packet(AVPacket *pkt)
202 if (pkt->destruct != av_destruct_packet) {
204 /* we duplicate the packet and don't forget to put the padding
206 if((unsigned)pkt->size > (unsigned)pkt->size + FF_INPUT_BUFFER_PADDING_SIZE)
207 return AVERROR_NOMEM;
208 data = av_malloc(pkt->size + FF_INPUT_BUFFER_PADDING_SIZE);
210 return AVERROR_NOMEM;
212 memcpy(data, pkt->data, pkt->size);
213 memset(data + pkt->size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
215 pkt->destruct = av_destruct_packet;
222 int fifo_init(FifoBuffer *f, int size)
224 f->buffer = av_malloc(size);
227 f->end = f->buffer + size;
228 f->wptr = f->rptr = f->buffer;
232 void fifo_free(FifoBuffer *f)
237 int fifo_size(FifoBuffer *f, uint8_t *rptr)
244 if (f->wptr >= rptr) {
245 size = f->wptr - rptr;
247 size = (f->end - rptr) + (f->wptr - f->buffer);
252 /* get data from the fifo (return -1 if not enough data) */
253 int fifo_read(FifoBuffer *f, uint8_t *buf, int buf_size, uint8_t **rptr_ptr)
262 if (f->wptr >= rptr) {
263 size = f->wptr - rptr;
265 size = (f->end - rptr) + (f->wptr - f->buffer);
270 while (buf_size > 0) {
274 memcpy(buf, rptr, len);
285 void fifo_realloc(FifoBuffer *f, unsigned int new_size){
286 unsigned int old_size= f->end - f->buffer;
288 if(old_size < new_size){
289 uint8_t *old= f->buffer;
291 f->buffer= av_realloc(f->buffer, new_size);
293 f->rptr += f->buffer - old;
294 f->wptr += f->buffer - old;
296 if(f->wptr < f->rptr){
297 memmove(f->rptr + new_size - old_size, f->rptr, f->buffer + old_size - f->rptr);
298 f->rptr += new_size - old_size;
300 f->end= f->buffer + new_size;
304 void fifo_write(FifoBuffer *f, uint8_t *buf, int size, uint8_t **wptr_ptr)
317 memcpy(wptr, buf, len);
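/* Illustrative sketch (not part of the library): a FIFO round trip.  It
   assumes the calling convention used elsewhere in ffmpeg, i.e. passing
   &f.rptr / &f.wptr so the FIFO's own pointers are advanced. */
#if 0
static void example_fifo(void)
{
    FifoBuffer f;
    uint8_t in[16] = {0}, out[16];

    if (fifo_init(&f, 1024) < 0)
        return;
    fifo_write(&f, in, sizeof(in), &f.wptr);
    if (fifo_size(&f, f.rptr) >= (int)sizeof(out))
        fifo_read(&f, out, sizeof(out), &f.rptr);
    fifo_free(&f);
}
#endif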
327 /* write data from the fifo to 'pb' (return -1 if not enough data) */
328 int put_fifo(ByteIOContext *pb, FifoBuffer *f, int buf_size, uint8_t **rptr_ptr)
330 uint8_t *rptr = *rptr_ptr;
333 if (f->wptr >= rptr) {
334 size = f->wptr - rptr;
336 size = (f->end - rptr) + (f->wptr - f->buffer);
341 while (buf_size > 0) {
345 put_buffer(pb, rptr, len);
355 int filename_number_test(const char *filename)
360 return get_frame_filename(buf, sizeof(buf), filename, 1);
363 /* guess file format */
364 AVInputFormat *av_probe_input_format(AVProbeData *pd, int is_opened)
366 AVInputFormat *fmt1, *fmt;
367 int score, score_max;
371 for(fmt1 = first_iformat; fmt1 != NULL; fmt1 = fmt1->next) {
372 if (!is_opened && !(fmt1->flags & AVFMT_NOFILE))
375 if (fmt1->read_probe) {
376 score = fmt1->read_probe(pd);
377 } else if (fmt1->extensions) {
378 if (match_ext(pd->filename, fmt1->extensions)) {
382 if (score > score_max) {
390 /************************************************************/
391 /* input media file */
394 * open a media file from an IO stream. 'fmt' must be specified.
397 static const char* format_to_name(void* ptr)
399 AVFormatContext* fc = (AVFormatContext*) ptr;
400 if(fc->iformat) return fc->iformat->name;
401 else if(fc->oformat) return fc->oformat->name;
405 static const AVClass av_format_context_class = { "AVFormatContext", format_to_name };
407 AVFormatContext *av_alloc_format_context(void)
410 ic = av_mallocz(sizeof(AVFormatContext));
412 ic->av_class = &av_format_context_class;
416 int av_open_input_stream(AVFormatContext **ic_ptr,
417 ByteIOContext *pb, const char *filename,
418 AVInputFormat *fmt, AVFormatParameters *ap)
423 ic = av_alloc_format_context();
431 ic->duration = AV_NOPTS_VALUE;
432 ic->start_time = AV_NOPTS_VALUE;
433 pstrcpy(ic->filename, sizeof(ic->filename), filename);
435 /* allocate private data */
436 if (fmt->priv_data_size > 0) {
437 ic->priv_data = av_mallocz(fmt->priv_data_size);
438 if (!ic->priv_data) {
443 ic->priv_data = NULL;
446 err = ic->iformat->read_header(ic, ap);
451 ic->data_offset = url_ftell(&ic->pb);
457 av_freep(&ic->priv_data);
464 #define PROBE_BUF_SIZE 2048
467 * Open a media file as input. The codecs are not opened. Only the file
468 * header (if present) is read.
470 * @param ic_ptr the opened media file handle is put here
471 * @param filename filename to open.
472 * @param fmt if non NULL, force the file format to use
473 * @param buf_size optional buffer size (zero if default is OK)
474 * @param ap additional parameters needed when opening the file (NULL if default)
475 * @return 0 if OK. AVERROR_xxx otherwise.
477 int av_open_input_file(AVFormatContext **ic_ptr, const char *filename,
480 AVFormatParameters *ap)
482 int err, must_open_file, file_opened;
483 uint8_t buf[PROBE_BUF_SIZE];
484 AVProbeData probe_data, *pd = &probe_data;
485 ByteIOContext pb1, *pb = &pb1;
490 pd->filename = filename;
495 /* guess format if no file can be opened */
496 fmt = av_probe_input_format(pd, 0);
499 /* do not open file if the format does not need it. XXX: specific
500 hack needed to handle RTSP/TCP */
502 if (fmt && (fmt->flags & AVFMT_NOFILE)) {
504 pb= NULL; //FIXME this or memset(pb, 0, sizeof(ByteIOContext)); otherwise it's uninitialized
507 if (!fmt || must_open_file) {
508 /* if no file needed do not try to open one */
509 if (url_fopen(pb, filename, URL_RDONLY) < 0) {
515 url_setbufsize(pb, buf_size);
518 /* read probe data */
519 pd->buf_size = get_buffer(pb, buf, PROBE_BUF_SIZE);
520 if (url_fseek(pb, 0, SEEK_SET) == (offset_t)-EPIPE) {
522 if (url_fopen(pb, filename, URL_RDONLY) < 0) {
530 /* guess file format */
532 fmt = av_probe_input_format(pd, 1);
535 /* if still no format found, error */
541 /* XXX: suppress this hack for redirectors */
542 #ifdef CONFIG_NETWORK
543 if (fmt == &redir_demux) {
544 err = redir_open(ic_ptr, pb);
550 /* check filename in case an image number is expected */
551 if (fmt->flags & AVFMT_NEEDNUMBER) {
552 if (filename_number_test(filename) < 0) {
553 err = AVERROR_NUMEXPECTED;
557 err = av_open_input_stream(ic_ptr, pb, filename, fmt, ap);
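/* Illustrative sketch (not part of the library): opening an input file and
   letting the probing code pick the demuxer.  The NULL/0 arguments request
   format autodetection and the default buffer size; the full parameter list
   is assumed from the declaration in avformat.h of this era. */
#if 0
static AVFormatContext *example_open(const char *filename)
{
    AVFormatContext *ic = NULL;

    if (av_open_input_file(&ic, filename, NULL, 0, NULL) < 0)
        return NULL;
    return ic;  /* the caller eventually uses av_close_input_file(ic) */
}
#endif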
569 /*******************************************************/
572 * Read a transport packet from a media file. This function is
573 * obsolete and should never be used. Use av_read_frame() instead.
575 * @param s media file handle
576 * @param pkt is filled
577 * @return 0 if OK. AVERROR_xxx if error.
579 int av_read_packet(AVFormatContext *s, AVPacket *pkt)
581 return s->iformat->read_packet(s, pkt);
584 /**********************************************************/
586 /* get the number of samples of an audio frame. Return (-1) if error */
587 static int get_audio_frame_size(AVCodecContext *enc, int size)
591 if (enc->frame_size <= 1) {
592 /* specific hack for pcm codecs because no frame size is
594 switch(enc->codec_id) {
595 case CODEC_ID_PCM_S16LE:
596 case CODEC_ID_PCM_S16BE:
597 case CODEC_ID_PCM_U16LE:
598 case CODEC_ID_PCM_U16BE:
599 if (enc->channels == 0)
601 frame_size = size / (2 * enc->channels);
603 case CODEC_ID_PCM_S8:
604 case CODEC_ID_PCM_U8:
605 case CODEC_ID_PCM_MULAW:
606 case CODEC_ID_PCM_ALAW:
607 if (enc->channels == 0)
609 frame_size = size / (enc->channels);
612 /* used for example by ADPCM codecs */
613 if (enc->bit_rate == 0)
615 frame_size = (size * 8 * enc->sample_rate) / enc->bit_rate;
619 frame_size = enc->frame_size;
625 /* compute the frame duration as a num/den fraction; set to 0 if not available */
626 static void compute_frame_duration(int *pnum, int *pden, AVStream *st,
627 AVCodecParserContext *pc, AVPacket *pkt)
633 switch(st->codec.codec_type) {
634 case CODEC_TYPE_VIDEO:
635 if(st->time_base.num*1000 > st->time_base.den){
636 *pnum = st->time_base.num;
637 *pden = st->time_base.den;
638 }else if(st->codec.time_base.num*1000 > st->codec.time_base.den){
639 *pnum = st->codec.time_base.num;
640 *pden = st->codec.time_base.den;
641 if (pc && pc->repeat_pict) {
643 *pnum = (*pnum) * (2 + pc->repeat_pict);
647 case CODEC_TYPE_AUDIO:
648 frame_size = get_audio_frame_size(&st->codec, pkt->size);
652 *pden = st->codec.sample_rate;
659 static int is_intra_only(AVCodecContext *enc){
660 if(enc->codec_type == CODEC_TYPE_AUDIO){
662 }else if(enc->codec_type == CODEC_TYPE_VIDEO){
663 switch(enc->codec_id){
665 case CODEC_ID_MJPEGB:
667 case CODEC_ID_RAWVIDEO:
668 case CODEC_ID_DVVIDEO:
669 case CODEC_ID_HUFFYUV:
670 case CODEC_ID_FFVHUFF:
681 static int64_t lsb2full(int64_t lsb, int64_t last_ts, int lsb_bits){
682 int64_t mask = lsb_bits < 64 ? (1LL<<lsb_bits)-1 : -1LL;
683 int64_t delta= last_ts - mask/2;
684 return ((lsb - delta)&mask) + delta;
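/* Worked example of the wrap handling above: with lsb_bits = 33 the mask is
   0x1FFFFFFFF.  If last_ts = 0x1FFFFFFF0 and a packet carries the wrapped
   33-bit value lsb = 0x10, lsb2full() returns 0x200000010, i.e. the timestamp
   just after the wrap point rather than a value roughly 26 hours in the past
   (at 90 kHz). */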
687 static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
688 AVCodecParserContext *pc, AVPacket *pkt)
690 int num, den, presentation_delayed;
691 /* handle wrapping */
692 if(st->cur_dts != AV_NOPTS_VALUE){
693 if(pkt->pts != AV_NOPTS_VALUE)
694 pkt->pts= lsb2full(pkt->pts, st->cur_dts, st->pts_wrap_bits);
695 if(pkt->dts != AV_NOPTS_VALUE)
696 pkt->dts= lsb2full(pkt->dts, st->cur_dts, st->pts_wrap_bits);
699 if (pkt->duration == 0) {
700 compute_frame_duration(&num, &den, st, pc, pkt);
702 pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num);
706 if(is_intra_only(&st->codec))
707 pkt->flags |= PKT_FLAG_KEY;
709 /* do we have a video B frame ? */
710 presentation_delayed = 0;
711 if (st->codec.codec_type == CODEC_TYPE_VIDEO) {
712 /* XXX: need has_b_frame, but cannot get it if the codec is
714 if (( st->codec.codec_id == CODEC_ID_H264
715 || st->codec.has_b_frames) &&
716 pc && pc->pict_type != FF_B_TYPE)
717 presentation_delayed = 1;
718 /* this may be redundant, but it shouldn't hurt */
719 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts > pkt->dts)
720 presentation_delayed = 1;
723 if(st->cur_dts == AV_NOPTS_VALUE){
724 if(presentation_delayed) st->cur_dts = -pkt->duration;
725 else st->cur_dts = 0;
728 // av_log(NULL, AV_LOG_DEBUG, "IN delayed:%d pts:%lld, dts:%lld cur_dts:%lld st:%d pc:%p\n", presentation_delayed, pkt->pts, pkt->dts, st->cur_dts, pkt->stream_index, pc);
729 /* interpolate PTS and DTS if they are not present */
730 if (presentation_delayed) {
731 /* DTS = decompression time stamp */
732 /* PTS = presentation time stamp */
733 if (pkt->dts == AV_NOPTS_VALUE) {
734 /* if we know the last pts, use it */
735 if(st->last_IP_pts != AV_NOPTS_VALUE)
736 st->cur_dts = pkt->dts = st->last_IP_pts;
738 pkt->dts = st->cur_dts;
740 st->cur_dts = pkt->dts;
742 /* this is tricky: the dts must be incremented by the duration
743 of the frame we are displaying, i.e. the last I or P frame */
744 if (st->last_IP_duration == 0)
745 st->cur_dts += pkt->duration;
747 st->cur_dts += st->last_IP_duration;
748 st->last_IP_duration = pkt->duration;
749 st->last_IP_pts= pkt->pts;
750 /* cannot compute PTS if not present (we can compute it only
751 by knowing the future) */
752 } else if(pkt->pts != AV_NOPTS_VALUE || pkt->dts != AV_NOPTS_VALUE || pkt->duration){
753 if(pkt->pts != AV_NOPTS_VALUE && pkt->duration){
754 int64_t old_diff= ABS(st->cur_dts - pkt->duration - pkt->pts);
755 int64_t new_diff= ABS(st->cur_dts - pkt->pts);
756 if(old_diff < new_diff && old_diff < (pkt->duration>>3)){
757 pkt->pts += pkt->duration;
758 // av_log(NULL, AV_LOG_DEBUG, "id:%d old:%Ld new:%Ld dur:%d cur:%Ld size:%d\n", pkt->stream_index, old_diff, new_diff, pkt->duration, st->cur_dts, pkt->size);
762 /* presentation is not delayed : PTS and DTS are the same */
763 if (pkt->pts == AV_NOPTS_VALUE) {
764 if (pkt->dts == AV_NOPTS_VALUE) {
765 pkt->pts = st->cur_dts;
766 pkt->dts = st->cur_dts;
769 st->cur_dts = pkt->dts;
773 st->cur_dts = pkt->pts;
776 st->cur_dts += pkt->duration;
778 // av_log(NULL, AV_LOG_DEBUG, "OUTdelayed:%d pts:%lld, dts:%lld cur_dts:%lld\n", presentation_delayed, pkt->pts, pkt->dts, st->cur_dts);
783 /* key frame computation */
784 switch(st->codec.codec_type) {
785 case CODEC_TYPE_VIDEO:
786 if (pc->pict_type == FF_I_TYPE)
787 pkt->flags |= PKT_FLAG_KEY;
789 case CODEC_TYPE_AUDIO:
790 pkt->flags |= PKT_FLAG_KEY;
798 void av_destruct_packet_nofree(AVPacket *pkt)
800 pkt->data = NULL; pkt->size = 0;
803 static int av_read_frame_internal(AVFormatContext *s, AVPacket *pkt)
809 /* select current input stream component */
813 /* no parsing needed: we just output the packet as is */
814 /* raw data support */
816 compute_pkt_fields(s, st, NULL, pkt);
819 } else if (s->cur_len > 0 && st->discard < AVDISCARD_ALL) {
820 len = av_parser_parse(st->parser, &st->codec, &pkt->data, &pkt->size,
821 s->cur_ptr, s->cur_len,
822 s->cur_pkt.pts, s->cur_pkt.dts);
823 s->cur_pkt.pts = AV_NOPTS_VALUE;
824 s->cur_pkt.dts = AV_NOPTS_VALUE;
825 /* increment read pointer */
829 /* return packet if any */
833 pkt->stream_index = st->index;
834 pkt->pts = st->parser->pts;
835 pkt->dts = st->parser->dts;
836 pkt->destruct = av_destruct_packet_nofree;
837 compute_pkt_fields(s, st, st->parser, pkt);
842 av_free_packet(&s->cur_pkt);
846 /* read next packet */
847 ret = av_read_packet(s, &s->cur_pkt);
851 /* return the last frames, if any */
852 for(i = 0; i < s->nb_streams; i++) {
855 av_parser_parse(st->parser, &st->codec,
856 &pkt->data, &pkt->size,
858 AV_NOPTS_VALUE, AV_NOPTS_VALUE);
863 /* no more packets: really terminates parsing */
867 st = s->streams[s->cur_pkt.stream_index];
870 s->cur_ptr = s->cur_pkt.data;
871 s->cur_len = s->cur_pkt.size;
872 if (st->need_parsing && !st->parser) {
873 st->parser = av_parser_init(st->codec.codec_id);
875 /* no parser available : just output the raw packets */
876 st->need_parsing = 0;
884 * Return the next frame of a stream. The returned packet is valid
885 * until the next av_read_frame() or until av_close_input_file() and
886 * must be freed with av_free_packet. For video, the packet contains
887 * exactly one frame. For audio, it contains an integer number of
888 * frames if each frame has a known fixed size (e.g. PCM or ADPCM
889 * data). If the audio frames have a variable size (e.g. MPEG audio),
890 * then it contains one frame.
892 * pkt->pts, pkt->dts and pkt->duration are always set to correct
893 * values in AV_TIME_BASE units (and guessed if the format cannot
894 * provide them). pkt->pts can be AV_NOPTS_VALUE if the video format
895 * has B frames, so it is better to rely on pkt->dts if you do not
896 * decompress the payload.
898 * Return 0 if OK, < 0 if error or end of file.
900 int av_read_frame(AVFormatContext *s, AVPacket *pkt)
904 pktl = s->packet_buffer;
906 /* read packet from packet buffer, if there is data */
908 s->packet_buffer = pktl->next;
912 return av_read_frame_internal(s, pkt);
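/* Illustrative sketch (not part of the library): the canonical demuxing loop
   built on av_read_frame().  Every returned packet must be released with
   av_free_packet() before the next call. */
#if 0
static void example_demux_loop(AVFormatContext *ic)
{
    AVPacket pkt;

    while (av_read_frame(ic, &pkt) >= 0) {
        /* pkt.stream_index tells which stream the payload belongs to;
           pkt.data / pkt.size hold the (possibly parsed) frame */
        av_free_packet(&pkt);
    }
}
#endif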
916 /* XXX: suppress the packet queue */
917 static void flush_packet_queue(AVFormatContext *s)
922 pktl = s->packet_buffer;
925 s->packet_buffer = pktl->next;
926 av_free_packet(&pktl->pkt);
931 /*******************************************************/
934 int av_find_default_stream_index(AVFormatContext *s)
939 if (s->nb_streams <= 0)
941 for(i = 0; i < s->nb_streams; i++) {
943 if (st->codec.codec_type == CODEC_TYPE_VIDEO) {
950 /* flush the frame reader */
951 static void av_read_frame_flush(AVFormatContext *s)
956 flush_packet_queue(s);
958 /* free previous packet */
960 if (s->cur_st->parser)
961 av_free_packet(&s->cur_pkt);
968 /* for each stream, reset read state */
969 for(i = 0; i < s->nb_streams; i++) {
973 av_parser_close(st->parser);
976 st->last_IP_pts = AV_NOPTS_VALUE;
977 st->cur_dts = 0; /* we set the current DTS to an unspecified origin */
982 * updates cur_dts of all streams based on the given timestamp and AVStream.
983 * stream ref_st is unchanged, the others have cur_dts set in their native time base
984 * only needed for timestamp wrapping or if (dts not set and pts!=dts)
985 * @param timestamp new dts expressed in time_base of param ref_st
986 * @param ref_st reference stream giving time_base of param timestamp
988 static void av_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp){
991 for(i = 0; i < s->nb_streams; i++) {
992 AVStream *st = s->streams[i];
994 st->cur_dts = av_rescale(timestamp,
995 st->time_base.den * (int64_t)ref_st->time_base.num,
996 st->time_base.num * (int64_t)ref_st->time_base.den);
1001 * add an index entry into a sorted list, updating it if it is already there.
1002 * @param timestamp timestamp in the timebase of the given stream
1004 int av_add_index_entry(AVStream *st,
1005 int64_t pos, int64_t timestamp, int distance, int flags)
1007 AVIndexEntry *entries, *ie;
1010 if((unsigned)st->nb_index_entries + 1 >= UINT_MAX / sizeof(AVIndexEntry))
1013 entries = av_fast_realloc(st->index_entries,
1014 &st->index_entries_allocated_size,
1015 (st->nb_index_entries + 1) *
1016 sizeof(AVIndexEntry));
1020 st->index_entries= entries;
1022 index= av_index_search_timestamp(st, timestamp, AVSEEK_FLAG_ANY);
1025 index= st->nb_index_entries++;
1026 ie= &entries[index];
1027 assert(index==0 || ie[-1].timestamp < timestamp);
1029 ie= &entries[index];
1030 if(ie->timestamp != timestamp){
1031 if(ie->timestamp <= timestamp)
1033 memmove(entries + index + 1, entries + index, sizeof(AVIndexEntry)*(st->nb_index_entries - index));
1034 st->nb_index_entries++;
1035 }else if(ie->pos == pos && distance < ie->min_distance) //don't reduce the distance
1036 distance= ie->min_distance;
1040 ie->timestamp = timestamp;
1041 ie->min_distance= distance;
1047 /* build an index for raw streams using a parser */
1048 static void av_build_index_raw(AVFormatContext *s)
1050 AVPacket pkt1, *pkt = &pkt1;
1055 av_read_frame_flush(s);
1056 url_fseek(&s->pb, s->data_offset, SEEK_SET);
1059 ret = av_read_frame(s, pkt);
1062 if (pkt->stream_index == 0 && st->parser &&
1063 (pkt->flags & PKT_FLAG_KEY)) {
1064 av_add_index_entry(st, st->parser->frame_offset, pkt->dts,
1065 0, AVINDEX_KEYFRAME);
1067 av_free_packet(pkt);
1071 /* return TRUE if we deal with a raw stream (raw codec data and
1073 static int is_raw_stream(AVFormatContext *s)
1077 if (s->nb_streams != 1)
1080 if (!st->need_parsing)
1086 * gets the index for a specific timestamp.
1087 * @param flags if AVSEEK_FLAG_BACKWARD then the returned index will correspond to
1088 * the timestamp which is <= the requested one, if backward is 0
1089 * then it will be >=
1090 * if AVSEEK_FLAG_ANY seek to any frame, only keyframes otherwise
1091 * @return < 0 if no such timestamp could be found
1093 int av_index_search_timestamp(AVStream *st, int64_t wanted_timestamp,
1096 AVIndexEntry *entries= st->index_entries;
1097 int nb_entries= st->nb_index_entries;
1106 timestamp = entries[m].timestamp;
1107 if(timestamp >= wanted_timestamp)
1109 if(timestamp <= wanted_timestamp)
1112 m= (flags & AVSEEK_FLAG_BACKWARD) ? a : b;
1114 if(!(flags & AVSEEK_FLAG_ANY)){
1115 while(m>=0 && m<nb_entries && !(entries[m].flags & AVINDEX_KEYFRAME)){
1116 m += (flags & AVSEEK_FLAG_BACKWARD) ? -1 : 1;
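/* Illustrative sketch (not part of the library): how a demuxer might use the
   index helpers.  Timestamps are in the stream's own time_base. */
#if 0
static int64_t example_index_lookup(AVStream *st, int64_t ts)
{
    int idx = av_index_search_timestamp(st, ts, AVSEEK_FLAG_BACKWARD);
    if (idx < 0)
        return -1;
    return st->index_entries[idx].pos; /* file position of the keyframe <= ts */
}
#endif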
1128 * Does a binary search using av_index_search_timestamp() and AVInputFormat.read_timestamp().
1129 * This isn't supposed to be called directly by a user application, but by demuxers
1130 * @param target_ts target timestamp in the time base of the given stream
1131 * @param stream_index stream number
1133 int av_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts, int flags){
1134 AVInputFormat *avif= s->iformat;
1135 int64_t pos_min, pos_max, pos, pos_limit;
1136 int64_t ts_min, ts_max, ts;
1138 int index, no_change;
1141 if (stream_index < 0)
1145 av_log(s, AV_LOG_DEBUG, "read_seek: %d %lld\n", stream_index, target_ts);
1149 ts_min= AV_NOPTS_VALUE;
1150 pos_limit= -1; //gcc falsely says it may be uninitialized
1152 st= s->streams[stream_index];
1153 if(st->index_entries){
1156 index= av_index_search_timestamp(st, target_ts, flags | AVSEEK_FLAG_BACKWARD); //FIXME whole func must be checked for non keyframe entries in index case, especially read_timestamp()
1157 index= FFMAX(index, 0);
1158 e= &st->index_entries[index];
1160 if(e->timestamp <= target_ts || e->pos == e->min_distance){
1162 ts_min= e->timestamp;
1164 av_log(s, AV_LOG_DEBUG, "using cached pos_min=0x%llx dts_min=%lld\n",
1171 index= av_index_search_timestamp(st, target_ts, flags & ~AVSEEK_FLAG_BACKWARD);
1172 assert(index < st->nb_index_entries);
1174 e= &st->index_entries[index];
1175 assert(e->timestamp >= target_ts);
1177 ts_max= e->timestamp;
1178 pos_limit= pos_max - e->min_distance;
1180 av_log(s, AV_LOG_DEBUG, "using cached pos_max=0x%llx pos_limit=0x%llx dts_max=%lld\n",
1181 pos_max,pos_limit, ts_max);
1186 if(ts_min == AV_NOPTS_VALUE){
1187 pos_min = s->data_offset;
1188 ts_min = avif->read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1189 if (ts_min == AV_NOPTS_VALUE)
1193 if(ts_max == AV_NOPTS_VALUE){
1195 pos_max = url_filesize(url_fileno(&s->pb)) - 1;
1198 ts_max = avif->read_timestamp(s, stream_index, &pos_max, pos_max + step);
1200 }while(ts_max == AV_NOPTS_VALUE && pos_max >= step);
1201 if (ts_max == AV_NOPTS_VALUE)
1205 int64_t tmp_pos= pos_max + 1;
1206 int64_t tmp_ts= avif->read_timestamp(s, stream_index, &tmp_pos, INT64_MAX);
1207 if(tmp_ts == AV_NOPTS_VALUE)
1216 while (pos_min < pos_limit) {
1218 av_log(s, AV_LOG_DEBUG, "pos_min=0x%llx pos_max=0x%llx dts_min=%lld dts_max=%lld\n",
1222 assert(pos_limit <= pos_max);
1225 int64_t approximate_keyframe_distance= pos_max - pos_limit;
1226 // interpolate position (better than dichotomy)
1227 pos = av_rescale(target_ts - ts_min, pos_max - pos_min, ts_max - ts_min)
1228 + pos_min - approximate_keyframe_distance;
1229 }else if(no_change==1){
1230 // bisection, if interpolation failed to change min or max pos last time
1231 pos = (pos_min + pos_limit)>>1;
1233 // linear search if bisection failed, can only happen if there are very few or no keyframes between min/max
1238 else if(pos > pos_limit)
1242 ts = avif->read_timestamp(s, stream_index, &pos, INT64_MAX); //may pass pos_limit instead of -1
1248 av_log(s, AV_LOG_DEBUG, "%Ld %Ld %Ld / %Ld %Ld %Ld target:%Ld limit:%Ld start:%Ld noc:%d\n", pos_min, pos, pos_max, ts_min, ts, ts_max, target_ts, pos_limit, start_pos, no_change);
1250 assert(ts != AV_NOPTS_VALUE);
1251 if (target_ts <= ts) {
1252 pos_limit = start_pos - 1;
1256 if (target_ts >= ts) {
1262 pos = (flags & AVSEEK_FLAG_BACKWARD) ? pos_min : pos_max;
1263 ts = (flags & AVSEEK_FLAG_BACKWARD) ? ts_min : ts_max;
1266 ts_min = avif->read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1268 ts_max = avif->read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1269 av_log(s, AV_LOG_DEBUG, "pos=0x%llx %lld<=%lld<=%lld\n",
1270 pos, ts_min, target_ts, ts_max);
1273 url_fseek(&s->pb, pos, SEEK_SET);
1275 av_update_cur_dts(s, st, ts);
1280 static int av_seek_frame_byte(AVFormatContext *s, int stream_index, int64_t pos, int flags){
1281 int64_t pos_min, pos_max;
1285 if (stream_index < 0)
1288 st= s->streams[stream_index];
1291 pos_min = s->data_offset;
1292 pos_max = url_filesize(url_fileno(&s->pb)) - 1;
1294 if (pos < pos_min) pos= pos_min;
1295 else if(pos > pos_max) pos= pos_max;
1297 url_fseek(&s->pb, pos, SEEK_SET);
1300 av_update_cur_dts(s, st, ts);
1305 static int av_seek_frame_generic(AVFormatContext *s,
1306 int stream_index, int64_t timestamp, int flags)
1312 if (!s->index_built) {
1313 if (is_raw_stream(s)) {
1314 av_build_index_raw(s);
1321 st = s->streams[stream_index];
1322 index = av_index_search_timestamp(st, timestamp, flags);
1326 /* now we have found the index, we can seek */
1327 ie = &st->index_entries[index];
1328 av_read_frame_flush(s);
1329 url_fseek(&s->pb, ie->pos, SEEK_SET);
1331 av_update_cur_dts(s, st, ie->timestamp);
1337 * Seek to the key frame at timestamp.
1338 * 'timestamp' in 'stream_index'.
1339 * @param stream_index If stream_index is (-1), a default
1340 * stream is selected, and timestamp is automatically converted
1341 * from AV_TIME_BASE units to the stream specific time_base.
1342 * @param timestamp timestamp in AVStream.time_base units
1343 * @param flags flags which select direction and seeking mode
1344 * @return >= 0 on success
1346 int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
1351 av_read_frame_flush(s);
1353 if(flags & AVSEEK_FLAG_BYTE)
1354 return av_seek_frame_byte(s, stream_index, timestamp, flags);
1356 if(stream_index < 0){
1357 stream_index= av_find_default_stream_index(s);
1358 if(stream_index < 0)
1361 st= s->streams[stream_index];
1362 /* timestamp for default must be expressed in AV_TIME_BASE units */
1363 timestamp = av_rescale(timestamp, st->time_base.den, AV_TIME_BASE * (int64_t)st->time_base.num);
1365 st= s->streams[stream_index];
1367 /* first, we try the format specific seek */
1368 if (s->iformat->read_seek)
1369 ret = s->iformat->read_seek(s, stream_index, timestamp, flags);
1376 if(s->iformat->read_timestamp)
1377 return av_seek_frame_binary(s, stream_index, timestamp, flags);
1379 return av_seek_frame_generic(s, stream_index, timestamp, flags);
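/* Illustrative sketch (not part of the library): seeking to an absolute time
   given in seconds.  Passing stream_index = -1 means the timestamp must be in
   AV_TIME_BASE units, which av_seek_frame() then converts internally. */
#if 0
static int example_seek_seconds(AVFormatContext *ic, double seconds)
{
    int64_t ts = (int64_t)(seconds * AV_TIME_BASE);
    return av_seek_frame(ic, -1, ts, AVSEEK_FLAG_BACKWARD);
}
#endif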
1382 /*******************************************************/
1384 /* return TRUE if the stream has accurate timings for at least one component */
1385 static int av_has_timings(AVFormatContext *ic)
1390 for(i = 0;i < ic->nb_streams; i++) {
1391 st = ic->streams[i];
1392 if (st->start_time != AV_NOPTS_VALUE &&
1393 st->duration != AV_NOPTS_VALUE)
1399 /* estimate the stream timings from the ones of each component. Also
1400 compute the global bitrate if possible */
1401 static void av_update_stream_timings(AVFormatContext *ic)
1403 int64_t start_time, start_time1, end_time, end_time1;
1407 start_time = MAXINT64;
1408 end_time = MININT64;
1409 for(i = 0;i < ic->nb_streams; i++) {
1410 st = ic->streams[i];
1411 if (st->start_time != AV_NOPTS_VALUE) {
1412 start_time1= av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q);
1413 if (start_time1 < start_time)
1414 start_time = start_time1;
1415 if (st->duration != AV_NOPTS_VALUE) {
1416 end_time1 = start_time1
1417 + av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
1418 if (end_time1 > end_time)
1419 end_time = end_time1;
1423 if (start_time != MAXINT64) {
1424 ic->start_time = start_time;
1425 if (end_time != MAXINT64) {
1426 ic->duration = end_time - start_time;
1427 if (ic->file_size > 0) {
1428 /* compute the bit rate */
1429 ic->bit_rate = (double)ic->file_size * 8.0 * AV_TIME_BASE /
1430 (double)ic->duration;
1437 static void fill_all_stream_timings(AVFormatContext *ic)
1442 av_update_stream_timings(ic);
1443 for(i = 0;i < ic->nb_streams; i++) {
1444 st = ic->streams[i];
1445 if (st->start_time == AV_NOPTS_VALUE) {
1446 if(ic->start_time != AV_NOPTS_VALUE)
1447 st->start_time = av_rescale_q(ic->start_time, AV_TIME_BASE_Q, st->time_base);
1448 if(ic->duration != AV_NOPTS_VALUE)
1449 st->duration = av_rescale_q(ic->duration, AV_TIME_BASE_Q, st->time_base);
1454 static void av_estimate_timings_from_bit_rate(AVFormatContext *ic)
1456 int64_t filesize, duration;
1460 /* if bit_rate is already set, we believe it */
1461 if (ic->bit_rate == 0) {
1463 for(i=0;i<ic->nb_streams;i++) {
1464 st = ic->streams[i];
1465 bit_rate += st->codec.bit_rate;
1467 ic->bit_rate = bit_rate;
1470 /* if duration is already set, we believe it */
1471 if (ic->duration == AV_NOPTS_VALUE &&
1472 ic->bit_rate != 0 &&
1473 ic->file_size != 0) {
1474 filesize = ic->file_size;
1476 for(i = 0; i < ic->nb_streams; i++) {
1477 st = ic->streams[i];
1478 duration= av_rescale(8*filesize, st->time_base.den, ic->bit_rate*(int64_t)st->time_base.num);
1479 if (st->start_time == AV_NOPTS_VALUE ||
1480 st->duration == AV_NOPTS_VALUE) {
1482 st->duration = duration;
1489 #define DURATION_MAX_READ_SIZE 250000
1491 /* only usable for MPEG-PS streams */
1492 static void av_estimate_timings_from_pts(AVFormatContext *ic)
1494 AVPacket pkt1, *pkt = &pkt1;
1496 int read_size, i, ret;
1497 int64_t start_time, end_time, end_time1;
1498 int64_t filesize, offset, duration;
1500 /* free previous packet */
1501 if (ic->cur_st && ic->cur_st->parser)
1502 av_free_packet(&ic->cur_pkt);
1505 /* flush packet queue */
1506 flush_packet_queue(ic);
1508 for(i=0;i<ic->nb_streams;i++) {
1509 st = ic->streams[i];
1511 av_parser_close(st->parser);
1516 /* we read the first packets to get the first PTS (not fully
1517 accurate, but it is enough now) */
1518 url_fseek(&ic->pb, 0, SEEK_SET);
1521 if (read_size >= DURATION_MAX_READ_SIZE)
1523 /* if all info is available, we can stop */
1524 for(i = 0;i < ic->nb_streams; i++) {
1525 st = ic->streams[i];
1526 if (st->start_time == AV_NOPTS_VALUE)
1529 if (i == ic->nb_streams)
1532 ret = av_read_packet(ic, pkt);
1535 read_size += pkt->size;
1536 st = ic->streams[pkt->stream_index];
1537 if (pkt->pts != AV_NOPTS_VALUE) {
1538 if (st->start_time == AV_NOPTS_VALUE)
1539 st->start_time = pkt->pts;
1541 av_free_packet(pkt);
1544 /* estimate the end time (duration) */
1545 /* XXX: may need to support wrapping */
1546 filesize = ic->file_size;
1547 offset = filesize - DURATION_MAX_READ_SIZE;
1551 url_fseek(&ic->pb, offset, SEEK_SET);
1554 if (read_size >= DURATION_MAX_READ_SIZE)
1556 /* if all info is available, we can stop */
1557 for(i = 0;i < ic->nb_streams; i++) {
1558 st = ic->streams[i];
1559 if (st->duration == AV_NOPTS_VALUE)
1562 if (i == ic->nb_streams)
1565 ret = av_read_packet(ic, pkt);
1568 read_size += pkt->size;
1569 st = ic->streams[pkt->stream_index];
1570 if (pkt->pts != AV_NOPTS_VALUE) {
1571 end_time = pkt->pts;
1572 duration = end_time - st->start_time;
1574 if (st->duration == AV_NOPTS_VALUE ||
1575 st->duration < duration)
1576 st->duration = duration;
1579 av_free_packet(pkt);
1582 fill_all_stream_timings(ic);
1584 url_fseek(&ic->pb, 0, SEEK_SET);
1587 static void av_estimate_timings(AVFormatContext *ic)
1592 /* get the file size, if possible */
1593 if (ic->iformat->flags & AVFMT_NOFILE) {
1596 h = url_fileno(&ic->pb);
1597 file_size = url_filesize(h);
1601 ic->file_size = file_size;
1603 if ((ic->iformat == &mpegps_demux || ic->iformat == &mpegts_demux) && file_size && !ic->pb.is_streamed) {
1604 /* get accurate estimate from the PTSes */
1605 av_estimate_timings_from_pts(ic);
1606 } else if (av_has_timings(ic)) {
1607 /* at least one component has timings - we use them for all
1609 fill_all_stream_timings(ic);
1611 /* less precise: use bit rate info */
1612 av_estimate_timings_from_bit_rate(ic);
1614 av_update_stream_timings(ic);
1620 for(i = 0;i < ic->nb_streams; i++) {
1621 st = ic->streams[i];
1622 printf("%d: start_time: %0.3f duration: %0.3f\n",
1623 i, (double)st->start_time / AV_TIME_BASE,
1624 (double)st->duration / AV_TIME_BASE);
1626 printf("stream: start_time: %0.3f duration: %0.3f bitrate=%d kb/s\n",
1627 (double)ic->start_time / AV_TIME_BASE,
1628 (double)ic->duration / AV_TIME_BASE,
1629 ic->bit_rate / 1000);
1634 static int has_codec_parameters(AVCodecContext *enc)
1637 switch(enc->codec_type) {
1638 case CODEC_TYPE_AUDIO:
1639 val = enc->sample_rate;
1641 case CODEC_TYPE_VIDEO:
1642 val = enc->width && enc->pix_fmt != PIX_FMT_NONE;
1651 static int try_decode_frame(AVStream *st, const uint8_t *data, int size)
1655 int got_picture, ret;
1658 codec = avcodec_find_decoder(st->codec.codec_id);
1661 ret = avcodec_open(&st->codec, codec);
1665 if(!has_codec_parameters(&st->codec)){
1666 switch(st->codec.codec_type) {
1667 case CODEC_TYPE_VIDEO:
1668 ret = avcodec_decode_video(&st->codec, &picture,
1669 &got_picture, (uint8_t *)data, size);
1671 case CODEC_TYPE_AUDIO:
1672 samples = av_malloc(AVCODEC_MAX_AUDIO_FRAME_SIZE);
1675 ret = avcodec_decode_audio(&st->codec, samples,
1676 &got_picture, (uint8_t *)data, size);
1684 avcodec_close(&st->codec);
1688 /* absolute maximum size we read until we abort */
1689 #define MAX_READ_SIZE 5000000
1691 /* maximum duration until we stop analysing the stream */
1692 #define MAX_STREAM_DURATION ((int)(AV_TIME_BASE * 1.0))
1695 * Read the beginning of a media file to get stream information. This
1696 * is useful for file formats with no headers such as MPEG. This
1697 * function also computes the real frame rate in case of MPEG-2 repeat
1700 * @param ic media file handle
1701 * @return >=0 if OK. AVERROR_xxx if error.
1702 * @todo let the user decide somehow what information is needed so we don't waste time getting stuff the user doesn't need
1704 int av_find_stream_info(AVFormatContext *ic)
1706 int i, count, ret, read_size;
1708 AVPacket pkt1, *pkt;
1709 AVPacketList *pktl=NULL, **ppktl;
1710 int64_t last_dts[MAX_STREAMS];
1711 int64_t duration_sum[MAX_STREAMS];
1712 int duration_count[MAX_STREAMS]={0};
1714 for(i=0;i<ic->nb_streams;i++) {
1715 st = ic->streams[i];
1716 if(st->codec.codec_type == CODEC_TYPE_VIDEO){
1717 /* if(!st->time_base.num)
1719 if(!st->codec.time_base.num)
1720 st->codec.time_base= st->time_base;
1724 for(i=0;i<MAX_STREAMS;i++){
1725 last_dts[i]= AV_NOPTS_VALUE;
1726 duration_sum[i]= INT64_MAX;
1731 ppktl = &ic->packet_buffer;
1733 /* check if one codec still needs to be handled */
1734 for(i=0;i<ic->nb_streams;i++) {
1735 st = ic->streams[i];
1736 if (!has_codec_parameters(&st->codec))
1738 /* variable fps and no guess at the real fps */
1739 if( st->codec.time_base.den >= 1000LL*st->codec.time_base.num
1740 && duration_count[i]<20 && st->codec.codec_type == CODEC_TYPE_VIDEO)
1743 if (i == ic->nb_streams) {
1744 /* NOTE: if the format has no header, then we need to read
1745 some packets to get most of the streams, so we cannot
1747 if (!(ic->ctx_flags & AVFMTCTX_NOHEADER)) {
1748 /* if we found the info for all the codecs, we can stop */
1753 /* we did not get all the codec info, but we read too much data */
1754 if (read_size >= MAX_READ_SIZE) {
1760 /* NOTE: a new stream can be added there if no header in file
1761 (AVFMTCTX_NOHEADER) */
1762 ret = av_read_frame_internal(ic, &pkt1);
1765 ret = -1; /* we could not have all the codec parameters before EOF */
1766 for(i=0;i<ic->nb_streams;i++) {
1767 st = ic->streams[i];
1768 if (!has_codec_parameters(&st->codec))
1771 if (i == ic->nb_streams)
1776 pktl = av_mallocz(sizeof(AVPacketList));
1778 ret = AVERROR_NOMEM;
1782 /* add the packet in the buffered packet list */
1784 ppktl = &pktl->next;
1789 /* duplicate the packet */
1790 if (av_dup_packet(pkt) < 0) {
1791 ret = AVERROR_NOMEM;
1795 read_size += pkt->size;
1797 st = ic->streams[pkt->stream_index];
1798 st->codec_info_duration += pkt->duration;
1799 if (pkt->duration != 0)
1800 st->codec_info_nb_frames++;
1802 if(st->codec.codec_type == CODEC_TYPE_VIDEO){
1803 int index= pkt->stream_index;
1804 int64_t last= last_dts[index];
1805 int64_t duration= pkt->dts - last;
1807 if(pkt->dts != AV_NOPTS_VALUE && last != AV_NOPTS_VALUE && duration>0){
1808 if(duration*duration_count[index]*10/9 < duration_sum[index]){
1809 duration_sum[index]= duration;
1810 duration_count[index]=1;
1812 int factor= av_rescale(duration, duration_count[index], duration_sum[index]);
1813 duration_sum[index] += duration;
1814 duration_count[index]+= factor;
1817 last_dts[pkt->stream_index]= pkt->dts;
1819 /* if still no information, we try to open the codec and to
1820 decompress the frame. We try to avoid that in most cases as
1821 it takes longer and uses more memory. For MPEG4, we need to
1822 decompress for Quicktime. */
1823 if (!has_codec_parameters(&st->codec) /*&&
1824 (st->codec.codec_id == CODEC_ID_FLV1 ||
1825 st->codec.codec_id == CODEC_ID_H264 ||
1826 st->codec.codec_id == CODEC_ID_H263 ||
1827 st->codec.codec_id == CODEC_ID_H261 ||
1828 st->codec.codec_id == CODEC_ID_VORBIS ||
1829 st->codec.codec_id == CODEC_ID_MJPEG ||
1830 st->codec.codec_id == CODEC_ID_PNG ||
1831 st->codec.codec_id == CODEC_ID_PAM ||
1832 st->codec.codec_id == CODEC_ID_PGM ||
1833 st->codec.codec_id == CODEC_ID_PGMYUV ||
1834 st->codec.codec_id == CODEC_ID_PBM ||
1835 st->codec.codec_id == CODEC_ID_PPM ||
1836 st->codec.codec_id == CODEC_ID_SHORTEN ||
1837 (st->codec.codec_id == CODEC_ID_MPEG4 && !st->need_parsing))*/)
1838 try_decode_frame(st, pkt->data, pkt->size);
1840 if (st->codec_info_duration >= MAX_STREAM_DURATION) {
1846 for(i=0;i<ic->nb_streams;i++) {
1847 st = ic->streams[i];
1848 if (st->codec.codec_type == CODEC_TYPE_VIDEO) {
1849 if(st->codec.codec_id == CODEC_ID_RAWVIDEO && !st->codec.codec_tag && !st->codec.bits_per_sample)
1850 st->codec.codec_tag= avcodec_pix_fmt_to_codec_tag(st->codec.pix_fmt);
1852 if(duration_count[i] && st->codec.time_base.num*1000 <= st->codec.time_base.den &&
1853 st->time_base.num*duration_sum[i]/duration_count[i]*1000LL > st->time_base.den){
1857 num= st->time_base.den*duration_count[i];
1858 den= st->time_base.num*duration_sum[i];
1860 av_reduce(&fps1.num, &fps1.den, num*1001, den*1000, FFMAX(st->time_base.den, st->time_base.num)/4);
1861 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, num, den, FFMAX(st->time_base.den, st->time_base.num)/4);
1862 if(fps1.num < st->r_frame_rate.num && fps1.den == 1 && (fps1.num==24 || fps1.num==30)){ //FIXME better decision
1863 st->r_frame_rate.num= fps1.num*1000;
1864 st->r_frame_rate.den= fps1.den*1001;
1868 /* set real frame rate info */
1869 /* compute the real frame rate for telecine */
1870 if ((st->codec.codec_id == CODEC_ID_MPEG1VIDEO ||
1871 st->codec.codec_id == CODEC_ID_MPEG2VIDEO) &&
1872 st->codec.sub_id == 2) {
1873 if (st->codec_info_nb_frames >= 20) {
1874 float coded_frame_rate, est_frame_rate;
1875 est_frame_rate = ((double)st->codec_info_nb_frames * AV_TIME_BASE) /
1876 (double)st->codec_info_duration ;
1877 coded_frame_rate = 1.0/av_q2d(st->codec.time_base);
1879 printf("telecine: coded_frame_rate=%0.3f est_frame_rate=%0.3f\n",
1880 coded_frame_rate, est_frame_rate);
1882 /* if we detect that it could be a telecine, we
1883 signal it. It would be better to do it at a
1884 higher level as it can change in a film */
1885 if (coded_frame_rate >= 24.97 &&
1886 (est_frame_rate >= 23.5 && est_frame_rate < 24.5)) {
1887 st->r_frame_rate = (AVRational){24000, 1001};
1891 /* if no real frame rate, use the codec one */
1892 if (!st->r_frame_rate.num){
1893 st->r_frame_rate.num = st->codec.time_base.den;
1894 st->r_frame_rate.den = st->codec.time_base.num;
1899 av_estimate_timings(ic);
1901 /* correct DTS for b frame streams with no timestamps */
1902 for(i=0;i<ic->nb_streams;i++) {
1903 st = ic->streams[i];
1904 if (st->codec.codec_type == CODEC_TYPE_VIDEO) {
1906 ppktl = &ic->packet_buffer;
1908 if(ppkt1->stream_index != i)
1910 if(ppkt1->pkt->dts < 0)
1912 if(ppkt1->pkt->pts != AV_NOPTS_VALUE)
1914 ppkt1->pkt->dts -= delta;
1919 st->cur_dts -= delta;
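/* Illustrative sketch (not part of the library): the usual open/analyse
   sequence of a player or transcoder.  dump_format()'s exact parameter list
   is assumed from its declaration in avformat.h of this era. */
#if 0
static AVFormatContext *example_open_and_probe(const char *filename)
{
    AVFormatContext *ic = NULL;

    if (av_open_input_file(&ic, filename, NULL, 0, NULL) < 0)
        return NULL;
    if (av_find_stream_info(ic) < 0) {
        av_close_input_file(ic);
        return NULL;
    }
    dump_format(ic, 0, filename, 0); /* print streams, duration, bit rate */
    return ic;
}
#endif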
1927 /*******************************************************/
1930 * start playing a network based stream (e.g. RTSP stream) at the
1933 int av_read_play(AVFormatContext *s)
1935 if (!s->iformat->read_play)
1936 return AVERROR_NOTSUPP;
1937 return s->iformat->read_play(s);
1941 * pause a network based stream (e.g. RTSP stream). Use av_read_play()
1944 int av_read_pause(AVFormatContext *s)
1946 if (!s->iformat->read_pause)
1947 return AVERROR_NOTSUPP;
1948 return s->iformat->read_pause(s);
1952 * Close a media file (but not its codecs)
1954 * @param s media file handle
1956 void av_close_input_file(AVFormatContext *s)
1958 int i, must_open_file;
1961 /* free previous packet */
1962 if (s->cur_st && s->cur_st->parser)
1963 av_free_packet(&s->cur_pkt);
1965 if (s->iformat->read_close)
1966 s->iformat->read_close(s);
1967 for(i=0;i<s->nb_streams;i++) {
1968 /* free all data in a stream component */
1971 av_parser_close(st->parser);
1973 av_free(st->index_entries);
1976 flush_packet_queue(s);
1978 if (s->iformat->flags & AVFMT_NOFILE) {
1981 if (must_open_file) {
1984 av_freep(&s->priv_data);
1989 * Add a new stream to a media file. Can only be called in the
1990 * read_header function. If the flag AVFMTCTX_NOHEADER is in the
1991 * format context, then new streams can be added in read_packet too.
1994 * @param s media file handle
1995 * @param id file format dependent stream id
1997 AVStream *av_new_stream(AVFormatContext *s, int id)
2001 if (s->nb_streams >= MAX_STREAMS)
2004 st = av_mallocz(sizeof(AVStream));
2007 avcodec_get_context_defaults(&st->codec);
2009 /* no default bitrate if decoding */
2010 st->codec.bit_rate = 0;
2012 st->index = s->nb_streams;
2014 st->start_time = AV_NOPTS_VALUE;
2015 st->duration = AV_NOPTS_VALUE;
2016 st->cur_dts = AV_NOPTS_VALUE;
2018 /* default pts setting is MPEG-like */
2019 av_set_pts_info(st, 33, 1, 90000);
2020 st->last_IP_pts = AV_NOPTS_VALUE;
2022 s->streams[s->nb_streams++] = st;
2026 /************************************************************/
2027 /* output media file */
2029 int av_set_parameters(AVFormatContext *s, AVFormatParameters *ap)
2033 if (s->oformat->priv_data_size > 0) {
2034 s->priv_data = av_mallocz(s->oformat->priv_data_size);
2036 return AVERROR_NOMEM;
2038 s->priv_data = NULL;
2040 if (s->oformat->set_parameters) {
2041 ret = s->oformat->set_parameters(s, ap);
2049 * allocate the stream private data and write the stream header to an
2052 * @param s media file handle
2053 * @return 0 if OK. AVERROR_xxx if error.
2055 int av_write_header(AVFormatContext *s)
2060 ret = s->oformat->write_header(s);
2064 /* init PTS generation */
2065 for(i=0;i<s->nb_streams;i++) {
2068 switch (st->codec.codec_type) {
2069 case CODEC_TYPE_AUDIO:
2070 av_frac_init(&st->pts, 0, 0,
2071 (int64_t)st->time_base.num * st->codec.sample_rate);
2073 case CODEC_TYPE_VIDEO:
2074 av_frac_init(&st->pts, 0, 0,
2075 (int64_t)st->time_base.num * st->codec.time_base.den);
2084 //FIXME merge with compute_pkt_fields
2085 static int compute_pkt_fields2(AVStream *st, AVPacket *pkt){
2086 int b_frames = FFMAX(st->codec.has_b_frames, st->codec.max_b_frames);
2087 int num, den, frame_size;
2089 // av_log(NULL, AV_LOG_DEBUG, "av_write_frame: pts:%lld dts:%lld cur_dts:%lld b:%d size:%d st:%d\n", pkt->pts, pkt->dts, st->cur_dts, b_frames, pkt->size, pkt->stream_index);
2091 /* if(pkt->pts == AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE)
2094 /* duration field */
2095 if (pkt->duration == 0) {
2096 compute_frame_duration(&num, &den, st, NULL, pkt);
2098 pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num);
2102 //XXX/FIXME this is a temporary hack until all encoders output pts
2103 if((pkt->pts == 0 || pkt->pts == AV_NOPTS_VALUE) && pkt->dts == AV_NOPTS_VALUE && !b_frames){
2105 // pkt->pts= st->cur_dts;
2106 pkt->pts= st->pts.val;
2109 //calculate dts from pts
2110 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE){
2112 if(st->last_IP_pts == AV_NOPTS_VALUE){
2113 st->last_IP_pts= -pkt->duration;
2115 if(st->last_IP_pts < pkt->pts){
2116 pkt->dts= st->last_IP_pts;
2117 st->last_IP_pts= pkt->pts;
2124 if(st->cur_dts && st->cur_dts != AV_NOPTS_VALUE && st->cur_dts >= pkt->dts){
2125 av_log(NULL, AV_LOG_ERROR, "error, non monotone timestamps %Ld >= %Ld\n", st->cur_dts, pkt->dts);
2128 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts < pkt->dts){
2129 av_log(NULL, AV_LOG_ERROR, "error, pts < dts\n");
2133 // av_log(NULL, AV_LOG_DEBUG, "av_write_frame: pts2:%lld dts2:%lld\n", pkt->pts, pkt->dts);
2134 st->cur_dts= pkt->dts;
2135 st->pts.val= pkt->dts;
2138 switch (st->codec.codec_type) {
2139 case CODEC_TYPE_AUDIO:
2140 frame_size = get_audio_frame_size(&st->codec, pkt->size);
2142 /* HACK/FIXME, we skip the initial 0-size packets as they are most likely equal to the encoder delay,
2143 but it would be better if we had the real timestamps from the encoder */
2144 if (frame_size >= 0 && (pkt->size || st->pts.num!=st->pts.den>>1 || st->pts.val)) {
2145 av_frac_add(&st->pts, (int64_t)st->time_base.den * frame_size);
2148 case CODEC_TYPE_VIDEO:
2149 av_frac_add(&st->pts, (int64_t)st->time_base.den * st->codec.time_base.num);
2157 static void truncate_ts(AVStream *st, AVPacket *pkt){
2158 int64_t pts_mask = (2LL << (st->pts_wrap_bits-1)) - 1;
2161 // pkt->dts= 0; //this happens for low_delay=0 and b frames, FIXME, needs further investigation about what we should do here
2163 pkt->pts &= pts_mask;
2164 pkt->dts &= pts_mask;
2168 * Write a packet to an output media file. The packet shall contain
2169 * one audio or video frame.
2171 * @param s media file handle
2172 * @param pkt the packet, which contains the stream_index, buf/buf_size, dts/pts, ...
2173 * @return < 0 if error, = 0 if OK, 1 if end of stream wanted.
2175 int av_write_frame(AVFormatContext *s, AVPacket *pkt)
2179 ret=compute_pkt_fields2(s->streams[pkt->stream_index], pkt);
2183 truncate_ts(s->streams[pkt->stream_index], pkt);
2185 ret= s->oformat->write_packet(s, pkt);
2187 ret= url_ferror(&s->pb);
2192 * interleave_packet implementation which will interleave per DTS.
2194 static int av_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out, AVPacket *pkt, int flush){
2195 AVPacketList *pktl, **next_point, *this_pktl;
2197 int streams[MAX_STREAMS];
2200 AVStream *st= s->streams[ pkt->stream_index];
2202 assert(pkt->destruct != av_destruct_packet); //FIXME
2204 this_pktl = av_mallocz(sizeof(AVPacketList));
2205 this_pktl->pkt= *pkt;
2206 av_dup_packet(&this_pktl->pkt);
2208 next_point = &s->packet_buffer;
2210 AVStream *st2= s->streams[ (*next_point)->pkt.stream_index];
2211 int64_t left= st2->time_base.num * (int64_t)st ->time_base.den;
2212 int64_t right= st ->time_base.num * (int64_t)st2->time_base.den;
2213 if((*next_point)->pkt.dts * left > pkt->dts * right) //FIXME this can overflow
2215 next_point= &(*next_point)->next;
2217 this_pktl->next= *next_point;
2218 *next_point= this_pktl;
2221 memset(streams, 0, sizeof(streams));
2222 pktl= s->packet_buffer;
2224 //av_log(s, AV_LOG_DEBUG, "show st:%d dts:%lld\n", pktl->pkt.stream_index, pktl->pkt.dts);
2225 if(streams[ pktl->pkt.stream_index ] == 0)
2227 streams[ pktl->pkt.stream_index ]++;
2231 if(s->nb_streams == stream_count || (flush && stream_count)){
2232 pktl= s->packet_buffer;
2235 s->packet_buffer= pktl->next;
2239 av_init_packet(out);
2245 * Interleaves an AVPacket correctly so it can be muxed.
2246 * @param out the interleaved packet will be output here
2247 * @param in the input packet
2248 * @param flush 1 if no further packets are available as input and all
2249 * remaining packets should be output
2250 * @return 1 if a packet was output, 0 if no packet could be output,
2251 * < 0 if an error occured
2253 static int av_interleave_packet(AVFormatContext *s, AVPacket *out, AVPacket *in, int flush){
2254 if(s->oformat->interleave_packet)
2255 return s->oformat->interleave_packet(s, out, in, flush);
2257 return av_interleave_packet_per_dts(s, out, in, flush);
2261 * Writes a packet to an output media file ensuring correct interleaving.
2262 * The packet shall contain one audio or video frame.
2263 * If the packets are already correctly interleaved the application should
2264 * call av_write_frame() instead as it's slightly faster. It's also important
2265 * to keep in mind that completely non-interleaved input will need huge amounts
2266 * of memory to interleave with this, so it's preferable to interleave at the
2269 * @param s media file handle
2270 * @param pkt the packet, which contains the stream_index, buf/buf_size, dts/pts, ...
2271 * @return < 0 if error, = 0 if OK, 1 if end of stream wanted.
2273 int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt){
2274 AVStream *st= s->streams[ pkt->stream_index];
2276 //FIXME/XXX/HACK drop zero sized packets
2277 if(st->codec.codec_type == CODEC_TYPE_AUDIO && pkt->size==0)
2280 //av_log(NULL, AV_LOG_DEBUG, "av_interleaved_write_frame %d %Ld %Ld\n", pkt->size, pkt->dts, pkt->pts);
2281 if(compute_pkt_fields2(st, pkt) < 0)
2284 if(pkt->dts == AV_NOPTS_VALUE)
2289 int ret= av_interleave_packet(s, &opkt, pkt, 0);
2290 if(ret<=0) //FIXME cleanup needed for ret<0 ?
2293 truncate_ts(s->streams[opkt.stream_index], &opkt);
2294 ret= s->oformat->write_packet(s, &opkt);
2296 av_free_packet(&opkt);
2301 if(url_ferror(&s->pb))
2302 return url_ferror(&s->pb);
2307 * write the stream trailer to an output media file and free the
2308 * file private data.
2310 * @param s media file handle
2311 * @return 0 if OK. AVERROR_xxx if error. */
2312 int av_write_trailer(AVFormatContext *s)
2318 ret= av_interleave_packet(s, &pkt, NULL, 1);
2319 if(ret<0) //FIXME cleanup needed for ret<0 ?
2324 truncate_ts(s->streams[pkt.stream_index], &pkt);
2325 ret= s->oformat->write_packet(s, &pkt);
2327 av_free_packet(&pkt);
2331 if(url_ferror(&s->pb))
2335 ret = s->oformat->write_trailer(s);
2338 ret=url_ferror(&s->pb);
2339 for(i=0;i<s->nb_streams;i++)
2340 av_freep(&s->streams[i]->priv_data);
2341 av_freep(&s->priv_data);
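/* Illustrative sketch (not part of the library): the matching muxing sequence.
   'get_next_encoded_packet' is a hypothetical callback supplying already
   encoded packets with stream_index/pts/dts filled in; real applications also
   call av_set_parameters() before writing the header. */
#if 0
static int example_mux(AVFormatContext *oc)
{
    AVPacket pkt;

    if (av_write_header(oc) < 0)
        return -1;
    while (get_next_encoded_packet(&pkt) == 0) {      /* hypothetical */
        if (av_interleaved_write_frame(oc, &pkt) < 0) /* interleaves by dts */
            return -1;
    }
    return av_write_trailer(oc); /* flushes buffered packets, writes trailer */
}
#endif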
2345 /* "user interface" functions */
2347 void dump_format(AVFormatContext *ic,
2355 av_log(NULL, AV_LOG_DEBUG, "%s #%d, %s, %s '%s':\n",
2356 is_output ? "Output" : "Input",
2358 is_output ? ic->oformat->name : ic->iformat->name,
2359 is_output ? "to" : "from", url);
2361 av_log(NULL, AV_LOG_DEBUG, " Duration: ");
2362 if (ic->duration != AV_NOPTS_VALUE) {
2363 int hours, mins, secs, us;
2364 secs = ic->duration / AV_TIME_BASE;
2365 us = ic->duration % AV_TIME_BASE;
2370 av_log(NULL, AV_LOG_DEBUG, "%02d:%02d:%02d.%01d", hours, mins, secs,
2371 (10 * us) / AV_TIME_BASE);
2373 av_log(NULL, AV_LOG_DEBUG, "N/A");
2375 if (ic->start_time != AV_NOPTS_VALUE) {
2377 av_log(NULL, AV_LOG_DEBUG, ", start: ");
2378 secs = ic->start_time / AV_TIME_BASE;
2379 us = ic->start_time % AV_TIME_BASE;
2380 av_log(NULL, AV_LOG_DEBUG, "%d.%06d",
2381 secs, (int)av_rescale(us, 1000000, AV_TIME_BASE));
2383 av_log(NULL, AV_LOG_DEBUG, ", bitrate: ");
2385 av_log(NULL, AV_LOG_DEBUG,"%d kb/s", ic->bit_rate / 1000);
2387 av_log(NULL, AV_LOG_DEBUG, "N/A");
2389 av_log(NULL, AV_LOG_DEBUG, "\n");
2391 for(i=0;i<ic->nb_streams;i++) {
2392 AVStream *st = ic->streams[i];
2393 avcodec_string(buf, sizeof(buf), &st->codec, is_output);
2394 av_log(NULL, AV_LOG_DEBUG, " Stream #%d.%d", index, i);
2395 /* the pid is important information, so we display it */
2396 /* XXX: add a generic system */
2398 flags = ic->oformat->flags;
2400 flags = ic->iformat->flags;
2401 if (flags & AVFMT_SHOW_IDS) {
2402 av_log(NULL, AV_LOG_DEBUG, "[0x%x]", st->id);
2404 av_log(NULL, AV_LOG_DEBUG, ": %s\n", buf);
2411 int frame_rate, frame_rate_base;
2414 static AbvEntry frame_abvs[] = {
2415 { "ntsc", 720, 480, 30000, 1001 },
2416 { "pal", 720, 576, 25, 1 },
2417 { "qntsc", 352, 240, 30000, 1001 }, /* VCD compliant ntsc */
2418 { "qpal", 352, 288, 25, 1 }, /* VCD compliant pal */
2419 { "sntsc", 640, 480, 30000, 1001 }, /* square pixel ntsc */
2420 { "spal", 768, 576, 25, 1 }, /* square pixel pal */
2421 { "film", 352, 240, 24, 1 },
2422 { "ntsc-film", 352, 240, 24000, 1001 },
2423 { "sqcif", 128, 96, 0, 0 },
2424 { "qcif", 176, 144, 0, 0 },
2425 { "cif", 352, 288, 0, 0 },
2426 { "4cif", 704, 576, 0, 0 },
2429 int parse_image_size(int *width_ptr, int *height_ptr, const char *str)
2432 int n = sizeof(frame_abvs) / sizeof(AbvEntry);
2434 int frame_width = 0, frame_height = 0;
2437 if (!strcmp(frame_abvs[i].abv, str)) {
2438 frame_width = frame_abvs[i].width;
2439 frame_height = frame_abvs[i].height;
2445 frame_width = strtol(p, (char **)&p, 10);
2448 frame_height = strtol(p, (char **)&p, 10);
2450 if (frame_width <= 0 || frame_height <= 0)
2452 *width_ptr = frame_width;
2453 *height_ptr = frame_height;
2457 int parse_frame_rate(int *frame_rate, int *frame_rate_base, const char *arg)
2462 /* First, we check our abbreviation table */
2463 for (i = 0; i < sizeof(frame_abvs)/sizeof(*frame_abvs); ++i)
2464 if (!strcmp(frame_abvs[i].abv, arg)) {
2465 *frame_rate = frame_abvs[i].frame_rate;
2466 *frame_rate_base = frame_abvs[i].frame_rate_base;
2470 /* Then, we try to parse it as fraction */
2471 cp = strchr(arg, '/');
2473 cp = strchr(arg, ':');
2476 *frame_rate = strtol(arg, &cpp, 10);
2477 if (cpp != arg || cpp == cp)
2478 *frame_rate_base = strtol(cp+1, &cpp, 10);
2483 /* Finally we give up and parse it as double */
2484 *frame_rate_base = DEFAULT_FRAME_RATE_BASE; //FIXME use av_d2q()
2485 *frame_rate = (int)(strtod(arg, 0) * (*frame_rate_base) + 0.5);
2487 if (!*frame_rate || !*frame_rate_base)
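/* Illustrative sketch (not part of the library): the three accepted spellings
   of a frame rate all end up as a rational number. */
#if 0
static void example_parse_frame_rate(void)
{
    int num, den;

    parse_frame_rate(&num, &den, "ntsc-film");  /* abbreviation: 24000/1001 */
    parse_frame_rate(&num, &den, "30000/1001"); /* fraction: 30000/1001 */
    parse_frame_rate(&num, &den, "23.976");     /* decimal, rounded onto DEFAULT_FRAME_RATE_BASE */
}
#endif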
2494 * - If not a duration:
2495 * [{YYYY-MM-DD|YYYYMMDD}]{T| }{HH[:MM[:SS[.m...]]][Z]|HH[MM[SS[.m...]]][Z]}
2496 * Time is local time unless Z is suffixed to the end, in which case it is GMT.
2497 * Return the date in micro seconds since 1970
2499 * HH[:MM[:SS[.m...]]]
2502 int64_t parse_date(const char *datestr, int duration)
2508 static const char *date_fmt[] = {
2512 static const char *time_fmt[] = {
2522 time_t now = time(0);
2524 len = strlen(datestr);
2526 lastch = datestr[len - 1];
2529 is_utc = (lastch == 'z' || lastch == 'Z');
2531 memset(&dt, 0, sizeof(dt));
2536 for (i = 0; i < sizeof(date_fmt) / sizeof(date_fmt[0]); i++) {
2537 q = small_strptime(p, date_fmt[i], &dt);
2547 dt = *localtime(&now);
2549 dt.tm_hour = dt.tm_min = dt.tm_sec = 0;
2554 if (*p == 'T' || *p == 't' || *p == ' ')
2557 for (i = 0; i < sizeof(time_fmt) / sizeof(time_fmt[0]); i++) {
2558 q = small_strptime(p, time_fmt[i], &dt);
2568 q = small_strptime(p, time_fmt[0], &dt);
2570 dt.tm_sec = strtol(p, (char **)&q, 10);
2576 /* Now we have all the fields that we can get */
2581 return now * int64_t_C(1000000);
2585 t = dt.tm_hour * 3600 + dt.tm_min * 60 + dt.tm_sec;
2587 dt.tm_isdst = -1; /* unknown */
2600 for (val = 0, n = 100000; n >= 1; n /= 10, q++) {
2603 val += n * (*q - '0');
2607 return negative ? -t : t;
2610 /* syntax: '?tag1=val1&tag2=val2...'. Little URL decoding is done. Return
2612 int find_info_tag(char *arg, int arg_size, const char *tag1, const char *info)
2622 while (*p != '\0' && *p != '=' && *p != '&') {
2623 if ((q - tag) < sizeof(tag) - 1)
2631 while (*p != '&' && *p != '\0') {
2632 if ((q - arg) < arg_size - 1) {
2642 if (!strcmp(tag, tag1))
2651 /* Return in 'buf' the path with '%d' replaced by number. Also handles
2652 the '%0nd' format where 'n' is the total number of digits and
2653 '%%'. Return 0 if OK, and -1 if format error */
2654 int get_frame_filename(char *buf, int buf_size,
2655 const char *path, int number)
2658 char *q, buf1[20], c;
2659 int nd, len, percentd_found;
2671 while (isdigit(*p)) {
2672 nd = nd * 10 + *p++ - '0';
2675 } while (isdigit(c));
2684 snprintf(buf1, sizeof(buf1), "%0*d", nd, number);
2686 if ((q - buf + len) > buf_size - 1)
2688 memcpy(q, buf1, len);
2696 if ((q - buf) < buf_size - 1)
2700 if (!percentd_found)
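/* Example of the substitution above: get_frame_filename(buf, sizeof(buf),
   "img%03d.jpg", 7) yields "img007.jpg"; a path without any '%d' makes the
   function return -1 because percentd_found stays 0. */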
2710 * Print a nice hex dump of a buffer
2711 * @param f stream for output
2713 * @param size buffer size
2715 void av_hex_dump(FILE *f, uint8_t *buf, int size)
2719 for(i=0;i<size;i+=16) {
2723 fprintf(f, "%08x ", i);
2726 fprintf(f, " %02x", buf[i+j]);
2731 for(j=0;j<len;j++) {
2733 if (c < ' ' || c > '~')
2735 fprintf(f, "%c", c);
2742 * Print on 'f' a nice dump of a packet
2743 * @param f stream for output
2744 * @param pkt packet to dump
2745 * @param dump_payload true if the payload must be displayed too
2747 //FIXME needs to know the time_base
2748 void av_pkt_dump(FILE *f, AVPacket *pkt, int dump_payload)
2750 fprintf(f, "stream #%d:\n", pkt->stream_index);
2751 fprintf(f, " keyframe=%d\n", ((pkt->flags & PKT_FLAG_KEY) != 0));
2752 fprintf(f, " duration=%0.3f\n", (double)pkt->duration / AV_TIME_BASE);
2753 /* DTS is _always_ valid after av_read_frame() */
2754 fprintf(f, " dts=");
2755 if (pkt->dts == AV_NOPTS_VALUE)
2758 fprintf(f, "%0.3f", (double)pkt->dts / AV_TIME_BASE);
2759 /* PTS may not be known if B-frames are present */
2760 fprintf(f, " pts=");
2761 if (pkt->pts == AV_NOPTS_VALUE)
2764 fprintf(f, "%0.3f", (double)pkt->pts / AV_TIME_BASE);
2766 fprintf(f, " size=%d\n", pkt->size);
2768 av_hex_dump(f, pkt->data, pkt->size);
2771 void url_split(char *proto, int proto_size,
2772 char *authorization, int authorization_size,
2773 char *hostname, int hostname_size,
2775 char *path, int path_size,
2786 while (*p != ':' && *p != '\0') {
2787 if ((q - proto) < proto_size - 1)
2793 if (authorization_size > 0)
2794 authorization[0] = '\0';
2798 if (hostname_size > 0)
2802 char *at,*slash; // PETR: position of '@' character and '/' character
2809 at = strchr(p,'@'); // PETR: get the position of '@'
2810 slash = strchr(p,'/'); // PETR: get position of '/' - end of hostname
2811 if (at && slash && at > slash) at = NULL; // PETR: not interested in '@' behind '/'
2813 q = at ? authorization : hostname; // PETR: if '@' exists starting with auth.
2815 while ((at || *p != ':') && *p != '/' && *p != '?' && *p != '\0') { // PETR:
2816 if (*p == '@') { // PETR: passed '@'
2817 if (authorization_size > 0)
2821 } else if (!at) { // PETR: hostname
2822 if ((q - hostname) < hostname_size - 1)
2825 if ((q - authorization) < authorization_size - 1)
2830 if (hostname_size > 0)
2834 port = strtoul(p, (char **)&p, 10);
2839 pstrcpy(path, path_size, p);
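/* Illustrative sketch (not part of the library): splitting an RTSP URL.  The
   parameter order (proto, authorization, hostname, port, path) is assumed
   from the declaration in avformat.h of this era. */
#if 0
static void example_url_split(void)
{
    char proto[16], auth[128], host[128], path[256];
    int port;

    url_split(proto, sizeof(proto), auth, sizeof(auth),
              host, sizeof(host), &port, path, sizeof(path),
              "rtsp://user:pass@example.com:554/stream");
    /* proto="rtsp", auth="user:pass", host="example.com", port=554,
       path="/stream" */
}
#endif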
2843 * Set the pts for a given stream
2845 * @param pts_wrap_bits number of bits effectively used by the pts
2846 * (used for wrap control, 33 is the value for MPEG)
2847 * @param pts_num numerator to convert to seconds (MPEG: 1)
2848 * @param pts_den denominator to convert to seconds (MPEG: 90000)
2850 void av_set_pts_info(AVStream *s, int pts_wrap_bits,
2851 int pts_num, int pts_den)
2853 s->pts_wrap_bits = pts_wrap_bits;
2854 s->time_base.num = pts_num;
2855 s->time_base.den = pts_den;
2858 /* fraction handling */
2861 * f = val + (num / den) + 0.5. 'num' is normalized so that
2862 * 0 <= num < den.
2864 * @param f fractional number
2865 * @param val integer value
2866 * @param num must be >= 0
2867 * @param den must be >= 1
2869 void av_frac_init(AVFrac *f, int64_t val, int64_t num, int64_t den)
2881 /* set f to (val + 0.5) */
2882 void av_frac_set(AVFrac *f, int64_t val)
2885 f->num = f->den >> 1;
2889 * Fractional addition to f: f = f + (incr / f->den)
2891 * @param f fractional number
2892 * @param incr increment, can be positive or negative
2894 void av_frac_add(AVFrac *f, int64_t incr)
2898 num = f->num + incr;
2901 f->val += num / den;
2907 } else if (num >= den) {
2908 f->val += num / den;
2915 * register a new image format
2916 * @param img_fmt Image format descriptor
2918 void av_register_image_format(AVImageFormat *img_fmt)
2922 p = &first_image_format;
2923 while (*p != NULL) p = &(*p)->next;
2925 img_fmt->next = NULL;
2928 /* guess image format */
2929 AVImageFormat *av_probe_image_format(AVProbeData *pd)
2931 AVImageFormat *fmt1, *fmt;
2932 int score, score_max;
2936 for(fmt1 = first_image_format; fmt1 != NULL; fmt1 = fmt1->next) {
2937 if (fmt1->img_probe) {
2938 score = fmt1->img_probe(pd);
2939 if (score > score_max) {
2948 AVImageFormat *guess_image_format(const char *filename)
2950 AVImageFormat *fmt1;
2952 for(fmt1 = first_image_format; fmt1 != NULL; fmt1 = fmt1->next) {
2953 if (fmt1->extensions && match_ext(filename, fmt1->extensions))
2960 * Read an image from a stream.
2961 * @param gb byte stream containing the image
2962 * @param fmt image format, NULL if probing is required
2964 int av_read_image(ByteIOContext *pb, const char *filename,
2966 int (*alloc_cb)(void *, AVImageInfo *info), void *opaque)
2968 char buf[PROBE_BUF_SIZE];
2969 AVProbeData probe_data, *pd = &probe_data;
2974 pd->filename = filename;
2976 pos = url_ftell(pb);
2977 pd->buf_size = get_buffer(pb, buf, PROBE_BUF_SIZE);
2978 url_fseek(pb, pos, SEEK_SET);
2979 fmt = av_probe_image_format(pd);
2982 return AVERROR_NOFMT;
2983 ret = fmt->img_read(pb, alloc_cb, opaque);
2988 * Write an image to a stream.
2989 * @param pb byte stream for the image output
2990 * @param fmt image format
2991 * @param img image data and information
2993 int av_write_image(ByteIOContext *pb, AVImageFormat *fmt, AVImageInfo *img)
2995 return fmt->img_write(pb, img);