2 * Copyright (c) 2000-2003 Fabrice Bellard
4 * This file is part of FFmpeg.
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 * multimedia converter based on the FFmpeg libraries
42 #include "libavformat/avformat.h"
43 #include "libavdevice/avdevice.h"
44 #include "libswresample/swresample.h"
45 #include "libavutil/opt.h"
46 #include "libavutil/channel_layout.h"
47 #include "libavutil/parseutils.h"
48 #include "libavutil/samplefmt.h"
49 #include "libavutil/fifo.h"
50 #include "libavutil/internal.h"
51 #include "libavutil/intreadwrite.h"
52 #include "libavutil/dict.h"
53 #include "libavutil/mathematics.h"
54 #include "libavutil/pixdesc.h"
55 #include "libavutil/avstring.h"
56 #include "libavutil/libm.h"
57 #include "libavutil/imgutils.h"
58 #include "libavutil/timestamp.h"
59 #include "libavutil/bprint.h"
60 #include "libavutil/time.h"
61 #include "libavutil/threadmessage.h"
62 #include "libavcodec/mathops.h"
63 #include "libavformat/os_support.h"
65 # include "libavfilter/avfilter.h"
66 # include "libavfilter/buffersrc.h"
67 # include "libavfilter/buffersink.h"
69 #if HAVE_SYS_RESOURCE_H
71 #include <sys/types.h>
72 #include <sys/resource.h>
73 #elif HAVE_GETPROCESSTIMES
76 #if HAVE_GETPROCESSMEMORYINFO
80 #if HAVE_SETCONSOLECTRLHANDLER
86 #include <sys/select.h>
91 #include <sys/ioctl.h>
105 #include "cmdutils.h"
107 #include "libavutil/avassert.h"
109 const char program_name[] = "ffmpeg";
110 const int program_birth_year = 2000;
112 static FILE *vstats_file;
114 const char *const forced_keyframes_const_names[] = {
123 static void do_video_stats(OutputStream *ost, int frame_size);
124 static int64_t getutime(void);
125 static int64_t getmaxrss(void);
127 static int run_as_daemon = 0;
128 static int nb_frames_dup = 0;
129 static unsigned dup_warning = 1000;
130 static int nb_frames_drop = 0;
131 static int64_t decode_error_stat[2];
133 static int want_sdp = 1;
135 static int current_time;
136 AVIOContext *progress_avio = NULL;
138 static uint8_t *subtitle_out;
140 InputStream **input_streams = NULL;
141 int nb_input_streams = 0;
142 InputFile **input_files = NULL;
143 int nb_input_files = 0;
145 OutputStream **output_streams = NULL;
146 int nb_output_streams = 0;
147 OutputFile **output_files = NULL;
148 int nb_output_files = 0;
150 FilterGraph **filtergraphs;
155 /* init terminal so that we can grab keys */
156 static struct termios oldtty;
157 static int restore_tty;
161 static void free_input_threads(void);
165 Convert subtitles to video with alpha to insert them in filter graphs.
166 This is a temporary solution until libavfilter gets real subtitle support.
169 static int sub2video_get_blank_frame(InputStream *ist)
172 AVFrame *frame = ist->sub2video.frame;
174 av_frame_unref(frame);
175 ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
176 ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
177 ist->sub2video.frame->format = AV_PIX_FMT_RGB32;
178 if ((ret = av_frame_get_buffer(frame, 32)) < 0)
180 memset(frame->data[0], 0, frame->height * frame->linesize[0]);
184 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
187 uint32_t *pal, *dst2;
191 if (r->type != SUBTITLE_BITMAP) {
192 av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
195 if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
196 av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
197 r->x, r->y, r->w, r->h, w, h
202 dst += r->y * dst_linesize + r->x * 4;
204 pal = (uint32_t *)r->data[1];
205 for (y = 0; y < r->h; y++) {
206 dst2 = (uint32_t *)dst;
208 for (x = 0; x < r->w; x++)
209 *(dst2++) = pal[*(src2++)];
211 src += r->linesize[0];
215 static void sub2video_push_ref(InputStream *ist, int64_t pts)
217 AVFrame *frame = ist->sub2video.frame;
220 av_assert1(frame->data[0]);
221 ist->sub2video.last_pts = frame->pts = pts;
222 for (i = 0; i < ist->nb_filters; i++)
223 av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
224 AV_BUFFERSRC_FLAG_KEEP_REF |
225 AV_BUFFERSRC_FLAG_PUSH);
228 static void sub2video_update(InputStream *ist, AVSubtitle *sub)
230 AVFrame *frame = ist->sub2video.frame;
234 int64_t pts, end_pts;
239 pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
240 AV_TIME_BASE_Q, ist->st->time_base);
241 end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
242 AV_TIME_BASE_Q, ist->st->time_base);
243 num_rects = sub->num_rects;
245 pts = ist->sub2video.end_pts;
249 if (sub2video_get_blank_frame(ist) < 0) {
250 av_log(ist->dec_ctx, AV_LOG_ERROR,
251 "Unable to get a blank canvas.\n");
254 dst = frame->data [0];
255 dst_linesize = frame->linesize[0];
256 for (i = 0; i < num_rects; i++)
257 sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
258 sub2video_push_ref(ist, pts);
259 ist->sub2video.end_pts = end_pts;
262 static void sub2video_heartbeat(InputStream *ist, int64_t pts)
264 InputFile *infile = input_files[ist->file_index];
268 /* When a frame is read from a file, examine all sub2video streams in
269 the same file and send the sub2video frame again. Otherwise, decoded
270 video frames could be accumulating in the filter graph while a filter
271 (possibly overlay) is desperately waiting for a subtitle frame. */
272 for (i = 0; i < infile->nb_streams; i++) {
273 InputStream *ist2 = input_streams[infile->ist_index + i];
274 if (!ist2->sub2video.frame)
276 /* subtitles are usually muxed ahead of the other streams;
277 if not, a larger offset would have to be subtracted here */
278 pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
279 /* do not send the heartbeat frame if the subtitle is already ahead */
280 if (pts2 <= ist2->sub2video.last_pts)
282 if (pts2 >= ist2->sub2video.end_pts || !ist2->sub2video.frame->data[0])
283 sub2video_update(ist2, NULL);
284 for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
285 nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
287 sub2video_push_ref(ist2, pts2);
291 static void sub2video_flush(InputStream *ist)
295 if (ist->sub2video.end_pts < INT64_MAX)
296 sub2video_update(ist, NULL);
297 for (i = 0; i < ist->nb_filters; i++)
298 av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
301 /* end of sub2video hack */
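/* Editorial sketch, not part of the original source: sub2video_update() above
 * mixes units that are easy to misread -- AVSubtitle.pts is in AV_TIME_BASE
 * (microsecond) units while start/end_display_time are in milliseconds, hence
 * the "* 1000LL" before rescaling into the input stream time base.  The
 * hypothetical helper below shows the same conversion with concrete numbers,
 * assuming a 1/90000 stream time base. */
static av_unused int64_t sub2video_example_end_pts(void)
{
    AVRational stream_tb   = { 1, 90000 };      /* e.g. an MPEG-TS subtitle stream */
    int64_t    sub_pts     = 2 * AV_TIME_BASE;  /* subtitle event at t = 2 s */
    uint32_t   end_disp_ms = 1500;              /* visible for 1.5 s */

    /* (2 s + 1.5 s) * 90000 = 315000 ticks in the stream time base */
    return av_rescale_q(sub_pts + end_disp_ms * 1000LL,
                        AV_TIME_BASE_Q, stream_tb);
}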
303 static void term_exit_sigsafe(void)
307 tcsetattr (0, TCSANOW, &oldtty);
313 av_log(NULL, AV_LOG_QUIET, "%s", "");
317 static volatile int received_sigterm = 0;
318 static volatile int received_nb_signals = 0;
319 static volatile int transcode_init_done = 0;
320 static volatile int ffmpeg_exited = 0;
321 static int main_return_code = 0;
324 sigterm_handler(int sig)
326 received_sigterm = sig;
327 received_nb_signals++;
329 if(received_nb_signals > 3) {
330 write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
331 strlen("Received > 3 system signals, hard exiting\n"));
337 #if HAVE_SETCONSOLECTRLHANDLER
338 static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
340 av_log(NULL, AV_LOG_DEBUG, "\nReceived Windows signal %ld\n", fdwCtrlType);
345 case CTRL_BREAK_EVENT:
346 sigterm_handler(SIGINT);
349 case CTRL_CLOSE_EVENT:
350 case CTRL_LOGOFF_EVENT:
351 case CTRL_SHUTDOWN_EVENT:
352 sigterm_handler(SIGTERM);
353 /* Basically, with these 3 events, when we return from this handler the
354 process is hard terminated, so stall for as long as needed
355 to let the main thread(s) clean up and terminate gracefully
356 (we have at most 5 seconds, but should be done far before that). */
357 while (!ffmpeg_exited) {
363 av_log(NULL, AV_LOG_ERROR, "Received unknown Windows signal %ld\n", fdwCtrlType);
372 if (!run_as_daemon && stdin_interaction) {
374 if (tcgetattr (0, &tty) == 0) {
378 tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
379 |INLCR|IGNCR|ICRNL|IXON);
380 tty.c_oflag |= OPOST;
381 tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
382 tty.c_cflag &= ~(CSIZE|PARENB);
387 tcsetattr (0, TCSANOW, &tty);
389 signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
393 signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
394 signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
396 signal(SIGXCPU, sigterm_handler);
398 #if HAVE_SETCONSOLECTRLHANDLER
399 SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
403 /* read a key without blocking */
404 static int read_key(void)
416 n = select(1, &rfds, NULL, NULL, &tv);
425 # if HAVE_PEEKNAMEDPIPE
427 static HANDLE input_handle;
430 input_handle = GetStdHandle(STD_INPUT_HANDLE);
431 is_pipe = !GetConsoleMode(input_handle, &dw);
435 /* When running under a GUI, you will end up here. */
436 if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
437 // input pipe may have been closed by the program that ran ffmpeg
455 static int decode_interrupt_cb(void *ctx)
457 return received_nb_signals > transcode_init_done;
460 const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
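/* Editorial sketch, not part of the original source: int_cb is meant to be
 * installed on an AVFormatContext so that blocking I/O inside libavformat
 * polls decode_interrupt_cb() and aborts (with AVERROR_EXIT) once a signal
 * has been received.  The hypothetical helper below shows the usual pattern;
 * error handling is reduced to the minimum. */
static av_unused int example_open_with_interrupt(const char *url, AVFormatContext **pic)
{
    AVFormatContext *ic = avformat_alloc_context();
    int ret;

    if (!ic)
        return AVERROR(ENOMEM);
    ic->interrupt_callback = int_cb;   /* poll decode_interrupt_cb() during blocking I/O */
    ret = avformat_open_input(&ic, url, NULL, NULL);
    if (ret < 0)
        return ret;                    /* avformat_open_input() frees ic on failure */
    *pic = ic;
    return 0;
}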
462 static void ffmpeg_cleanup(int ret)
467 int maxrss = getmaxrss() / 1024;
468 av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
471 for (i = 0; i < nb_filtergraphs; i++) {
472 FilterGraph *fg = filtergraphs[i];
473 avfilter_graph_free(&fg->graph);
474 for (j = 0; j < fg->nb_inputs; j++) {
475 av_buffer_unref(&fg->inputs[j]->hw_frames_ctx);
476 av_freep(&fg->inputs[j]->name);
477 av_freep(&fg->inputs[j]);
479 av_freep(&fg->inputs);
480 for (j = 0; j < fg->nb_outputs; j++) {
481 av_freep(&fg->outputs[j]->name);
482 av_freep(&fg->outputs[j]->formats);
483 av_freep(&fg->outputs[j]->channel_layouts);
484 av_freep(&fg->outputs[j]->sample_rates);
485 av_freep(&fg->outputs[j]);
487 av_freep(&fg->outputs);
488 av_freep(&fg->graph_desc);
490 av_freep(&filtergraphs[i]);
492 av_freep(&filtergraphs);
494 av_freep(&subtitle_out);
497 for (i = 0; i < nb_output_files; i++) {
498 OutputFile *of = output_files[i];
503 if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
505 avformat_free_context(s);
506 av_dict_free(&of->opts);
508 av_freep(&output_files[i]);
510 for (i = 0; i < nb_output_streams; i++) {
511 OutputStream *ost = output_streams[i];
516 for (j = 0; j < ost->nb_bitstream_filters; j++)
517 av_bsf_free(&ost->bsf_ctx[j]);
518 av_freep(&ost->bsf_ctx);
519 av_freep(&ost->bsf_extradata_updated);
521 av_frame_free(&ost->filtered_frame);
522 av_frame_free(&ost->last_frame);
523 av_dict_free(&ost->encoder_opts);
525 av_parser_close(ost->parser);
526 avcodec_free_context(&ost->parser_avctx);
528 av_freep(&ost->forced_keyframes);
529 av_expr_free(ost->forced_keyframes_pexpr);
530 av_freep(&ost->avfilter);
531 av_freep(&ost->logfile_prefix);
533 av_freep(&ost->audio_channels_map);
534 ost->audio_channels_mapped = 0;
536 av_dict_free(&ost->sws_dict);
538 avcodec_free_context(&ost->enc_ctx);
539 avcodec_parameters_free(&ost->ref_par);
541 while (ost->muxing_queue && av_fifo_size(ost->muxing_queue)) {
543 av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
544 av_packet_unref(&pkt);
546 av_fifo_freep(&ost->muxing_queue);
548 av_freep(&output_streams[i]);
551 free_input_threads();
553 for (i = 0; i < nb_input_files; i++) {
554 avformat_close_input(&input_files[i]->ctx);
555 av_freep(&input_files[i]);
557 for (i = 0; i < nb_input_streams; i++) {
558 InputStream *ist = input_streams[i];
560 av_frame_free(&ist->decoded_frame);
561 av_frame_free(&ist->filter_frame);
562 av_dict_free(&ist->decoder_opts);
563 avsubtitle_free(&ist->prev_sub.subtitle);
564 av_frame_free(&ist->sub2video.frame);
565 av_freep(&ist->filters);
566 av_freep(&ist->hwaccel_device);
567 av_freep(&ist->dts_buffer);
569 avcodec_free_context(&ist->dec_ctx);
571 av_freep(&input_streams[i]);
575 if (fclose(vstats_file))
576 av_log(NULL, AV_LOG_ERROR,
577 "Error closing vstats file, loss of information possible: %s\n",
578 av_err2str(AVERROR(errno)));
580 av_freep(&vstats_filename);
582 av_freep(&input_streams);
583 av_freep(&input_files);
584 av_freep(&output_streams);
585 av_freep(&output_files);
589 avformat_network_deinit();
591 if (received_sigterm) {
592 av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
593 (int) received_sigterm);
594 } else if (ret && transcode_init_done) {
595 av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
601 void remove_avoptions(AVDictionary **a, AVDictionary *b)
603 AVDictionaryEntry *t = NULL;
605 while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
606 av_dict_set(a, t->key, NULL, AV_DICT_MATCH_CASE);
610 void assert_avoptions(AVDictionary *m)
612 AVDictionaryEntry *t;
613 if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
614 av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
619 static void abort_codec_experimental(AVCodec *c, int encoder)
624 static void update_benchmark(const char *fmt, ...)
626 if (do_benchmark_all) {
627 int64_t t = getutime();
633 vsnprintf(buf, sizeof(buf), fmt, va);
635 av_log(NULL, AV_LOG_INFO, "bench: %8"PRIu64" %s \n", t - current_time, buf);
641 static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
644 for (i = 0; i < nb_output_streams; i++) {
645 OutputStream *ost2 = output_streams[i];
646 ost2->finished |= ost == ost2 ? this_stream : others;
650 static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost)
652 AVFormatContext *s = of->ctx;
653 AVStream *st = ost->st;
656 if (!of->header_written) {
657 AVPacket tmp_pkt = {0};
658 /* the muxer is not initialized yet, buffer the packet */
659 if (!av_fifo_space(ost->muxing_queue)) {
660 int new_size = FFMIN(2 * av_fifo_size(ost->muxing_queue),
661 ost->max_muxing_queue_size);
662 if (new_size <= av_fifo_size(ost->muxing_queue)) {
663 av_log(NULL, AV_LOG_ERROR,
664 "Too many packets buffered for output stream %d:%d.\n",
665 ost->file_index, ost->st->index);
668 ret = av_fifo_realloc2(ost->muxing_queue, new_size);
672 ret = av_packet_ref(&tmp_pkt, pkt);
675 av_fifo_generic_write(ost->muxing_queue, &tmp_pkt, sizeof(tmp_pkt), NULL);
676 av_packet_unref(pkt);
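/* Editorial note, not part of the original source: the queue above grows
 * geometrically but is capped at ost->max_muxing_queue_size, and each queued
 * packet holds its own reference (av_packet_ref) because *pkt is unreffed
 * before returning.  Once the muxer header has been written, the buffered
 * packets are drained elsewhere with the matching read pattern, e.g. (a
 * hypothetical sketch):
 *
 *     while (av_fifo_size(ost->muxing_queue)) {
 *         AVPacket tmp;
 *         av_fifo_generic_read(ost->muxing_queue, &tmp, sizeof(tmp), NULL);
 *         write_packet(of, &tmp, ost);   // now that of->header_written is set
 *     }
 */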
680 if ((st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && video_sync_method == VSYNC_DROP) ||
681 (st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && audio_sync_method < 0))
682 pkt->pts = pkt->dts = AV_NOPTS_VALUE;
685 * Audio encoders may split the packets -- #frames in != #packets out.
686 * But there is no reordering, so we can limit the number of output packets
687 * by simply dropping them here.
688 * Counting encoded video frames needs to be done separately because of
689 * reordering, see do_video_out()
691 if (!(st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && ost->encoding_needed)) {
692 if (ost->frame_number >= ost->max_frames) {
693 av_packet_unref(pkt);
698 if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
700 uint8_t *sd = av_packet_get_side_data(pkt, AV_PKT_DATA_QUALITY_STATS,
702 ost->quality = sd ? AV_RL32(sd) : -1;
703 ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;
705 for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
707 ost->error[i] = AV_RL64(sd + 8 + 8*i);
712 if (ost->frame_rate.num && ost->is_cfr) {
713 if (pkt->duration > 0)
714 av_log(NULL, AV_LOG_WARNING, "Overriding packet duration with the frame rate; this should not happen\n");
715 pkt->duration = av_rescale_q(1, av_inv_q(ost->frame_rate),
720 av_packet_rescale_ts(pkt, ost->mux_timebase, ost->st->time_base);
722 if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
723 if (pkt->dts != AV_NOPTS_VALUE &&
724 pkt->pts != AV_NOPTS_VALUE &&
725 pkt->dts > pkt->pts) {
726 av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
728 ost->file_index, ost->st->index);
730 pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
731 - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
732 - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
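/* Editorial note, not part of the original source: the expression above is
 * the "sum minus min minus max" identity, i.e. when dts > pts the invalid dts
 * is replaced by the median of pts, dts and last_mux_dts + 1, a compromise
 * that stays close to both the packet pts and the previously muxed dts.
 * Worked example:
 *
 *     pts = 100, dts = 140, last_mux_dts + 1 = 91
 *     100 + 140 + 91 - FFMIN3(...) [91] - FFMAX3(...) [140]  ->  dts = 100
 */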
734 if ((st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) &&
735 pkt->dts != AV_NOPTS_VALUE &&
736 !(st->codecpar->codec_id == AV_CODEC_ID_VP9 && ost->stream_copy) &&
737 ost->last_mux_dts != AV_NOPTS_VALUE) {
738 int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
739 if (pkt->dts < max) {
740 int loglevel = max - pkt->dts > 2 || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
741 av_log(s, loglevel, "Non-monotonic DTS in output stream "
742 "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
743 ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
745 av_log(NULL, AV_LOG_FATAL, "aborting.\n");
748 av_log(s, loglevel, "changing to %"PRId64". This may result "
749 "in incorrect timestamps in the output file.\n",
751 if (pkt->pts >= pkt->dts)
752 pkt->pts = FFMAX(pkt->pts, max);
757 ost->last_mux_dts = pkt->dts;
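/* Editorial note, not part of the original source: the check above enforces
 * (strictly) increasing DTS per output stream.  For a strict muxer
 * (AVFMT_TS_NONSTRICT not set) max = last_mux_dts + 1, for a non-strict one
 * equal DTS are tolerated and max = last_mux_dts.  Worked example:
 *
 *     last_mux_dts = 1000, strict muxer      ->  max = 1001
 *     incoming dts = 998 (< max)             ->  dts is bumped up to max, and
 *                                                pts = FFMAX(pts, max) if the
 *                                                packet had pts >= dts
 *
 * With -xerror (exit_on_error) the mismatch aborts instead of being patched. */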
759 ost->data_size += pkt->size;
760 ost->packets_written++;
762 pkt->stream_index = ost->index;
765 av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
766 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
767 av_get_media_type_string(ost->enc_ctx->codec_type),
768 av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
769 av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
774 ret = av_interleaved_write_frame(s, pkt);
776 print_error("av_interleaved_write_frame()", ret);
777 main_return_code = 1;
778 close_all_output_streams(ost, MUXER_FINISHED | ENCODER_FINISHED, ENCODER_FINISHED);
780 av_packet_unref(pkt);
783 static void close_output_stream(OutputStream *ost)
785 OutputFile *of = output_files[ost->file_index];
787 ost->finished |= ENCODER_FINISHED;
789 int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
790 of->recording_time = FFMIN(of->recording_time, end);
794 static void output_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost)
798 /* apply the output bitstream filters, if any */
799 if (ost->nb_bitstream_filters) {
802 av_packet_split_side_data(pkt);
803 ret = av_bsf_send_packet(ost->bsf_ctx[0], pkt);
809 /* get a packet from the previous filter up the chain */
810 ret = av_bsf_receive_packet(ost->bsf_ctx[idx - 1], pkt);
811 if (ret == AVERROR(EAGAIN)) {
817 /* HACK! - aac_adtstoasc updates extradata after filtering the first frame when
818 * the API states this shouldn't happen after init(). Propagate it here to the
819 * muxer and to the next filters in the chain to work around this.
820 * TODO/FIXME - Make aac_adtstoasc use new packet side data instead of changing
821 * par_out->extradata and adapt muxers accordingly to get rid of this. */
822 if (!(ost->bsf_extradata_updated[idx - 1] & 1)) {
823 ret = avcodec_parameters_copy(ost->st->codecpar, ost->bsf_ctx[idx - 1]->par_out);
826 ost->bsf_extradata_updated[idx - 1] |= 1;
829 /* send it to the next filter down the chain or to the muxer */
830 if (idx < ost->nb_bitstream_filters) {
831 /* HACK/FIXME! - See above */
832 if (!(ost->bsf_extradata_updated[idx] & 2)) {
833 ret = avcodec_parameters_copy(ost->bsf_ctx[idx]->par_out, ost->bsf_ctx[idx - 1]->par_out);
836 ost->bsf_extradata_updated[idx] |= 2;
838 ret = av_bsf_send_packet(ost->bsf_ctx[idx], pkt);
843 write_packet(of, pkt, ost);
846 write_packet(of, pkt, ost);
849 if (ret < 0 && ret != AVERROR_EOF) {
850 av_log(NULL, AV_LOG_ERROR, "Error applying bitstream filters to an output "
851 "packet for stream #%d:%d.\n", ost->file_index, ost->index);
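/* Editorial sketch, not part of the original source: the chain handling above
 * is the general pattern of the AVBSF API.  For a single filter, e.g. the
 * "null" bitstream filter, it reduces to the following (error checks omitted
 * for brevity):
 *
 *     const AVBitStreamFilter *f = av_bsf_get_by_name("null");
 *     AVBSFContext *bsf;
 *     av_bsf_alloc(f, &bsf);
 *     avcodec_parameters_copy(bsf->par_in, ost->st->codecpar);
 *     av_bsf_init(bsf);
 *
 *     av_bsf_send_packet(bsf, pkt);                 // hand one packet to the filter
 *     while (av_bsf_receive_packet(bsf, pkt) == 0)  // it may emit 0..N packets
 *         write_packet(of, pkt, ost);
 *     av_bsf_free(&bsf);
 */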
857 static int check_recording_time(OutputStream *ost)
859 OutputFile *of = output_files[ost->file_index];
861 if (of->recording_time != INT64_MAX &&
862 av_compare_ts(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, of->recording_time,
863 AV_TIME_BASE_Q) >= 0) {
864 close_output_stream(ost);
870 static void do_audio_out(OutputFile *of, OutputStream *ost,
873 AVCodecContext *enc = ost->enc_ctx;
877 av_init_packet(&pkt);
881 if (!check_recording_time(ost))
884 if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
885 frame->pts = ost->sync_opts;
886 ost->sync_opts = frame->pts + frame->nb_samples;
887 ost->samples_encoded += frame->nb_samples;
888 ost->frames_encoded++;
890 av_assert0(pkt.size || !pkt.data);
891 update_benchmark(NULL);
893 av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
894 "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
895 av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
896 enc->time_base.num, enc->time_base.den);
899 ret = avcodec_send_frame(enc, frame);
904 ret = avcodec_receive_packet(enc, &pkt);
905 if (ret == AVERROR(EAGAIN))
910 update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
912 av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
915 av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
916 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
917 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
918 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
921 output_packet(of, &pkt, ost);
926 av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
930 static void do_subtitle_out(OutputFile *of,
934 int subtitle_out_max_size = 1024 * 1024;
935 int subtitle_out_size, nb, i;
940 if (sub->pts == AV_NOPTS_VALUE) {
941 av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
950 subtitle_out = av_malloc(subtitle_out_max_size);
952 av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
957 /* Note: DVB subtitles need one packet to draw them and another
958 packet to clear them */
959 /* XXX: signal it in the codec context ? */
960 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
965 /* shift timestamp to honor -ss and make check_recording_time() work with -t */
967 if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
968 pts -= output_files[ost->file_index]->start_time;
969 for (i = 0; i < nb; i++) {
970 unsigned save_num_rects = sub->num_rects;
972 ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
973 if (!check_recording_time(ost))
977 // start_display_time is required to be 0
978 sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
979 sub->end_display_time -= sub->start_display_time;
980 sub->start_display_time = 0;
984 ost->frames_encoded++;
986 subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
987 subtitle_out_max_size, sub);
989 sub->num_rects = save_num_rects;
990 if (subtitle_out_size < 0) {
991 av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
995 av_init_packet(&pkt);
996 pkt.data = subtitle_out;
997 pkt.size = subtitle_out_size;
998 pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->mux_timebase);
999 pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1000 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
1001 /* XXX: the pts correction is handled here. Maybe handling
1002 it in the codec would be better */
1004 pkt.pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1006 pkt.pts += av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1009 output_packet(of, &pkt, ost);
1013 static void do_video_out(OutputFile *of,
1015 AVFrame *next_picture,
1018 int ret, format_video_sync;
1020 AVCodecContext *enc = ost->enc_ctx;
1021 AVCodecParameters *mux_par = ost->st->codecpar;
1022 AVRational frame_rate;
1023 int nb_frames, nb0_frames, i;
1024 double delta, delta0;
1025 double duration = 0;
1027 InputStream *ist = NULL;
1028 AVFilterContext *filter = ost->filter->filter;
1030 if (ost->source_index >= 0)
1031 ist = input_streams[ost->source_index];
1033 frame_rate = av_buffersink_get_frame_rate(filter);
1034 if (frame_rate.num > 0 && frame_rate.den > 0)
1035 duration = 1/(av_q2d(frame_rate) * av_q2d(enc->time_base));
1037 if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
1038 duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
1040 if (!ost->filters_script &&
1044 lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
1045 duration = lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
1048 if (!next_picture) {
1050 nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
1051 ost->last_nb0_frames[1],
1052 ost->last_nb0_frames[2]);
1054 delta0 = sync_ipts - ost->sync_opts; // delta0 is the "drift" between the input frame (next_picture) and where it would fall in the output.
1055 delta = delta0 + duration;
1057 /* by default, we output a single frame */
1058 nb0_frames = 0; // tracks the number of times the PREVIOUS frame should be duplicated, mostly for variable framerate (VFR)
1061 format_video_sync = video_sync_method;
1062 if (format_video_sync == VSYNC_AUTO) {
1063 if(!strcmp(of->ctx->oformat->name, "avi")) {
1064 format_video_sync = VSYNC_VFR;
1066 format_video_sync = (of->ctx->oformat->flags & AVFMT_VARIABLE_FPS) ? ((of->ctx->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
1068 && format_video_sync == VSYNC_CFR
1069 && input_files[ist->file_index]->ctx->nb_streams == 1
1070 && input_files[ist->file_index]->input_ts_offset == 0) {
1071 format_video_sync = VSYNC_VSCFR;
1073 if (format_video_sync == VSYNC_CFR && copy_ts) {
1074 format_video_sync = VSYNC_VSCFR;
1077 ost->is_cfr = (format_video_sync == VSYNC_CFR || format_video_sync == VSYNC_VSCFR);
1081 format_video_sync != VSYNC_PASSTHROUGH &&
1082 format_video_sync != VSYNC_DROP) {
1083 if (delta0 < -0.6) {
1084 av_log(NULL, AV_LOG_WARNING, "Past duration %f too large\n", -delta0);
1086 av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
1087 sync_ipts = ost->sync_opts;
1092 switch (format_video_sync) {
1094 if (ost->frame_number == 0 && delta0 >= 0.5) {
1095 av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
1098 ost->sync_opts = lrint(sync_ipts);
1101 // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
1102 if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
1104 } else if (delta < -1.1)
1106 else if (delta > 1.1) {
1107 nb_frames = lrintf(delta);
1109 nb0_frames = lrintf(delta0 - 0.6);
1115 else if (delta > 0.6)
1116 ost->sync_opts = lrint(sync_ipts);
1119 case VSYNC_PASSTHROUGH:
1120 ost->sync_opts = lrint(sync_ipts);
1127 nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
1128 nb0_frames = FFMIN(nb0_frames, nb_frames);
1130 memmove(ost->last_nb0_frames + 1,
1131 ost->last_nb0_frames,
1132 sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
1133 ost->last_nb0_frames[0] = nb0_frames;
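/* Editorial note, not part of the original source: a worked example of the
 * CFR branch above.  With a CFR encoder time base of 1/framerate one tick is
 * one frame, so with sync_ipts = 107.4, ost->sync_opts = 100 and duration = 1:
 *
 *     delta0 = 107.4 - 100 = 7.4   (drift of the new frame)
 *     delta  = delta0 + 1  = 8.4   (> 1.1  ->  duplicate)
 *     nb_frames = lrintf(8.4) = 8  i.e. 7 extra copies are emitted so that
 *                                  sync_opts catches up with the input pts.
 *
 * Negative drift works the other way round: delta < -1.1 drops the frame
 * (nb_frames = 0), which is what the "dropping frame" message below reports. */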
1135 if (nb0_frames == 0 && ost->last_dropped) {
1137 av_log(NULL, AV_LOG_VERBOSE,
1138 "*** dropping frame %d from stream %d at ts %"PRId64"\n",
1139 ost->frame_number, ost->st->index, ost->last_frame->pts);
1141 if (nb_frames > (nb0_frames && ost->last_dropped) + (nb_frames > nb0_frames)) {
1142 if (nb_frames > dts_error_threshold * 30) {
1143 av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
1147 nb_frames_dup += nb_frames - (nb0_frames && ost->last_dropped) - (nb_frames > nb0_frames);
1148 av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
1149 if (nb_frames_dup > dup_warning) {
1150 av_log(NULL, AV_LOG_WARNING, "More than %d frames duplicated\n", dup_warning);
1154 ost->last_dropped = nb_frames == nb0_frames && next_picture;
1156 /* duplicates frame if needed */
1157 for (i = 0; i < nb_frames; i++) {
1158 AVFrame *in_picture;
1159 av_init_packet(&pkt);
1163 if (i < nb0_frames && ost->last_frame) {
1164 in_picture = ost->last_frame;
1166 in_picture = next_picture;
1171 in_picture->pts = ost->sync_opts;
1174 if (!check_recording_time(ost))
1176 if (ost->frame_number >= ost->max_frames)
1180 #if FF_API_LAVF_FMT_RAWPICTURE
1181 if (of->ctx->oformat->flags & AVFMT_RAWPICTURE &&
1182 enc->codec->id == AV_CODEC_ID_RAWVIDEO) {
1183 /* raw pictures are written as an AVPicture structure to
1184 avoid any copies. We temporarily support the older
1186 if (in_picture->interlaced_frame)
1187 mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1189 mux_par->field_order = AV_FIELD_PROGRESSIVE;
1190 pkt.data = (uint8_t *)in_picture;
1191 pkt.size = sizeof(AVPicture);
1192 pkt.pts = av_rescale_q(in_picture->pts, enc->time_base, ost->mux_timebase);
1193 pkt.flags |= AV_PKT_FLAG_KEY;
1195 output_packet(of, &pkt, ost);
1199 int forced_keyframe = 0;
1202 if (enc->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME) &&
1203 ost->top_field_first >= 0)
1204 in_picture->top_field_first = !!ost->top_field_first;
1206 if (in_picture->interlaced_frame) {
1207 if (enc->codec->id == AV_CODEC_ID_MJPEG)
1208 mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
1210 mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1212 mux_par->field_order = AV_FIELD_PROGRESSIVE;
1214 in_picture->quality = enc->global_quality;
1215 in_picture->pict_type = 0;
1217 pts_time = in_picture->pts != AV_NOPTS_VALUE ?
1218 in_picture->pts * av_q2d(enc->time_base) : NAN;
1219 if (ost->forced_kf_index < ost->forced_kf_count &&
1220 in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1221 ost->forced_kf_index++;
1222 forced_keyframe = 1;
1223 } else if (ost->forced_keyframes_pexpr) {
1225 ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
1226 res = av_expr_eval(ost->forced_keyframes_pexpr,
1227 ost->forced_keyframes_expr_const_values, NULL);
1228 ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
1229 ost->forced_keyframes_expr_const_values[FKF_N],
1230 ost->forced_keyframes_expr_const_values[FKF_N_FORCED],
1231 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N],
1232 ost->forced_keyframes_expr_const_values[FKF_T],
1233 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T],
1236 forced_keyframe = 1;
1237 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] =
1238 ost->forced_keyframes_expr_const_values[FKF_N];
1239 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] =
1240 ost->forced_keyframes_expr_const_values[FKF_T];
1241 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] += 1;
1244 ost->forced_keyframes_expr_const_values[FKF_N] += 1;
1245 } else if ( ost->forced_keyframes
1246 && !strncmp(ost->forced_keyframes, "source", 6)
1247 && in_picture->key_frame==1) {
1248 forced_keyframe = 1;
1251 if (forced_keyframe) {
1252 in_picture->pict_type = AV_PICTURE_TYPE_I;
1253 av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
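/* Editorial sketch, not part of the original source: the expression branch
 * above uses libavutil's expression evaluator.  Outside of ffmpeg.c the same
 * mechanism boils down to (hypothetical subset of the constants, error checks
 * omitted):
 *
 *     static const char *const names[] = { "n", "t", NULL };
 *     double vals[] = { 0, 0 };
 *     AVExpr *e;
 *     av_expr_parse(&e, "gte(t,5)", names, NULL, NULL, NULL, NULL, 0, NULL);
 *     vals[1] = 7.0;                                  // t = 7 s
 *     int force = av_expr_eval(e, vals, NULL) > 0;    // -> 1, force a keyframe
 *     av_expr_free(e);
 *
 * In ffmpeg itself the constant names come from forced_keyframes_const_names
 * and include n, n_forced, prev_forced_n, prev_forced_t and t (the FKF_*
 * indices used above). */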
1256 update_benchmark(NULL);
1258 av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
1259 "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1260 av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
1261 enc->time_base.num, enc->time_base.den);
1264 ost->frames_encoded++;
1266 ret = avcodec_send_frame(enc, in_picture);
1271 ret = avcodec_receive_packet(enc, &pkt);
1272 update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
1273 if (ret == AVERROR(EAGAIN))
1279 av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1280 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1281 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
1282 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
1285 if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY))
1286 pkt.pts = ost->sync_opts;
1288 av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
1291 av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1292 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1293 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->mux_timebase),
1294 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->mux_timebase));
1297 frame_size = pkt.size;
1298 output_packet(of, &pkt, ost);
1300 /* if two pass, output log */
1301 if (ost->logfile && enc->stats_out) {
1302 fprintf(ost->logfile, "%s", enc->stats_out);
1308 * For video, number of frames in == number of packets out.
1309 * But there may be reordering, so we can't throw away frames on encoder
1310 * flush, we need to limit them here, before they go into encoder.
1312 ost->frame_number++;
1314 if (vstats_filename && frame_size)
1315 do_video_stats(ost, frame_size);
1318 if (!ost->last_frame)
1319 ost->last_frame = av_frame_alloc();
1320 av_frame_unref(ost->last_frame);
1321 if (next_picture && ost->last_frame)
1322 av_frame_ref(ost->last_frame, next_picture);
1324 av_frame_free(&ost->last_frame);
1328 av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
1332 static double psnr(double d)
1334 return -10.0 * log10(d);
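/* Editorial note, not part of the original source: the callers below pass
 * SSE / (width * height * 255^2), i.e. the mean squared error normalized to
 * the 8-bit peak value.  Worked example for a 1920x1080 luma plane:
 *
 *     SSE  = 13 478 000
 *     d    = 13 478 000 / (1920 * 1080 * 255 * 255) ~= 1.0e-4
 *     PSNR = -10 * log10(1.0e-4) = 40 dB
 */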
1337 static void do_video_stats(OutputStream *ost, int frame_size)
1339 AVCodecContext *enc;
1341 double ti1, bitrate, avg_bitrate;
1343 /* this is executed just the first time do_video_stats is called */
1345 vstats_file = fopen(vstats_filename, "w");
1353 if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1354 frame_number = ost->st->nb_frames;
1355 if (vstats_version <= 1) {
1356 fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
1357 ost->quality / (float)FF_QP2LAMBDA);
1359 fprintf(vstats_file, "out= %2d st= %2d frame= %5d q= %2.1f ", ost->file_index, ost->index, frame_number,
1360 ost->quality / (float)FF_QP2LAMBDA);
1363 if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
1364 fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
1366 fprintf(vstats_file,"f_size= %6d ", frame_size);
1367 /* compute pts value */
1368 ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
1372 bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
1373 avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
1374 fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
1375 (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
1376 fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
1380 static void finish_output_stream(OutputStream *ost)
1382 OutputFile *of = output_files[ost->file_index];
1385 ost->finished = ENCODER_FINISHED | MUXER_FINISHED;
1388 for (i = 0; i < of->ctx->nb_streams; i++)
1389 output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED;
1394 * Get and encode new output from any of the filtergraphs, without causing
1397 * @return 0 for success, <0 for severe errors
1399 static int reap_filters(int flush)
1401 AVFrame *filtered_frame = NULL;
1404 /* Reap all buffers present in the buffer sinks */
1405 for (i = 0; i < nb_output_streams; i++) {
1406 OutputStream *ost = output_streams[i];
1407 OutputFile *of = output_files[ost->file_index];
1408 AVFilterContext *filter;
1409 AVCodecContext *enc = ost->enc_ctx;
1414 filter = ost->filter->filter;
1416 if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
1417 return AVERROR(ENOMEM);
1419 filtered_frame = ost->filtered_frame;
1422 double float_pts = AV_NOPTS_VALUE; // this is identical to filtered_frame.pts but with higher precision
1423 ret = av_buffersink_get_frame_flags(filter, filtered_frame,
1424 AV_BUFFERSINK_FLAG_NO_REQUEST);
1426 if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
1427 av_log(NULL, AV_LOG_WARNING,
1428 "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
1429 } else if (flush && ret == AVERROR_EOF) {
1430 if (av_buffersink_get_type(filter) == AVMEDIA_TYPE_VIDEO)
1431 do_video_out(of, ost, NULL, AV_NOPTS_VALUE);
1435 if (ost->finished) {
1436 av_frame_unref(filtered_frame);
1439 if (filtered_frame->pts != AV_NOPTS_VALUE) {
1440 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1441 AVRational filter_tb = av_buffersink_get_time_base(filter);
1442 AVRational tb = enc->time_base;
1443 int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
1445 tb.den <<= extra_bits;
1447 av_rescale_q(filtered_frame->pts, filter_tb, tb) -
1448 av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
1449 float_pts /= 1 << extra_bits;
1450 // avoid exact midpoints to reduce the chance of rounding differences; this can be removed once the fps code is changed to work with integers
1451 float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
1453 filtered_frame->pts =
1454 av_rescale_q(filtered_frame->pts, filter_tb, enc->time_base) -
1455 av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
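/* Editorial note, not part of the original source: the extra_bits trick above
 * computes a sub-tick pts.  Worked example, assuming filter_tb = 1/1000 and an
 * encoder time base of 1/25:
 *
 *     extra_bits = av_clip(29 - av_log2(25), 0, 16) = 16
 *     tb.den     = 25 << 16 = 1638400
 *     a frame at pts 50 (50 ms) rescales to 81920, and 81920 / 2^16 = 1.25,
 *     whereas the plain integer rescale into 1/25 rounds to 1.
 *
 * do_video_out() uses this fractional value (sync_ipts) for its duplicate/drop
 * decision, and the +/- 2^-17 nudge keeps it away from exact .5 midpoints. */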
1457 //if (ost->source_index >= 0)
1458 // *filtered_frame= *input_streams[ost->source_index]->decoded_frame; //for me_threshold
1460 switch (av_buffersink_get_type(filter)) {
1461 case AVMEDIA_TYPE_VIDEO:
1462 if (!ost->frame_aspect_ratio.num)
1463 enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
1466 av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
1467 av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base),
1469 enc->time_base.num, enc->time_base.den);
1472 do_video_out(of, ost, filtered_frame, float_pts);
1474 case AVMEDIA_TYPE_AUDIO:
1475 if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
1476 enc->channels != av_frame_get_channels(filtered_frame)) {
1477 av_log(NULL, AV_LOG_ERROR,
1478 "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
1481 do_audio_out(of, ost, filtered_frame);
1484 // TODO support subtitle filters
1488 av_frame_unref(filtered_frame);
1495 static void print_final_stats(int64_t total_size)
1497 uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
1498 uint64_t subtitle_size = 0;
1499 uint64_t data_size = 0;
1500 float percent = -1.0;
1504 for (i = 0; i < nb_output_streams; i++) {
1505 OutputStream *ost = output_streams[i];
1506 switch (ost->enc_ctx->codec_type) {
1507 case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
1508 case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
1509 case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
1510 default: other_size += ost->data_size; break;
1512 extra_size += ost->enc_ctx->extradata_size;
1513 data_size += ost->data_size;
1514 if ( (ost->enc_ctx->flags & (AV_CODEC_FLAG_PASS1 | AV_CODEC_FLAG_PASS2))
1515 != AV_CODEC_FLAG_PASS1)
1519 if (data_size && total_size>0 && total_size >= data_size)
1520 percent = 100.0 * (total_size - data_size) / data_size;
1522 av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
1523 video_size / 1024.0,
1524 audio_size / 1024.0,
1525 subtitle_size / 1024.0,
1526 other_size / 1024.0,
1527 extra_size / 1024.0);
1529 av_log(NULL, AV_LOG_INFO, "%f%%", percent);
1531 av_log(NULL, AV_LOG_INFO, "unknown");
1532 av_log(NULL, AV_LOG_INFO, "\n");
1534 /* print verbose per-stream stats */
1535 for (i = 0; i < nb_input_files; i++) {
1536 InputFile *f = input_files[i];
1537 uint64_t total_packets = 0, total_size = 0;
1539 av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
1540 i, f->ctx->filename);
1542 for (j = 0; j < f->nb_streams; j++) {
1543 InputStream *ist = input_streams[f->ist_index + j];
1544 enum AVMediaType type = ist->dec_ctx->codec_type;
1546 total_size += ist->data_size;
1547 total_packets += ist->nb_packets;
1549 av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
1550 i, j, media_type_string(type));
1551 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
1552 ist->nb_packets, ist->data_size);
1554 if (ist->decoding_needed) {
1555 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
1556 ist->frames_decoded);
1557 if (type == AVMEDIA_TYPE_AUDIO)
1558 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
1559 av_log(NULL, AV_LOG_VERBOSE, "; ");
1562 av_log(NULL, AV_LOG_VERBOSE, "\n");
1565 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
1566 total_packets, total_size);
1569 for (i = 0; i < nb_output_files; i++) {
1570 OutputFile *of = output_files[i];
1571 uint64_t total_packets = 0, total_size = 0;
1573 av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
1574 i, of->ctx->filename);
1576 for (j = 0; j < of->ctx->nb_streams; j++) {
1577 OutputStream *ost = output_streams[of->ost_index + j];
1578 enum AVMediaType type = ost->enc_ctx->codec_type;
1580 total_size += ost->data_size;
1581 total_packets += ost->packets_written;
1583 av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
1584 i, j, media_type_string(type));
1585 if (ost->encoding_needed) {
1586 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
1587 ost->frames_encoded);
1588 if (type == AVMEDIA_TYPE_AUDIO)
1589 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
1590 av_log(NULL, AV_LOG_VERBOSE, "; ");
1593 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
1594 ost->packets_written, ost->data_size);
1596 av_log(NULL, AV_LOG_VERBOSE, "\n");
1599 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
1600 total_packets, total_size);
1602 if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
1603 av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
1605 av_log(NULL, AV_LOG_WARNING, "\n");
1607 av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
1612 static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
1615 AVBPrint buf_script;
1617 AVFormatContext *oc;
1619 AVCodecContext *enc;
1620 int frame_number, vid, i;
1623 int64_t pts = INT64_MIN + 1;
1624 static int64_t last_time = -1;
1625 static int qp_histogram[52];
1626 int hours, mins, secs, us;
1630 if (!print_stats && !is_last_report && !progress_avio)
1633 if (!is_last_report) {
1634 if (last_time == -1) {
1635 last_time = cur_time;
1638 if ((cur_time - last_time) < 500000)
1640 last_time = cur_time;
1643 t = (cur_time-timer_start) / 1000000.0;
1646 oc = output_files[0]->ctx;
1648 total_size = avio_size(oc->pb);
1649 if (total_size <= 0) // FIXME improve avio_size() so it works with non-seekable output too
1650 total_size = avio_tell(oc->pb);
1654 av_bprint_init(&buf_script, 0, 1);
1655 for (i = 0; i < nb_output_streams; i++) {
1657 ost = output_streams[i];
1659 if (!ost->stream_copy)
1660 q = ost->quality / (float) FF_QP2LAMBDA;
1662 if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1663 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
1664 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1665 ost->file_index, ost->index, q);
1667 if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1670 frame_number = ost->frame_number;
1671 fps = t > 1 ? frame_number / t : 0;
1672 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3.*f q=%3.1f ",
1673 frame_number, fps < 9.95, fps, q);
1674 av_bprintf(&buf_script, "frame=%d\n", frame_number);
1675 av_bprintf(&buf_script, "fps=%.1f\n", fps);
1676 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1677 ost->file_index, ost->index, q);
1679 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
1683 if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
1685 for (j = 0; j < 32; j++)
1686 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", av_log2(qp_histogram[j] + 1));
1689 if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
1691 double error, error_sum = 0;
1692 double scale, scale_sum = 0;
1694 char type[3] = { 'Y','U','V' };
1695 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
1696 for (j = 0; j < 3; j++) {
1697 if (is_last_report) {
1698 error = enc->error[j];
1699 scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1701 error = ost->error[j];
1702 scale = enc->width * enc->height * 255.0 * 255.0;
1708 p = psnr(error / scale);
1709 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], p);
1710 av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
1711 ost->file_index, ost->index, type[j] | 32, p);
1713 p = psnr(error_sum / scale_sum);
1714 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum / scale_sum));
1715 av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
1716 ost->file_index, ost->index, p);
1720 /* compute min output value */
1721 if (av_stream_get_end_pts(ost->st) != AV_NOPTS_VALUE)
1722 pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
1723 ost->st->time_base, AV_TIME_BASE_Q));
1725 nb_frames_drop += ost->last_dropped;
1728 secs = FFABS(pts) / AV_TIME_BASE;
1729 us = FFABS(pts) % AV_TIME_BASE;
1735 bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
1736 speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;
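/* Editorial note, not part of the original source: pts here is in AV_TIME_BASE
 * (microsecond) units, so pts / 1000 is the output duration in milliseconds and
 * total_size * 8 / (pts / 1000.0) yields kbit/s.  Worked example:
 *
 *     total_size = 1 250 000 bytes, pts = 10 * AV_TIME_BASE (10 s)
 *     bitrate    = 10 000 000 bits / 10 000 ms = 1000 kbit/s
 *     speed      = (10 s of output) / (t = 5 s of wall clock) = 2x
 */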
1738 if (total_size < 0) snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1740 else snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1741 "size=%8.0fkB time=", total_size / 1024.0);
1743 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "-");
1744 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1745 "%02d:%02d:%02d.%02d ", hours, mins, secs,
1746 (100 * us) / AV_TIME_BASE);
1749 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=N/A");
1750 av_bprintf(&buf_script, "bitrate=N/A\n");
1752 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=%6.1fkbits/s", bitrate);
1753 av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
1756 if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
1757 else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
1758 av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
1759 av_bprintf(&buf_script, "out_time=%02d:%02d:%02d.%06d\n",
1760 hours, mins, secs, us);
1762 if (nb_frames_dup || nb_frames_drop)
1763 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " dup=%d drop=%d",
1764 nb_frames_dup, nb_frames_drop);
1765 av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
1766 av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
1769 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=N/A");
1770 av_bprintf(&buf_script, "speed=N/A\n");
1772 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=%4.3gx", speed);
1773 av_bprintf(&buf_script, "speed=%4.3gx\n", speed);
1776 if (print_stats || is_last_report) {
1777 const char end = is_last_report ? '\n' : '\r';
1778 if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
1779 fprintf(stderr, "%s %c", buf, end);
1781 av_log(NULL, AV_LOG_INFO, "%s %c", buf, end);
1786 if (progress_avio) {
1787 av_bprintf(&buf_script, "progress=%s\n",
1788 is_last_report ? "end" : "continue");
1789 avio_write(progress_avio, buf_script.str,
1790 FFMIN(buf_script.len, buf_script.size - 1));
1791 avio_flush(progress_avio);
1792 av_bprint_finalize(&buf_script, NULL);
1793 if (is_last_report) {
1794 if ((ret = avio_closep(&progress_avio)) < 0)
1795 av_log(NULL, AV_LOG_ERROR,
1796 "Error closing progress log, loss of information possible: %s\n", av_err2str(ret));
1801 print_final_stats(total_size);
1804 static void flush_encoders(void)
1808 for (i = 0; i < nb_output_streams; i++) {
1809 OutputStream *ost = output_streams[i];
1810 AVCodecContext *enc = ost->enc_ctx;
1811 OutputFile *of = output_files[ost->file_index];
1813 if (!ost->encoding_needed)
1816 if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
1818 #if FF_API_LAVF_FMT_RAWPICTURE
1819 if (enc->codec_type == AVMEDIA_TYPE_VIDEO && (of->ctx->oformat->flags & AVFMT_RAWPICTURE) && enc->codec->id == AV_CODEC_ID_RAWVIDEO)
1823 if (enc->codec_type != AVMEDIA_TYPE_VIDEO && enc->codec_type != AVMEDIA_TYPE_AUDIO)
1826 avcodec_send_frame(enc, NULL);
1829 const char *desc = NULL;
1833 switch (enc->codec_type) {
1834 case AVMEDIA_TYPE_AUDIO:
1837 case AVMEDIA_TYPE_VIDEO:
1844 av_init_packet(&pkt);
1848 update_benchmark(NULL);
1849 ret = avcodec_receive_packet(enc, &pkt);
1850 update_benchmark("flush_%s %d.%d", desc, ost->file_index, ost->index);
1851 if (ret < 0 && ret != AVERROR_EOF) {
1852 av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
1857 if (ost->logfile && enc->stats_out) {
1858 fprintf(ost->logfile, "%s", enc->stats_out);
1860 if (ret == AVERROR_EOF) {
1863 if (ost->finished & MUXER_FINISHED) {
1864 av_packet_unref(&pkt);
1867 av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
1868 pkt_size = pkt.size;
1869 output_packet(of, &pkt, ost);
1870 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO && vstats_filename) {
1871 do_video_stats(ost, pkt_size);
1878 * Check whether a packet from ist should be written into ost at this time
1880 static int check_output_constraints(InputStream *ist, OutputStream *ost)
1882 OutputFile *of = output_files[ost->file_index];
1883 int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
1885 if (ost->source_index != ist_index)
1891 if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
1897 static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
1899 OutputFile *of = output_files[ost->file_index];
1900 InputFile *f = input_files [ist->file_index];
1901 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1902 int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->mux_timebase);
1906 av_init_packet(&opkt);
1908 if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
1909 !ost->copy_initial_nonkeyframes)
1912 if (!ost->frame_number && !ost->copy_prior_start) {
1913 int64_t comp_start = start_time;
1914 if (copy_ts && f->start_time != AV_NOPTS_VALUE)
1915 comp_start = FFMAX(start_time, f->start_time + f->ts_offset);
1916 if (pkt->pts == AV_NOPTS_VALUE ?
1917 ist->pts < comp_start :
1918 pkt->pts < av_rescale_q(comp_start, AV_TIME_BASE_Q, ist->st->time_base))
1922 if (of->recording_time != INT64_MAX &&
1923 ist->pts >= of->recording_time + start_time) {
1924 close_output_stream(ost);
1928 if (f->recording_time != INT64_MAX) {
1929 start_time = f->ctx->start_time;
1930 if (f->start_time != AV_NOPTS_VALUE && copy_ts)
1931 start_time += f->start_time;
1932 if (ist->pts >= f->recording_time + start_time) {
1933 close_output_stream(ost);
1938 /* force the input stream PTS */
1939 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
1942 if (pkt->pts != AV_NOPTS_VALUE)
1943 opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->mux_timebase) - ost_tb_start_time;
1945 opkt.pts = AV_NOPTS_VALUE;
1947 if (pkt->dts == AV_NOPTS_VALUE)
1948 opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->mux_timebase);
1950 opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->mux_timebase);
1951 opkt.dts -= ost_tb_start_time;
1953 if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && pkt->dts != AV_NOPTS_VALUE) {
1954 int duration = av_get_audio_frame_duration(ist->dec_ctx, pkt->size);
1956 duration = ist->dec_ctx->frame_size;
1957 opkt.dts = opkt.pts = av_rescale_delta(ist->st->time_base, pkt->dts,
1958 (AVRational){1, ist->dec_ctx->sample_rate}, duration, &ist->filter_in_rescale_delta_last,
1959 ost->mux_timebase) - ost_tb_start_time;
1962 opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->mux_timebase);
1964 opkt.flags = pkt->flags;
1965 // FIXME: remove the following 2 lines; they should be replaced by the bitstream filters
1966 if ( ost->st->codecpar->codec_id != AV_CODEC_ID_H264
1967 && ost->st->codecpar->codec_id != AV_CODEC_ID_MPEG1VIDEO
1968 && ost->st->codecpar->codec_id != AV_CODEC_ID_MPEG2VIDEO
1969 && ost->st->codecpar->codec_id != AV_CODEC_ID_VC1
1971 int ret = av_parser_change(ost->parser, ost->parser_avctx,
1972 &opkt.data, &opkt.size,
1973 pkt->data, pkt->size,
1974 pkt->flags & AV_PKT_FLAG_KEY);
1976 av_log(NULL, AV_LOG_FATAL, "av_parser_change failed: %s\n",
1981 opkt.buf = av_buffer_create(opkt.data, opkt.size, av_buffer_default_free, NULL, 0);
1986 opkt.data = pkt->data;
1987 opkt.size = pkt->size;
1989 av_copy_packet_side_data(&opkt, pkt);
1991 #if FF_API_LAVF_FMT_RAWPICTURE
1992 if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO &&
1993 ost->st->codecpar->codec_id == AV_CODEC_ID_RAWVIDEO &&
1994 (of->ctx->oformat->flags & AVFMT_RAWPICTURE)) {
1995 /* store AVPicture in AVPacket, as expected by the output format */
1996 int ret = avpicture_fill(&pict, opkt.data, ost->st->codecpar->format, ost->st->codecpar->width, ost->st->codecpar->height);
1998 av_log(NULL, AV_LOG_FATAL, "avpicture_fill failed: %s\n",
2002 opkt.data = (uint8_t *)&pict;
2003 opkt.size = sizeof(AVPicture);
2004 opkt.flags |= AV_PKT_FLAG_KEY;
2008 output_packet(of, &opkt, ost);
2011 int guess_input_channel_layout(InputStream *ist)
2013 AVCodecContext *dec = ist->dec_ctx;
2015 if (!dec->channel_layout) {
2016 char layout_name[256];
2018 if (dec->channels > ist->guess_layout_max)
2020 dec->channel_layout = av_get_default_channel_layout(dec->channels);
2021 if (!dec->channel_layout)
2023 av_get_channel_layout_string(layout_name, sizeof(layout_name),
2024 dec->channels, dec->channel_layout);
2025 av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
2026 "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
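/* Editorial sketch, assumption, not part of the original source: for a decoder
 * reporting 2 channels and no layout, the code above does
 *
 *     av_get_default_channel_layout(2)                        -> AV_CH_LAYOUT_STEREO
 *     av_get_channel_layout_string(buf, sizeof(buf), 2,
 *                                  AV_CH_LAYOUT_STEREO)       -> "stereo"
 *
 * and only warns; streams with more channels than the guess_layout_max limit
 * are skipped by the (elided) branch above. */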
2031 static void check_decode_result(InputStream *ist, int *got_output, int ret)
2033 if (*got_output || ret<0)
2034 decode_error_stat[ret<0] ++;
2036 if (ret < 0 && exit_on_error)
2039 if (exit_on_error && *got_output && ist) {
2040 if (av_frame_get_decode_error_flags(ist->decoded_frame) || (ist->decoded_frame->flags & AV_FRAME_FLAG_CORRUPT)) {
2041 av_log(NULL, AV_LOG_FATAL, "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->filename, ist->st->index);
2047 // This does not quite work like avcodec_decode_audio4/avcodec_decode_video2.
2048 // There is the following difference: if you got a frame, you must call
2049 // this function again with pkt=NULL. pkt==NULL is treated differently from pkt.size==0
2050 // (pkt==NULL means get more output, pkt.size==0 is a flush/drain packet)
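// Editorial sketch, not part of the original source: for comparison, the
// standard receive loop with the new send/receive API looks like this; the
// decode() wrapper below deliberately returns after at most one frame, so the
// caller drives the loop by calling it again with pkt == NULL:
//
//     ret = avcodec_send_packet(avctx, pkt);        // pkt == NULL enters drain mode
//     if (ret < 0 && ret != AVERROR_EOF)
//         return ret;
//     for (;;) {
//         ret = avcodec_receive_frame(avctx, frame);
//         if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
//             break;                                // needs more input / fully drained
//         if (ret < 0)
//             return ret;                           // a real decoding error
//         // ... use frame, then av_frame_unref(frame)
//     }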
2051 static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
2058 ret = avcodec_send_packet(avctx, pkt);
2059 // In particular, we don't expect AVERROR(EAGAIN), because we read all
2060 // decoded frames with avcodec_receive_frame() until done.
2061 if (ret < 0 && ret != AVERROR_EOF)
2065 ret = avcodec_receive_frame(avctx, frame);
2066 if (ret < 0 && ret != AVERROR(EAGAIN))
2074 static int send_frame_to_filters(InputStream *ist, AVFrame *decoded_frame)
2079 av_assert1(ist->nb_filters > 0); /* ensure ret is initialized */
2080 for (i = 0; i < ist->nb_filters; i++) {
2081 if (i < ist->nb_filters - 1) {
2082 f = ist->filter_frame;
2083 ret = av_frame_ref(f, decoded_frame);
2088 ret = av_buffersrc_add_frame_flags(ist->filters[i]->filter, f,
2089 AV_BUFFERSRC_FLAG_PUSH);
2090 if (ret == AVERROR_EOF)
2091 ret = 0; /* ignore */
2093 av_log(NULL, AV_LOG_ERROR,
2094 "Failed to inject frame into filter network: %s\n", av_err2str(ret));
2101 static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
2103 AVFrame *decoded_frame;
2104 AVCodecContext *avctx = ist->dec_ctx;
2105 int i, ret, err = 0, resample_changed;
2106 AVRational decoded_frame_tb;
2108 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2109 return AVERROR(ENOMEM);
2110 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2111 return AVERROR(ENOMEM);
2112 decoded_frame = ist->decoded_frame;
2114 update_benchmark(NULL);
2115 ret = decode(avctx, decoded_frame, got_output, pkt);
2116 update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
2118 if (ret >= 0 && avctx->sample_rate <= 0) {
2119 av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
2120 ret = AVERROR_INVALIDDATA;
2123 if (ret != AVERROR_EOF)
2124 check_decode_result(ist, got_output, ret);
2126 if (!*got_output || ret < 0)
2129 ist->samples_decoded += decoded_frame->nb_samples;
2130 ist->frames_decoded++;
2133 /* increment next_dts to use for the case where the input stream does not
2134 have timestamps or there are multiple frames in the packet */
2135 ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2137 ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2141 resample_changed = ist->resample_sample_fmt != decoded_frame->format ||
2142 ist->resample_channels != avctx->channels ||
2143 ist->resample_channel_layout != decoded_frame->channel_layout ||
2144 ist->resample_sample_rate != decoded_frame->sample_rate;
2145 if (resample_changed) {
2146 char layout1[64], layout2[64];
2148 if (!guess_input_channel_layout(ist)) {
2149 av_log(NULL, AV_LOG_FATAL, "Unable to find default channel "
2150 "layout for Input Stream #%d.%d\n", ist->file_index,
2154 decoded_frame->channel_layout = avctx->channel_layout;
2156 av_get_channel_layout_string(layout1, sizeof(layout1), ist->resample_channels,
2157 ist->resample_channel_layout);
2158 av_get_channel_layout_string(layout2, sizeof(layout2), avctx->channels,
2159 decoded_frame->channel_layout);
2161 av_log(NULL, AV_LOG_INFO,
2162 "Input stream #%d:%d frame changed from rate:%d fmt:%s ch:%d chl:%s to rate:%d fmt:%s ch:%d chl:%s\n",
2163 ist->file_index, ist->st->index,
2164 ist->resample_sample_rate, av_get_sample_fmt_name(ist->resample_sample_fmt),
2165 ist->resample_channels, layout1,
2166 decoded_frame->sample_rate, av_get_sample_fmt_name(decoded_frame->format),
2167 avctx->channels, layout2);
2169 ist->resample_sample_fmt = decoded_frame->format;
2170 ist->resample_sample_rate = decoded_frame->sample_rate;
2171 ist->resample_channel_layout = decoded_frame->channel_layout;
2172 ist->resample_channels = avctx->channels;
2174 for (i = 0; i < ist->nb_filters; i++) {
2175 err = ifilter_parameters_from_frame(ist->filters[i], decoded_frame);
2177 av_log(NULL, AV_LOG_ERROR,
2178 "Error reconfiguring input stream %d:%d filter %d\n",
2179 ist->file_index, ist->st->index, i);
2184 for (i = 0; i < nb_filtergraphs; i++)
2185 if (ist_in_filtergraph(filtergraphs[i], ist)) {
2186 FilterGraph *fg = filtergraphs[i];
2187 if (configure_filtergraph(fg) < 0) {
2188 av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
2194 if (decoded_frame->pts != AV_NOPTS_VALUE) {
2195 decoded_frame_tb = ist->st->time_base;
2196 } else if (pkt && pkt->pts != AV_NOPTS_VALUE) {
2197 decoded_frame->pts = pkt->pts;
2198 decoded_frame_tb = ist->st->time_base;
2200 decoded_frame->pts = ist->dts;
2201 decoded_frame_tb = AV_TIME_BASE_Q;
2203 if (decoded_frame->pts != AV_NOPTS_VALUE)
2204 decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
2205 (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
2206 (AVRational){1, avctx->sample_rate});
2207 ist->nb_samples = decoded_frame->nb_samples;
2208 err = send_frame_to_filters(ist, decoded_frame);
2209 decoded_frame->pts = AV_NOPTS_VALUE;
2212 av_frame_unref(ist->filter_frame);
2213 av_frame_unref(decoded_frame);
2214 return err < 0 ? err : ret;
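/* Decode one video packet (or flush when pkt is NULL / eof is set).  Applies
 * the best-effort timestamp (falling back to buffered packet dts at EOF),
 * detects frame size or pixel format changes and reconfigures the
 * filtergraphs, then forwards the frame to the filters. */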
2217 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int eof)
2219 AVFrame *decoded_frame;
2220 int i, ret = 0, err = 0, resample_changed;
2221 int64_t best_effort_timestamp;
2222 int64_t dts = AV_NOPTS_VALUE;
2225 // With fate-indeo3-2, we're getting 0-sized packets before EOF for some
2226 // reason. This seems like a semi-critical bug. Don't trigger EOF, and skip such packets.
2228 if (!eof && pkt && pkt->size == 0)
2231 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2232 return AVERROR(ENOMEM);
2233 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2234 return AVERROR(ENOMEM);
2235 decoded_frame = ist->decoded_frame;
2236 if (ist->dts != AV_NOPTS_VALUE)
2237 dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
2240 avpkt.dts = dts; // ffmpeg.c probably shouldn't do this
2243 // The old code used to set dts on the drain packet, which does not work
2244 // with the new API anymore.
2246 void *new = av_realloc_array(ist->dts_buffer, ist->nb_dts_buffer + 1, sizeof(ist->dts_buffer[0]));
2248 return AVERROR(ENOMEM);
2249 ist->dts_buffer = new;
2250 ist->dts_buffer[ist->nb_dts_buffer++] = dts;
2253 update_benchmark(NULL);
2254 ret = decode(ist->dec_ctx, decoded_frame, got_output, pkt ? &avpkt : NULL);
2255 update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
2257 // The following line may be required in some cases where there is no parser
2258 // or the parser does not set has_b_frames correctly
2259 if (ist->st->codecpar->video_delay < ist->dec_ctx->has_b_frames) {
2260 if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
2261 ist->st->codecpar->video_delay = ist->dec_ctx->has_b_frames;
2263 av_log(ist->dec_ctx, AV_LOG_WARNING,
2264 "video_delay is larger in decoder than demuxer %d > %d.\n"
2265 "If you want to help, upload a sample "
2266 "of this file to ftp://upload.ffmpeg.org/incoming/ "
2267 "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)\n",
2268 ist->dec_ctx->has_b_frames,
2269 ist->st->codecpar->video_delay);
2272 if (ret != AVERROR_EOF)
2273 check_decode_result(ist, got_output, ret);
2275 if (*got_output && ret >= 0) {
2276 if (ist->dec_ctx->width != decoded_frame->width ||
2277 ist->dec_ctx->height != decoded_frame->height ||
2278 ist->dec_ctx->pix_fmt != decoded_frame->format) {
2279 av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
2280 decoded_frame->width,
2281 decoded_frame->height,
2282 decoded_frame->format,
2283 ist->dec_ctx->width,
2284 ist->dec_ctx->height,
2285 ist->dec_ctx->pix_fmt);
2289 if (!*got_output || ret < 0)
2292 if(ist->top_field_first>=0)
2293 decoded_frame->top_field_first = ist->top_field_first;
2295 ist->frames_decoded++;
2297 if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
2298 err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
2302 ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
2304 best_effort_timestamp= av_frame_get_best_effort_timestamp(decoded_frame);
2306 if (eof && best_effort_timestamp == AV_NOPTS_VALUE && ist->nb_dts_buffer > 0) {
2307 best_effort_timestamp = ist->dts_buffer[0];
2309 for (i = 0; i < ist->nb_dts_buffer - 1; i++)
2310 ist->dts_buffer[i] = ist->dts_buffer[i + 1];
2311 ist->nb_dts_buffer--;
2314 if(best_effort_timestamp != AV_NOPTS_VALUE) {
2315 int64_t ts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
2317 if (ts != AV_NOPTS_VALUE)
2318 ist->next_pts = ist->pts = ts;
2322 av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
2323 "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
2324 ist->st->index, av_ts2str(decoded_frame->pts),
2325 av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
2326 best_effort_timestamp,
2327 av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
2328 decoded_frame->key_frame, decoded_frame->pict_type,
2329 ist->st->time_base.num, ist->st->time_base.den);
2332 if (ist->st->sample_aspect_ratio.num)
2333 decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
2335 resample_changed = ist->resample_width != decoded_frame->width ||
2336 ist->resample_height != decoded_frame->height ||
2337 ist->resample_pix_fmt != decoded_frame->format;
2338 if (resample_changed) {
2339 av_log(NULL, AV_LOG_INFO,
2340 "Input stream #%d:%d frame changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s\n",
2341 ist->file_index, ist->st->index,
2342 ist->resample_width, ist->resample_height, av_get_pix_fmt_name(ist->resample_pix_fmt),
2343 decoded_frame->width, decoded_frame->height, av_get_pix_fmt_name(decoded_frame->format));
2345 ist->resample_width = decoded_frame->width;
2346 ist->resample_height = decoded_frame->height;
2347 ist->resample_pix_fmt = decoded_frame->format;
2349 for (i = 0; i < ist->nb_filters; i++) {
2350 err = ifilter_parameters_from_frame(ist->filters[i], decoded_frame);
2352 av_log(NULL, AV_LOG_ERROR,
2353 "Error reconfiguring input stream %d:%d filter %d\n",
2354 ist->file_index, ist->st->index, i);
2359 for (i = 0; i < nb_filtergraphs; i++) {
2360 if (ist_in_filtergraph(filtergraphs[i], ist) && ist->reinit_filters &&
2361 configure_filtergraph(filtergraphs[i]) < 0) {
2362 av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
2368 err = send_frame_to_filters(ist, decoded_frame);
2371 av_frame_unref(ist->filter_frame);
2372 av_frame_unref(decoded_frame);
2373 return err < 0 ? err : ret;
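/* Decode a subtitle packet.  Optionally shortens overlapping subtitle
 * durations (-fix_sub_duration), feeds sub2video for filtering, and sends the
 * decoded subtitle to every output stream that encodes subtitles from this
 * input. */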
2376 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output)
2378 AVSubtitle subtitle;
2379 int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
2380 &subtitle, got_output, pkt);
2382 check_decode_result(NULL, got_output, ret);
2384 if (ret < 0 || !*got_output) {
2386 sub2video_flush(ist);
2390 if (ist->fix_sub_duration) {
2392 if (ist->prev_sub.got_output) {
2393 end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
2394 1000, AV_TIME_BASE);
2395 if (end < ist->prev_sub.subtitle.end_display_time) {
2396 av_log(ist->dec_ctx, AV_LOG_DEBUG,
2397 "Subtitle duration reduced from %d to %d%s\n",
2398 ist->prev_sub.subtitle.end_display_time, end,
2399 end <= 0 ? ", dropping it" : "");
2400 ist->prev_sub.subtitle.end_display_time = end;
2403 FFSWAP(int, *got_output, ist->prev_sub.got_output);
2404 FFSWAP(int, ret, ist->prev_sub.ret);
2405 FFSWAP(AVSubtitle, subtitle, ist->prev_sub.subtitle);
2413 sub2video_update(ist, &subtitle);
2415 if (!subtitle.num_rects)
2418 ist->frames_decoded++;
2420 for (i = 0; i < nb_output_streams; i++) {
2421 OutputStream *ost = output_streams[i];
2423 if (!check_output_constraints(ist, ost) || !ost->encoding_needed
2424 || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
2427 do_subtitle_out(output_files[ost->file_index], ost, &subtitle);
2431 avsubtitle_free(&subtitle);
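/* Signal EOF to all filtergraph inputs attached to this input stream by
 * sending a NULL frame to each buffer source. */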
2435 static int send_filter_eof(InputStream *ist)
2438 for (i = 0; i < ist->nb_filters; i++) {
2439 ret = av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
2446 /* pkt = NULL means EOF (needed to flush decoder buffers) */
2447 static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
2451 int eof_reached = 0;
2454 if (!ist->saw_first_ts) {
2455 ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2457 if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
2458 ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2459 ist->pts = ist->dts; // unused, but better to set it to a value that's not totally wrong
2461 ist->saw_first_ts = 1;
2464 if (ist->next_dts == AV_NOPTS_VALUE)
2465 ist->next_dts = ist->dts;
2466 if (ist->next_pts == AV_NOPTS_VALUE)
2467 ist->next_pts = ist->pts;
2471 av_init_packet(&avpkt);
2478 if (pkt && pkt->dts != AV_NOPTS_VALUE) {
2479 ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2480 if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
2481 ist->next_pts = ist->pts = ist->dts;
2484 // while we have more to decode or while the decoder did output something on EOF
2485 while (ist->decoding_needed) {
2489 ist->pts = ist->next_pts;
2490 ist->dts = ist->next_dts;
2492 switch (ist->dec_ctx->codec_type) {
2493 case AVMEDIA_TYPE_AUDIO:
2494 ret = decode_audio (ist, repeating ? NULL : &avpkt, &got_output);
2496 case AVMEDIA_TYPE_VIDEO:
2497 ret = decode_video (ist, repeating ? NULL : &avpkt, &got_output, !pkt);
2498 if (!repeating || !pkt || got_output) {
2499 if (pkt && pkt->duration) {
2500 duration = av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2501 } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
2502 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict+1 : ist->dec_ctx->ticks_per_frame;
2503 duration = ((int64_t)AV_TIME_BASE *
2504 ist->dec_ctx->framerate.den * ticks) /
2505 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2508 if(ist->dts != AV_NOPTS_VALUE && duration) {
2509 ist->next_dts += duration;
2511 ist->next_dts = AV_NOPTS_VALUE;
2515 ist->next_pts += duration; //FIXME the duration is not correct in some cases
2517 case AVMEDIA_TYPE_SUBTITLE:
2520 ret = transcode_subtitles(ist, &avpkt, &got_output);
2521 if (!pkt && ret >= 0)
2528 if (ret == AVERROR_EOF) {
2534 av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
2535 ist->file_index, ist->st->index, av_err2str(ret));
2538 // Decoding might not terminate if we're draining the decoder, and
2539 // the decoder keeps returning an error.
2540 // This should probably be considered a libavcodec issue.
2541 // Sample: fate-vsynth1-dnxhd-720p-hr-lb
2550 // During draining, we might get multiple output frames in this loop.
2551 // ffmpeg.c does not drain the filter chain on configuration changes,
2552 // which means if we send multiple frames at once to the filters, and
2553 // one of those frames changes configuration, the buffered frames will
2554 // be lost. This can upset certain FATE tests.
2555 // Decode only 1 frame per call on EOF to appease these FATE tests.
2556 // The ideal solution would be to rewrite decoding to use the new
2557 // decoding API in a better way.
2564 /* after flushing, send an EOF on all the filter inputs attached to the stream */
2565 /* except when looping, where we need to flush but not send an EOF */
2566 if (!pkt && ist->decoding_needed && eof_reached && !no_eof) {
2567 int ret = send_filter_eof(ist);
2569 av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
2574 /* handle stream copy */
2575 if (!ist->decoding_needed) {
2576 ist->dts = ist->next_dts;
2577 switch (ist->dec_ctx->codec_type) {
2578 case AVMEDIA_TYPE_AUDIO:
2579 ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
2580 ist->dec_ctx->sample_rate;
2582 case AVMEDIA_TYPE_VIDEO:
2583 if (ist->framerate.num) {
2584 // TODO: Remove work-around for c99-to-c89 issue 7
2585 AVRational time_base_q = AV_TIME_BASE_Q;
2586 int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
2587 ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
2588 } else if (pkt->duration) {
2589 ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2590 } else if(ist->dec_ctx->framerate.num != 0) {
2591 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2592 ist->next_dts += ((int64_t)AV_TIME_BASE *
2593 ist->dec_ctx->framerate.den * ticks) /
2594 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2598 ist->pts = ist->dts;
2599 ist->next_pts = ist->next_dts;
2601 for (i = 0; pkt && i < nb_output_streams; i++) {
2602 OutputStream *ost = output_streams[i];
2604 if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2607 do_streamcopy(ist, ost, pkt);
2610 return !eof_reached;
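/* Print an SDP description covering all RTP output files, either to stdout or
 * to the file given with -sdp_file.  Does nothing until every output file has
 * written its header. */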
2613 static void print_sdp(void)
2618 AVIOContext *sdp_pb;
2619 AVFormatContext **avc;
2621 for (i = 0; i < nb_output_files; i++) {
2622 if (!output_files[i]->header_written)
2626 avc = av_malloc_array(nb_output_files, sizeof(*avc));
2629 for (i = 0, j = 0; i < nb_output_files; i++) {
2630 if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
2631 avc[j] = output_files[i]->ctx;
2639 av_sdp_create(avc, j, sdp, sizeof(sdp));
2641 if (!sdp_filename) {
2642 printf("SDP:\n%s\n", sdp);
2645 if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
2646 av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
2648 avio_printf(sdp_pb, "SDP:\n%s", sdp);
2649 avio_closep(&sdp_pb);
2650 av_freep(&sdp_filename);
2658 static const HWAccel *get_hwaccel(enum AVPixelFormat pix_fmt)
2661 for (i = 0; hwaccels[i].name; i++)
2662 if (hwaccels[i].pix_fmt == pix_fmt)
2663 return &hwaccels[i];
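/* AVCodecContext.get_format callback: pick the first offered hardware pixel
 * format for which the requested hwaccel can be initialized, setting up
 * hw_frames_ctx when the input stream provides one; otherwise fall back to a
 * software format. */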
2667 static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
2669 InputStream *ist = s->opaque;
2670 const enum AVPixelFormat *p;
2673 for (p = pix_fmts; *p != -1; p++) {
2674 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p);
2675 const HWAccel *hwaccel;
2677 if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
2680 hwaccel = get_hwaccel(*p);
2682 (ist->active_hwaccel_id && ist->active_hwaccel_id != hwaccel->id) ||
2683 (ist->hwaccel_id != HWACCEL_AUTO && ist->hwaccel_id != hwaccel->id))
2686 ret = hwaccel->init(s);
2688 if (ist->hwaccel_id == hwaccel->id) {
2689 av_log(NULL, AV_LOG_FATAL,
2690 "%s hwaccel requested for input stream #%d:%d, "
2691 "but cannot be initialized.\n", hwaccel->name,
2692 ist->file_index, ist->st->index);
2693 return AV_PIX_FMT_NONE;
2698 if (ist->hw_frames_ctx) {
2699 s->hw_frames_ctx = av_buffer_ref(ist->hw_frames_ctx);
2700 if (!s->hw_frames_ctx)
2701 return AV_PIX_FMT_NONE;
2704 ist->active_hwaccel_id = hwaccel->id;
2705 ist->hwaccel_pix_fmt = *p;
2712 static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
2714 InputStream *ist = s->opaque;
2716 if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
2717 return ist->hwaccel_get_buffer(s, frame, flags);
2719 return avcodec_default_get_buffer2(s, frame, flags);
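/* Initialize one input stream: pass the stream parameters to its filters and,
 * when decoding is needed, open the decoder with the accumulated decoder
 * options, the stream time base and the get_format/get_buffer2 callbacks used
 * for hwaccel support. */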
2722 static int init_input_stream(int ist_index, char *error, int error_len)
2725 InputStream *ist = input_streams[ist_index];
2727 for (i = 0; i < ist->nb_filters; i++) {
2728 ret = ifilter_parameters_from_decoder(ist->filters[i], ist->dec_ctx);
2730 av_log(NULL, AV_LOG_FATAL, "Error initializing filter input\n");
2735 if (ist->decoding_needed) {
2736 AVCodec *codec = ist->dec;
2738 snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
2739 avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
2740 return AVERROR(EINVAL);
2743 ist->dec_ctx->opaque = ist;
2744 ist->dec_ctx->get_format = get_format;
2745 ist->dec_ctx->get_buffer2 = get_buffer;
2746 ist->dec_ctx->thread_safe_callbacks = 1;
2748 av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
2749 if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
2750 (ist->decoding_needed & DECODING_FOR_OST)) {
2751 av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
2752 if (ist->decoding_needed & DECODING_FOR_FILTER)
2753 av_log(NULL, AV_LOG_WARNING, "Warning: using DVB subtitles for filtering and output at the same time is not fully supported; also see -compute_edt [0|1]\n");
2756 av_dict_set(&ist->decoder_opts, "sub_text_format", "ass", AV_DICT_DONT_OVERWRITE);
2758 /* Useful for subtitles retiming by lavf (FIXME), skipping samples in
2759 * audio, and video decoders such as cuvid or mediacodec */
2760 av_codec_set_pkt_timebase(ist->dec_ctx, ist->st->time_base);
2762 if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
2763 av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
2764 if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
2765 if (ret == AVERROR_EXPERIMENTAL)
2766 abort_codec_experimental(codec, 0);
2768 snprintf(error, error_len,
2769 "Error while opening decoder for input stream "
2771 ist->file_index, ist->st->index, av_err2str(ret));
2774 assert_avoptions(ist->decoder_opts);
2777 ist->next_pts = AV_NOPTS_VALUE;
2778 ist->next_dts = AV_NOPTS_VALUE;
2783 static InputStream *get_input_stream(OutputStream *ost)
2785 if (ost->source_index >= 0)
2786 return input_streams[ost->source_index];
2790 static int compare_int64(const void *a, const void *b)
2792 return FFDIFFSIGN(*(const int64_t *)a, *(const int64_t *)b);
2795 /* open the muxer when all the streams are initialized */
2796 static int check_init_output_file(OutputFile *of, int file_index)
2800 for (i = 0; i < of->ctx->nb_streams; i++) {
2801 OutputStream *ost = output_streams[of->ost_index + i];
2802 if (!ost->initialized)
2806 of->ctx->interrupt_callback = int_cb;
2808 ret = avformat_write_header(of->ctx, &of->opts);
2810 av_log(NULL, AV_LOG_ERROR,
2811 "Could not write header for output file #%d "
2812 "(incorrect codec parameters ?): %s\n",
2813 file_index, av_err2str(ret));
2816 //assert_avoptions(of->opts);
2817 of->header_written = 1;
2819 av_dump_format(of->ctx, file_index, of->ctx->filename, 1);
2821 if (sdp_filename || want_sdp)
2824 /* flush the muxing queues */
2825 for (i = 0; i < of->ctx->nb_streams; i++) {
2826 OutputStream *ost = output_streams[of->ost_index + i];
2828 /* try to improve muxing time_base (only possible if nothing has been written yet) */
2829 if (!av_fifo_size(ost->muxing_queue))
2830 ost->mux_timebase = ost->st->time_base;
2832 while (av_fifo_size(ost->muxing_queue)) {
2834 av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
2835 write_packet(of, &pkt, ost);
2842 static int init_output_bsfs(OutputStream *ost)
2847 if (!ost->nb_bitstream_filters)
2850 for (i = 0; i < ost->nb_bitstream_filters; i++) {
2851 ctx = ost->bsf_ctx[i];
2853 ret = avcodec_parameters_copy(ctx->par_in,
2854 i ? ost->bsf_ctx[i - 1]->par_out : ost->st->codecpar);
2858 ctx->time_base_in = i ? ost->bsf_ctx[i - 1]->time_base_out : ost->st->time_base;
2860 ret = av_bsf_init(ctx);
2862 av_log(NULL, AV_LOG_ERROR, "Error initializing bitstream filter: %s\n",
2863 ost->bsf_ctx[i]->filter->name);
2868 ctx = ost->bsf_ctx[ost->nb_bitstream_filters - 1];
2869 ret = avcodec_parameters_copy(ost->st->codecpar, ctx->par_out);
2873 ost->st->time_base = ctx->time_base_out;
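/* Set up an output stream in streamcopy mode: copy the codec parameters and
 * applicable side data from the input stream, carry over timing information
 * (frame rate, time base, estimated duration) and disposition, then apply the
 * codec-type specific fixups below. */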
2878 static int init_output_stream_streamcopy(OutputStream *ost)
2880 OutputFile *of = output_files[ost->file_index];
2881 InputStream *ist = get_input_stream(ost);
2882 AVCodecParameters *par_dst = ost->st->codecpar;
2883 AVCodecParameters *par_src = ost->ref_par;
2886 uint32_t codec_tag = par_dst->codec_tag;
2888 av_assert0(ist && !ost->filter);
2890 ret = avcodec_parameters_to_context(ost->enc_ctx, ist->st->codecpar);
2892 ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
2894 av_log(NULL, AV_LOG_FATAL,
2895 "Error setting up codec context options.\n");
2898 avcodec_parameters_from_context(par_src, ost->enc_ctx);
2901 unsigned int codec_tag_tmp;
2902 if (!of->ctx->oformat->codec_tag ||
2903 av_codec_get_id (of->ctx->oformat->codec_tag, par_src->codec_tag) == par_src->codec_id ||
2904 !av_codec_get_tag2(of->ctx->oformat->codec_tag, par_src->codec_id, &codec_tag_tmp))
2905 codec_tag = par_src->codec_tag;
2908 ret = avcodec_parameters_copy(par_dst, par_src);
2912 par_dst->codec_tag = codec_tag;
2914 if (!ost->frame_rate.num)
2915 ost->frame_rate = ist->framerate;
2916 ost->st->avg_frame_rate = ost->frame_rate;
2918 ret = avformat_transfer_internal_stream_timing_info(of->ctx->oformat, ost->st, ist->st, copy_tb);
2922 // copy timebase while removing common factors
2923 if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
2924 ost->st->time_base = av_add_q(av_stream_get_codec_timebase(ost->st), (AVRational){0, 1});
2926 // copy estimated duration as a hint to the muxer
2927 if (ost->st->duration <= 0 && ist->st->duration > 0)
2928 ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
2931 ost->st->disposition = ist->st->disposition;
2933 if (ist->st->nb_side_data) {
2934 ost->st->side_data = av_realloc_array(NULL, ist->st->nb_side_data,
2935 sizeof(*ist->st->side_data));
2936 if (!ost->st->side_data)
2937 return AVERROR(ENOMEM);
2939 ost->st->nb_side_data = 0;
2940 for (i = 0; i < ist->st->nb_side_data; i++) {
2941 const AVPacketSideData *sd_src = &ist->st->side_data[i];
2942 AVPacketSideData *sd_dst = &ost->st->side_data[ost->st->nb_side_data];
2944 if (ost->rotate_overridden && sd_src->type == AV_PKT_DATA_DISPLAYMATRIX)
2947 sd_dst->data = av_malloc(sd_src->size);
2949 return AVERROR(ENOMEM);
2950 memcpy(sd_dst->data, sd_src->data, sd_src->size);
2951 sd_dst->size = sd_src->size;
2952 sd_dst->type = sd_src->type;
2953 ost->st->nb_side_data++;
2957 ost->parser = av_parser_init(par_dst->codec_id);
2958 ost->parser_avctx = avcodec_alloc_context3(NULL);
2959 if (!ost->parser_avctx)
2960 return AVERROR(ENOMEM);
2962 switch (par_dst->codec_type) {
2963 case AVMEDIA_TYPE_AUDIO:
2964 if (audio_volume != 256) {
2965 av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
2968 if((par_dst->block_align == 1 || par_dst->block_align == 1152 || par_dst->block_align == 576) && par_dst->codec_id == AV_CODEC_ID_MP3)
2969 par_dst->block_align= 0;
2970 if(par_dst->codec_id == AV_CODEC_ID_AC3)
2971 par_dst->block_align= 0;
2973 case AVMEDIA_TYPE_VIDEO:
2974 if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
2976 av_mul_q(ost->frame_aspect_ratio,
2977 (AVRational){ par_dst->height, par_dst->width });
2978 av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
2979 "with stream copy may produce invalid files\n");
2981 else if (ist->st->sample_aspect_ratio.num)
2982 sar = ist->st->sample_aspect_ratio;
2984 sar = par_src->sample_aspect_ratio;
2985 ost->st->sample_aspect_ratio = par_dst->sample_aspect_ratio = sar;
2986 ost->st->avg_frame_rate = ist->st->avg_frame_rate;
2987 ost->st->r_frame_rate = ist->st->r_frame_rate;
2991 ost->mux_timebase = ist->st->time_base;
2996 static void set_encoder_id(OutputFile *of, OutputStream *ost)
2998 AVDictionaryEntry *e;
3000 uint8_t *encoder_string;
3001 int encoder_string_len;
3002 int format_flags = 0;
3003 int codec_flags = 0;
3005 if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
3008 e = av_dict_get(of->opts, "fflags", NULL, 0);
3010 const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
3013 av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
3015 e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
3017 const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
3020 av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
3023 encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
3024 encoder_string = av_mallocz(encoder_string_len);
3025 if (!encoder_string)
3028 if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
3029 av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
3031 av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
3032 av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
3033 av_dict_set(&ost->st->metadata, "encoder", encoder_string,
3034 AV_DICT_DONT_STRDUP_VAL | AV_DICT_DONT_OVERWRITE);
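/* Parse the -force_key_frames argument into a sorted array of timestamps in
 * the encoder time base.  Entries are comma-separated times; an entry starting
 * with "chapters" (optionally followed by a time offset) expands to the
 * chapter start times of the output file, shifted by that offset. */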
3037 static void parse_forced_key_frames(char *kf, OutputStream *ost,
3038 AVCodecContext *avctx)
3041 int n = 1, i, size, index = 0;
3044 for (p = kf; *p; p++)
3048 pts = av_malloc_array(size, sizeof(*pts));
3050 av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
3055 for (i = 0; i < n; i++) {
3056 char *next = strchr(p, ',');
3061 if (!memcmp(p, "chapters", 8)) {
3063 AVFormatContext *avf = output_files[ost->file_index]->ctx;
3066 if (avf->nb_chapters > INT_MAX - size ||
3067 !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
3069 av_log(NULL, AV_LOG_FATAL,
3070 "Could not allocate forced key frames array.\n");
3073 t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
3074 t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3076 for (j = 0; j < avf->nb_chapters; j++) {
3077 AVChapter *c = avf->chapters[j];
3078 av_assert1(index < size);
3079 pts[index++] = av_rescale_q(c->start, c->time_base,
3080 avctx->time_base) + t;
3085 t = parse_time_or_die("force_key_frames", p, 1);
3086 av_assert1(index < size);
3087 pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3094 av_assert0(index == size);
3095 qsort(pts, size, sizeof(*pts), compare_int64);
3096 ost->forced_kf_count = size;
3097 ost->forced_kf_pts = pts;
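/* Fill the encoder context for an output stream that is being transcoded:
 * configure its (simple) filtergraph, derive the frame rate, sample/pixel
 * format, dimensions and time base from the buffersink, and handle the forced
 * key frame settings. */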
3100 static int init_output_stream_encode(OutputStream *ost)
3102 InputStream *ist = get_input_stream(ost);
3103 AVCodecContext *enc_ctx = ost->enc_ctx;
3104 AVCodecContext *dec_ctx = NULL;
3105 AVFormatContext *oc = output_files[ost->file_index]->ctx;
3108 set_encoder_id(output_files[ost->file_index], ost);
3111 ost->st->disposition = ist->st->disposition;
3113 dec_ctx = ist->dec_ctx;
3115 enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
3117 for (j = 0; j < oc->nb_streams; j++) {
3118 AVStream *st = oc->streams[j];
3119 if (st != ost->st && st->codecpar->codec_type == ost->st->codecpar->codec_type)
3122 if (j == oc->nb_streams)
3123 if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO ||
3124 ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
3125 ost->st->disposition = AV_DISPOSITION_DEFAULT;
3128 if ((enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3129 enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
3130 filtergraph_is_simple(ost->filter->graph)) {
3131 FilterGraph *fg = ost->filter->graph;
3133 if (configure_filtergraph(fg)) {
3134 av_log(NULL, AV_LOG_FATAL, "Error opening filters!\n");
3139 if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
3140 if (!ost->frame_rate.num)
3141 ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
3142 if (ist && !ost->frame_rate.num)
3143 ost->frame_rate = ist->framerate;
3144 if (ist && !ost->frame_rate.num)
3145 ost->frame_rate = ist->st->r_frame_rate;
3146 if (ist && !ost->frame_rate.num) {
3147 ost->frame_rate = (AVRational){25, 1};
3148 av_log(NULL, AV_LOG_WARNING,
3150 "about the input framerate is available. Falling "
3151 "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
3152 "if you want a different framerate.\n",
3153 ost->file_index, ost->index);
3155 // ost->frame_rate = ist->st->avg_frame_rate.num ? ist->st->avg_frame_rate : (AVRational){25, 1};
3156 if (ost->enc->supported_framerates && !ost->force_fps) {
3157 int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
3158 ost->frame_rate = ost->enc->supported_framerates[idx];
3160 // reduce frame rate for mpeg4 to be within the spec limits
3161 if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
3162 av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
3163 ost->frame_rate.num, ost->frame_rate.den, 65535);
3167 switch (enc_ctx->codec_type) {
3168 case AVMEDIA_TYPE_AUDIO:
3169 enc_ctx->sample_fmt = av_buffersink_get_format(ost->filter->filter);
3171 enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3172 av_get_bytes_per_sample(enc_ctx->sample_fmt) << 3);
3173 enc_ctx->sample_rate = av_buffersink_get_sample_rate(ost->filter->filter);
3174 enc_ctx->channel_layout = av_buffersink_get_channel_layout(ost->filter->filter);
3175 enc_ctx->channels = av_buffersink_get_channels(ost->filter->filter);
3176 enc_ctx->time_base = (AVRational){ 1, enc_ctx->sample_rate };
3178 case AVMEDIA_TYPE_VIDEO:
3179 enc_ctx->time_base = av_inv_q(ost->frame_rate);
3180 if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
3181 enc_ctx->time_base = av_buffersink_get_time_base(ost->filter->filter);
3182 if ( av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
3183 && (video_sync_method == VSYNC_CFR || video_sync_method == VSYNC_VSCFR || (video_sync_method == VSYNC_AUTO && !(oc->oformat->flags & AVFMT_VARIABLE_FPS)))){
3184 av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
3185 "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
3187 for (j = 0; j < ost->forced_kf_count; j++)
3188 ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j],
3190 enc_ctx->time_base);
3192 enc_ctx->width = av_buffersink_get_w(ost->filter->filter);
3193 enc_ctx->height = av_buffersink_get_h(ost->filter->filter);
3194 enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
3195 ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
3196 av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
3197 av_buffersink_get_sample_aspect_ratio(ost->filter->filter);
3198 if (!strncmp(ost->enc->name, "libx264", 7) &&
3199 enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3200 av_buffersink_get_format(ost->filter->filter) != AV_PIX_FMT_YUV420P)
3201 av_log(NULL, AV_LOG_WARNING,
3202 "No pixel format specified, %s for H.264 encoding chosen.\n"
3203 "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3204 av_get_pix_fmt_name(av_buffersink_get_format(ost->filter->filter)));
3205 if (!strncmp(ost->enc->name, "mpeg2video", 10) &&
3206 enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3207 av_buffersink_get_format(ost->filter->filter) != AV_PIX_FMT_YUV420P)
3208 av_log(NULL, AV_LOG_WARNING,
3209 "No pixel format specified, %s for MPEG-2 encoding chosen.\n"
3210 "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3211 av_get_pix_fmt_name(av_buffersink_get_format(ost->filter->filter)));
3212 enc_ctx->pix_fmt = av_buffersink_get_format(ost->filter->filter);
3214 enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3215 av_pix_fmt_desc_get(enc_ctx->pix_fmt)->comp[0].depth);
3217 ost->st->avg_frame_rate = ost->frame_rate;
3220 enc_ctx->width != dec_ctx->width ||
3221 enc_ctx->height != dec_ctx->height ||
3222 enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
3223 enc_ctx->bits_per_raw_sample = frame_bits_per_raw_sample;
3226 if (ost->forced_keyframes) {
3227 if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
3228 ret = av_expr_parse(&ost->forced_keyframes_pexpr, ost->forced_keyframes+5,
3229 forced_keyframes_const_names, NULL, NULL, NULL, NULL, 0, NULL);
3231 av_log(NULL, AV_LOG_ERROR,
3232 "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
3235 ost->forced_keyframes_expr_const_values[FKF_N] = 0;
3236 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] = 0;
3237 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] = NAN;
3238 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] = NAN;
3240 // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
3241 // parse it only for static kf timings
3242 } else if(strncmp(ost->forced_keyframes, "source", 6)) {
3243 parse_forced_key_frames(ost->forced_keyframes, ost, ost->enc_ctx);
3247 case AVMEDIA_TYPE_SUBTITLE:
3248 enc_ctx->time_base = AV_TIME_BASE_Q;
3249 if (!enc_ctx->width) {
3250 enc_ctx->width = input_streams[ost->source_index]->st->codecpar->width;
3251 enc_ctx->height = input_streams[ost->source_index]->st->codecpar->height;
3254 case AVMEDIA_TYPE_DATA:
3261 ost->mux_timebase = enc_ctx->time_base;
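/* Finish initializing one output stream: open the encoder (or run the
 * streamcopy setup), propagate parameters and side data to the muxer stream,
 * apply a user-provided disposition, initialize the bitstream filters and,
 * once all streams of the file are ready, write the output header. */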
3266 static int init_output_stream(OutputStream *ost, char *error, int error_len)
3270 if (ost->encoding_needed) {
3271 AVCodec *codec = ost->enc;
3272 AVCodecContext *dec = NULL;
3275 ret = init_output_stream_encode(ost);
3279 if ((ist = get_input_stream(ost)))
3281 if (dec && dec->subtitle_header) {
3282 /* ASS code assumes this buffer is null terminated so add extra byte. */
3283 ost->enc_ctx->subtitle_header = av_mallocz(dec->subtitle_header_size + 1);
3284 if (!ost->enc_ctx->subtitle_header)
3285 return AVERROR(ENOMEM);
3286 memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
3287 ost->enc_ctx->subtitle_header_size = dec->subtitle_header_size;
3289 if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
3290 av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
3291 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3293 !av_dict_get(ost->encoder_opts, "b", NULL, 0) &&
3294 !av_dict_get(ost->encoder_opts, "ab", NULL, 0))
3295 av_dict_set(&ost->encoder_opts, "b", "128000", 0);
3297 if (ost->filter && av_buffersink_get_hw_frames_ctx(ost->filter->filter)) {
3298 ost->enc_ctx->hw_frames_ctx = av_buffer_ref(av_buffersink_get_hw_frames_ctx(ost->filter->filter));
3299 if (!ost->enc_ctx->hw_frames_ctx)
3300 return AVERROR(ENOMEM);
3303 if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
3304 if (ret == AVERROR_EXPERIMENTAL)
3305 abort_codec_experimental(codec, 1);
3306 snprintf(error, error_len,
3307 "Error while opening encoder for output stream #%d:%d - "
3308 "maybe incorrect parameters such as bit_rate, rate, width or height",
3309 ost->file_index, ost->index);
3312 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3313 !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
3314 av_buffersink_set_frame_size(ost->filter->filter,
3315 ost->enc_ctx->frame_size);
3316 assert_avoptions(ost->encoder_opts);
3317 if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000)
3318 av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
3319 " It takes bits/s as argument, not kbits/s\n");
3321 ret = avcodec_parameters_from_context(ost->st->codecpar, ost->enc_ctx);
3323 av_log(NULL, AV_LOG_FATAL,
3324 "Error initializing the output stream codec context.\n");
3328 * FIXME: ost->st->codec shouldn't be needed here anymore.
3330 ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
3334 if (ost->enc_ctx->nb_coded_side_data) {
3337 ost->st->side_data = av_realloc_array(NULL, ost->enc_ctx->nb_coded_side_data,
3338 sizeof(*ost->st->side_data));
3339 if (!ost->st->side_data)
3340 return AVERROR(ENOMEM);
3342 for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
3343 const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
3344 AVPacketSideData *sd_dst = &ost->st->side_data[i];
3346 sd_dst->data = av_malloc(sd_src->size);
3348 return AVERROR(ENOMEM);
3349 memcpy(sd_dst->data, sd_src->data, sd_src->size);
3350 sd_dst->size = sd_src->size;
3351 sd_dst->type = sd_src->type;
3352 ost->st->nb_side_data++;
3356 // copy timebase while removing common factors
3357 if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
3358 ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
3360 // copy estimated duration as a hint to the muxer
3361 if (ost->st->duration <= 0 && ist && ist->st->duration > 0)
3362 ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3364 ost->st->codec->codec= ost->enc_ctx->codec;
3365 } else if (ost->stream_copy) {
3366 ret = init_output_stream_streamcopy(ost);
3371 * FIXME: the codec context used by the parser during streamcopy needs to be
3372 * filled from the stream parameters here. This should go away with the new parser API.
3374 ret = avcodec_parameters_to_context(ost->parser_avctx, ost->st->codecpar);
3379 // parse the user-provided disposition and update the stream values
3380 if (ost->disposition) {
3381 static const AVOption opts[] = {
3382 { "disposition" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
3383 { "default" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT }, .unit = "flags" },
3384 { "dub" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB }, .unit = "flags" },
3385 { "original" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL }, .unit = "flags" },
3386 { "comment" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT }, .unit = "flags" },
3387 { "lyrics" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS }, .unit = "flags" },
3388 { "karaoke" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE }, .unit = "flags" },
3389 { "forced" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED }, .unit = "flags" },
3390 { "hearing_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED }, .unit = "flags" },
3391 { "visual_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED }, .unit = "flags" },
3392 { "clean_effects" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS }, .unit = "flags" },
3393 { "captions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS }, .unit = "flags" },
3394 { "descriptions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS }, .unit = "flags" },
3395 { "metadata" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA }, .unit = "flags" },
3398 static const AVClass class = {
3400 .item_name = av_default_item_name,
3402 .version = LIBAVUTIL_VERSION_INT,
3404 const AVClass *pclass = &class;
3406 ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
3411 /* initialize bitstream filters for the output stream
3412 * needs to be done here, because the codec id for streamcopy is not
3413 * known until now */
3414 ret = init_output_bsfs(ost);
3418 ost->initialized = 1;
3420 ret = check_init_output_file(output_files[ost->file_index], ost->file_index);
3427 static void report_new_stream(int input_index, AVPacket *pkt)
3429 InputFile *file = input_files[input_index];
3430 AVStream *st = file->ctx->streams[pkt->stream_index];
3432 if (pkt->stream_index < file->nb_streams_warn)
3434 av_log(file->ctx, AV_LOG_WARNING,
3435 "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
3436 av_get_media_type_string(st->codecpar->codec_type),
3437 input_index, pkt->stream_index,
3438 pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
3439 file->nb_streams_warn = pkt->stream_index + 1;
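/* One-time setup before the main transcoding loop: bind complex filtergraph
 * outputs to input streams, initialize all input and output streams, discard
 * unused programs, write headers for files without streams and dump the
 * resulting stream mapping. */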
3442 static int transcode_init(void)
3444 int ret = 0, i, j, k;
3445 AVFormatContext *oc;
3448 char error[1024] = {0};
3450 for (i = 0; i < nb_filtergraphs; i++) {
3451 FilterGraph *fg = filtergraphs[i];
3452 for (j = 0; j < fg->nb_outputs; j++) {
3453 OutputFilter *ofilter = fg->outputs[j];
3454 if (!ofilter->ost || ofilter->ost->source_index >= 0)
3456 if (fg->nb_inputs != 1)
3458 for (k = nb_input_streams-1; k >= 0 ; k--)
3459 if (fg->inputs[0]->ist == input_streams[k])
3461 ofilter->ost->source_index = k;
3465 /* init framerate emulation */
3466 for (i = 0; i < nb_input_files; i++) {
3467 InputFile *ifile = input_files[i];
3468 if (ifile->rate_emu)
3469 for (j = 0; j < ifile->nb_streams; j++)
3470 input_streams[j + ifile->ist_index]->start = av_gettime_relative();
3473 /* hwaccel transcoding */
3474 for (i = 0; i < nb_output_streams; i++) {
3475 ost = output_streams[i];
3477 if (!ost->stream_copy) {
3479 if (qsv_transcode_init(ost))
3484 if (cuvid_transcode_init(ost))
3490 /* init input streams */
3491 for (i = 0; i < nb_input_streams; i++)
3492 if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
3493 for (i = 0; i < nb_output_streams; i++) {
3494 ost = output_streams[i];
3495 avcodec_close(ost->enc_ctx);
3500 /* open each encoder */
3501 for (i = 0; i < nb_output_streams; i++) {
3502 ret = init_output_stream(output_streams[i], error, sizeof(error));
3507 /* discard unused programs */
3508 for (i = 0; i < nb_input_files; i++) {
3509 InputFile *ifile = input_files[i];
3510 for (j = 0; j < ifile->ctx->nb_programs; j++) {
3511 AVProgram *p = ifile->ctx->programs[j];
3512 int discard = AVDISCARD_ALL;
3514 for (k = 0; k < p->nb_stream_indexes; k++)
3515 if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
3516 discard = AVDISCARD_DEFAULT;
3519 p->discard = discard;
3523 /* write headers for files with no streams */
3524 for (i = 0; i < nb_output_files; i++) {
3525 oc = output_files[i]->ctx;
3526 if (oc->oformat->flags & AVFMT_NOSTREAMS && oc->nb_streams == 0) {
3527 ret = check_init_output_file(output_files[i], i);
3534 /* dump the stream mapping */
3535 av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
3536 for (i = 0; i < nb_input_streams; i++) {
3537 ist = input_streams[i];
3539 for (j = 0; j < ist->nb_filters; j++) {
3540 if (!filtergraph_is_simple(ist->filters[j]->graph)) {
3541 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
3542 ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
3543 ist->filters[j]->name);
3544 if (nb_filtergraphs > 1)
3545 av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
3546 av_log(NULL, AV_LOG_INFO, "\n");
3551 for (i = 0; i < nb_output_streams; i++) {
3552 ost = output_streams[i];
3554 if (ost->attachment_filename) {
3555 /* an attached file */
3556 av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
3557 ost->attachment_filename, ost->file_index, ost->index);
3561 if (ost->filter && !filtergraph_is_simple(ost->filter->graph)) {
3562 /* output from a complex graph */
3563 av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
3564 if (nb_filtergraphs > 1)
3565 av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
3567 av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
3568 ost->index, ost->enc ? ost->enc->name : "?");
3572 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
3573 input_streams[ost->source_index]->file_index,
3574 input_streams[ost->source_index]->st->index,
3577 if (ost->sync_ist != input_streams[ost->source_index])
3578 av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
3579 ost->sync_ist->file_index,
3580 ost->sync_ist->st->index);
3581 if (ost->stream_copy)
3582 av_log(NULL, AV_LOG_INFO, " (copy)");
3584 const AVCodec *in_codec = input_streams[ost->source_index]->dec;
3585 const AVCodec *out_codec = ost->enc;
3586 const char *decoder_name = "?";
3587 const char *in_codec_name = "?";
3588 const char *encoder_name = "?";
3589 const char *out_codec_name = "?";
3590 const AVCodecDescriptor *desc;
3593 decoder_name = in_codec->name;
3594 desc = avcodec_descriptor_get(in_codec->id);
3596 in_codec_name = desc->name;
3597 if (!strcmp(decoder_name, in_codec_name))
3598 decoder_name = "native";
3602 encoder_name = out_codec->name;
3603 desc = avcodec_descriptor_get(out_codec->id);
3605 out_codec_name = desc->name;
3606 if (!strcmp(encoder_name, out_codec_name))
3607 encoder_name = "native";
3610 av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
3611 in_codec_name, decoder_name,
3612 out_codec_name, encoder_name);
3614 av_log(NULL, AV_LOG_INFO, "\n");
3618 av_log(NULL, AV_LOG_ERROR, "%s\n", error);
3622 transcode_init_done = 1;
3627 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
3628 static int need_output(void)
3632 for (i = 0; i < nb_output_streams; i++) {
3633 OutputStream *ost = output_streams[i];
3634 OutputFile *of = output_files[ost->file_index];
3635 AVFormatContext *os = output_files[ost->file_index]->ctx;
3637 if (ost->finished ||
3638 (os->pb && avio_tell(os->pb) >= of->limit_filesize))
3640 if (ost->frame_number >= ost->max_frames) {
3642 for (j = 0; j < of->ctx->nb_streams; j++)
3643 close_output_stream(output_streams[of->ost_index + j]);
3654 * Select the output stream to process.
3656 * @return selected output stream, or NULL if none available
3658 static OutputStream *choose_output(void)
3661 int64_t opts_min = INT64_MAX;
3662 OutputStream *ost_min = NULL;
3664 for (i = 0; i < nb_output_streams; i++) {
3665 OutputStream *ost = output_streams[i];
3666 int64_t opts = ost->st->cur_dts == AV_NOPTS_VALUE ? INT64_MIN :
3667 av_rescale_q(ost->st->cur_dts, ost->st->time_base,
3669 if (ost->st->cur_dts == AV_NOPTS_VALUE)
3670 av_log(NULL, AV_LOG_DEBUG, "cur_dts is invalid (this is harmless if it occurs once at the start per stream)\n");
3672 if (!ost->finished && opts < opts_min) {
3674 ost_min = ost->unavailable ? NULL : ost;
3680 static void set_tty_echo(int on)
3684 if (tcgetattr(0, &tty) == 0) {
3685 if (on) tty.c_lflag |= ECHO;
3686 else tty.c_lflag &= ~ECHO;
3687 tcsetattr(0, TCSANOW, &tty);
3692 static int check_keyboard_interaction(int64_t cur_time)
3695 static int64_t last_time;
3696 if (received_nb_signals)
3697 return AVERROR_EXIT;
3698 /* read_key() returns 0 on EOF */
3699 if(cur_time - last_time >= 100000 && !run_as_daemon){
3701 last_time = cur_time;
3705 return AVERROR_EXIT;
3706 if (key == '+') av_log_set_level(av_log_get_level()+10);
3707 if (key == '-') av_log_set_level(av_log_get_level()-10);
3708 if (key == 's') qp_hist ^= 1;
3711 do_hex_dump = do_pkt_dump = 0;
3712 } else if(do_pkt_dump){
3716 av_log_set_level(AV_LOG_DEBUG);
3718 if (key == 'c' || key == 'C'){
3719 char buf[4096], target[64], command[256], arg[256] = {0};
3722 fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
3725 while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3730 fprintf(stderr, "\n");
3732 (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
3733 av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
3734 target, time, command, arg);
3735 for (i = 0; i < nb_filtergraphs; i++) {
3736 FilterGraph *fg = filtergraphs[i];
3739 ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
3740 key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
3741 fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
3742 } else if (key == 'c') {
3743 fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
3744 ret = AVERROR_PATCHWELCOME;
3746 ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
3748 fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
3753 av_log(NULL, AV_LOG_ERROR,
3754 "Parse error, at least 3 arguments were expected, "
3755 "only %d given in string '%s'\n", n, buf);
3758 if (key == 'd' || key == 'D'){
3761 debug = input_streams[0]->st->codec->debug<<1;
3762 if(!debug) debug = 1;
3763 while(debug & (FF_DEBUG_DCT_COEFF|FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) //unsupported, would just crash
3770 while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3775 fprintf(stderr, "\n");
3776 if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
3777 fprintf(stderr,"error parsing debug value\n");
3779 for(i=0;i<nb_input_streams;i++) {
3780 input_streams[i]->st->codec->debug = debug;
3782 for(i=0;i<nb_output_streams;i++) {
3783 OutputStream *ost = output_streams[i];
3784 ost->enc_ctx->debug = debug;
3786 if(debug) av_log_set_level(AV_LOG_DEBUG);
3787 fprintf(stderr,"debug=%d\n", debug);
3790 fprintf(stderr, "key function\n"
3791 "? show this help\n"
3792 "+ increase verbosity\n"
3793 "- decrease verbosity\n"
3794 "c Send command to first matching filter supporting it\n"
3795 "C Send/Queue command to all matching filters\n"
3796 "D cycle through available debug modes\n"
3797 "h dump packets/hex press to cycle through the 3 states\n"
3799 "s Show QP histogram\n"
3806 static void *input_thread(void *arg)
3809 unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
3814 ret = av_read_frame(f->ctx, &pkt);
3816 if (ret == AVERROR(EAGAIN)) {
3821 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
3824 ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
3825 if (flags && ret == AVERROR(EAGAIN)) {
3827 ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
3828 av_log(f->ctx, AV_LOG_WARNING,
3829 "Thread message queue blocking; consider raising the "
3830 "thread_queue_size option (current value: %d)\n",
3831 f->thread_queue_size);
3834 if (ret != AVERROR_EOF)
3835 av_log(f->ctx, AV_LOG_ERROR,
3836 "Unable to send packet to main thread: %s\n",
3838 av_packet_unref(&pkt);
3839 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
3847 static void free_input_threads(void)
3851 for (i = 0; i < nb_input_files; i++) {
3852 InputFile *f = input_files[i];
3855 if (!f || !f->in_thread_queue)
3857 av_thread_message_queue_set_err_send(f->in_thread_queue, AVERROR_EOF);
3858 while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
3859 av_packet_unref(&pkt);
3861 pthread_join(f->thread, NULL);
3863 av_thread_message_queue_free(&f->in_thread_queue);
3867 static int init_input_threads(void)
3871 if (nb_input_files == 1)
3874 for (i = 0; i < nb_input_files; i++) {
3875 InputFile *f = input_files[i];
3877 if (f->ctx->pb ? !f->ctx->pb->seekable :
3878 strcmp(f->ctx->iformat->name, "lavfi"))
3879 f->non_blocking = 1;
3880 ret = av_thread_message_queue_alloc(&f->in_thread_queue,
3881 f->thread_queue_size, sizeof(AVPacket));
3885 if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
3886 av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
3887 av_thread_message_queue_free(&f->in_thread_queue);
3888 return AVERROR(ret);
3894 static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
3896 return av_thread_message_queue_recv(f->in_thread_queue, pkt,
3898 AV_THREAD_MESSAGE_NONBLOCK : 0);
3902 static int get_input_packet(InputFile *f, AVPacket *pkt)
3906 for (i = 0; i < f->nb_streams; i++) {
3907 InputStream *ist = input_streams[f->ist_index + i];
3908 int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
3909 int64_t now = av_gettime_relative() - ist->start;
3911 return AVERROR(EAGAIN);
3916 if (nb_input_files > 1)
3917 return get_input_packet_mt(f, pkt);
3919 return av_read_frame(f->ctx, pkt);
3922 static int got_eagain(void)
3925 for (i = 0; i < nb_output_streams; i++)
3926 if (output_streams[i]->unavailable)
3931 static void reset_eagain(void)
3934 for (i = 0; i < nb_input_files; i++)
3935 input_files[i]->eagain = 0;
3936 for (i = 0; i < nb_output_streams; i++)
3937 output_streams[i]->unavailable = 0;
3940 // set duration to max(tmp, duration) in a proper time base and return duration's time_base
3941 static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base,
3942 AVRational time_base)
3948 return tmp_time_base;
3951 ret = av_compare_ts(*duration, time_base, tmp, tmp_time_base);
3954 return tmp_time_base;
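/* Used by -stream_loop: seek the input file back to its start, flush the
 * decoders, and accumulate the file duration so that timestamps of the next
 * iteration can be offset correctly in process_input(). */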
3960 static int seek_to_start(InputFile *ifile, AVFormatContext *is)
3963 AVCodecContext *avctx;
3964 int i, ret, has_audio = 0;
3965 int64_t duration = 0;
3967 ret = av_seek_frame(is, -1, is->start_time, 0);
3971 for (i = 0; i < ifile->nb_streams; i++) {
3972 ist = input_streams[ifile->ist_index + i];
3973 avctx = ist->dec_ctx;
3976 if (ist->decoding_needed) {
3977 process_input_packet(ist, NULL, 1);
3978 avcodec_flush_buffers(avctx);
3981 /* duration is the length of the last frame in a stream;
3982 * when an audio stream is present we don't care about the
3983 * last video frame's length because it's not defined exactly */
3984 if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples)
3988 for (i = 0; i < ifile->nb_streams; i++) {
3989 ist = input_streams[ifile->ist_index + i];
3990 avctx = ist->dec_ctx;
3993 if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples) {
3994 AVRational sample_rate = {1, avctx->sample_rate};
3996 duration = av_rescale_q(ist->nb_samples, sample_rate, ist->st->time_base);
4000 if (ist->framerate.num) {
4001 duration = av_rescale_q(1, ist->framerate, ist->st->time_base);
4002 } else if (ist->st->avg_frame_rate.num) {
4003 duration = av_rescale_q(1, ist->st->avg_frame_rate, ist->st->time_base);
4004 } else duration = 1;
4006 if (!ifile->duration)
4007 ifile->time_base = ist->st->time_base;
4008 /* the total duration of the stream, max_pts - min_pts is
4009 * the duration of the stream without the last frame */
4010 duration += ist->max_pts - ist->min_pts;
4011 ifile->time_base = duration_max(duration, &ifile->duration, ist->st->time_base,
4015 if (ifile->loop > 0)
4023 * - 0 -- one packet was read and processed
4024 * - AVERROR(EAGAIN) -- no packets were available for selected file,
4025 * this function should be called again
4026 * - AVERROR_EOF -- this function should not be called again
4028 static int process_input(int file_index)
4030 InputFile *ifile = input_files[file_index];
4031 AVFormatContext *is;
4039 ret = get_input_packet(ifile, &pkt);
4041 if (ret == AVERROR(EAGAIN)) {
4045 if (ret < 0 && ifile->loop) {
4046 if ((ret = seek_to_start(ifile, is)) < 0)
4048 ret = get_input_packet(ifile, &pkt);
4049 if (ret == AVERROR(EAGAIN)) {
4055 if (ret != AVERROR_EOF) {
4056 print_error(is->filename, ret);
4061 for (i = 0; i < ifile->nb_streams; i++) {
4062 ist = input_streams[ifile->ist_index + i];
4063 if (ist->decoding_needed) {
4064 ret = process_input_packet(ist, NULL, 0);
4069 /* mark all outputs that don't go through lavfi as finished */
4070 for (j = 0; j < nb_output_streams; j++) {
4071 OutputStream *ost = output_streams[j];
4073 if (ost->source_index == ifile->ist_index + i &&
4074 (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
4075 finish_output_stream(ost);
4079 ifile->eof_reached = 1;
4080 return AVERROR(EAGAIN);
4086 av_pkt_dump_log2(NULL, AV_LOG_INFO, &pkt, do_hex_dump,
4087 is->streams[pkt.stream_index]);
4089 /* the following test is needed in case new streams appear
4090 dynamically in the stream: we ignore them */
4091 if (pkt.stream_index >= ifile->nb_streams) {
4092 report_new_stream(file_index, &pkt);
4093 goto discard_packet;
4096 ist = input_streams[ifile->ist_index + pkt.stream_index];
4098 ist->data_size += pkt.size;
4102 goto discard_packet;
4104 if (exit_on_error && (pkt.flags & AV_PKT_FLAG_CORRUPT)) {
4105 av_log(NULL, AV_LOG_FATAL, "%s: corrupt input packet in stream %d\n", is->filename, pkt.stream_index);
4110 av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
4111 "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4112 ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
4113 av_ts2str(ist->next_dts), av_ts2timestr(ist->next_dts, &AV_TIME_BASE_Q),
4114 av_ts2str(ist->next_pts), av_ts2timestr(ist->next_pts, &AV_TIME_BASE_Q),
4115 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4116 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4117 av_ts2str(input_files[ist->file_index]->ts_offset),
4118 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
4121 if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
4122 int64_t stime, stime2;
4123 // Correct the start time based on the enabled streams.
4124 // FIXME: this should ideally be done before the first use of the start time, but we do not know which streams are enabled at that point,
4125 // so we instead do it here as part of discontinuity handling.
4126 if ( ist->next_dts == AV_NOPTS_VALUE
4127 && ifile->ts_offset == -is->start_time
4128 && (is->iformat->flags & AVFMT_TS_DISCONT)) {
4129 int64_t new_start_time = INT64_MAX;
4130 for (i=0; i<is->nb_streams; i++) {
4131 AVStream *st = is->streams[i];
4132 if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
4134 new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
4136 if (new_start_time > is->start_time) {
4137 av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
4138 ifile->ts_offset = -new_start_time;
4142 stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
4143 stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
4144 ist->wrap_correction_done = 1;
4146 if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4147 pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
4148 ist->wrap_correction_done = 0;
4150 if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4151 pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
4152 ist->wrap_correction_done = 0;
4156 /* add the stream-global side data to the first packet */
4157 if (ist->nb_packets == 1) {
4158 if (ist->st->nb_side_data)
4159 av_packet_split_side_data(&pkt);
4160 for (i = 0; i < ist->st->nb_side_data; i++) {
4161 AVPacketSideData *src_sd = &ist->st->side_data[i];
4164 if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
4166 if (ist->autorotate && src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
4169 dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
4173 memcpy(dst_data, src_sd->data, src_sd->size);
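/* shift timestamps by the file's ts_offset (input timestamp offset, start-time
 * normalization and any accumulated discontinuity corrections) and scale them by
 * the per-stream ts_scale factor */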
4177 if (pkt.dts != AV_NOPTS_VALUE)
4178 pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4179 if (pkt.pts != AV_NOPTS_VALUE)
4180 pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4182 if (pkt.pts != AV_NOPTS_VALUE)
4183 pkt.pts *= ist->ts_scale;
4184 if (pkt.dts != AV_NOPTS_VALUE)
4185 pkt.dts *= ist->ts_scale;
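/* discontinuity check against the last timestamp seen in this file: used before a
 * per-stream dts prediction (next_dts) exists and only for formats flagged
 * AVFMT_TS_DISCONT; large jumps are folded into ts_offset */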
4187 pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
4188 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4189 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
4190 pkt_dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
4191 && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
4192 int64_t delta = pkt_dts - ifile->last_ts;
4193 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4194 delta > 1LL*dts_delta_threshold*AV_TIME_BASE){
4195 ifile->ts_offset -= delta;
4196 av_log(NULL, AV_LOG_DEBUG,
4197 "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
4198 delta, ifile->ts_offset);
4199 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4200 if (pkt.pts != AV_NOPTS_VALUE)
4201 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
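/* when the input is being looped, offset the timestamps by the total duration of
 * the previous iterations so they keep growing monotonically across loops */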
4205 duration = av_rescale_q(ifile->duration, ifile->time_base, ist->st->time_base);
4206 if (pkt.pts != AV_NOPTS_VALUE) {
4207 pkt.pts += duration;
4208 ist->max_pts = FFMAX(pkt.pts, ist->max_pts);
4209 ist->min_pts = FFMIN(pkt.pts, ist->min_pts);
4212 if (pkt.dts != AV_NOPTS_VALUE)
4213 pkt.dts += duration;
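/* main discontinuity check against the dts predicted from the previous packet:
 * for AVFMT_TS_DISCONT formats large deltas are folded into ts_offset, otherwise
 * timestamps outside the error threshold are dropped as invalid */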
4215 pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
4216 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4217 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
4218 pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
4220 int64_t delta = pkt_dts - ist->next_dts;
4221 if (is->iformat->flags & AVFMT_TS_DISCONT) {
4222 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4223 delta > 1LL*dts_delta_threshold*AV_TIME_BASE ||
4224 pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
4225 ifile->ts_offset -= delta;
4226 av_log(NULL, AV_LOG_DEBUG,
4227 "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
4228 delta, ifile->ts_offset);
4229 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4230 if (pkt.pts != AV_NOPTS_VALUE)
4231 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4234 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4235 delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
4236 av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
4237 pkt.dts = AV_NOPTS_VALUE;
4239 if (pkt.pts != AV_NOPTS_VALUE){
4240 int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
4241 delta = pkt_pts - ist->next_dts;
4242 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4243 delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
4244 av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
4245 pkt.pts = AV_NOPTS_VALUE;
4251 if (pkt.dts != AV_NOPTS_VALUE)
4252 ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
4255 av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4256 ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
4257 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4258 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4259 av_ts2str(input_files[ist->file_index]->ts_offset),
4260 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
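/* advance the sub2video clock so held subtitle frames keep being fed to the
 * filtergraphs even when no new subtitle packet arrives */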
4263 sub2video_heartbeat(ist, pkt.pts);
4265 process_input_packet(ist, &pkt, 0);
4268 av_packet_unref(&pkt);
4274 * Perform a step of transcoding for the specified filter graph.
4276 * @param[in] graph filter graph to consider
4277 * @param[out] best_ist input stream from which a frame would allow the graph to continue
4278 * @return 0 for success, <0 for error
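 *
 * Requests a frame from the graph's oldest output; if the graph needs more
 * input instead, *best_ist is set to the input stream whose buffersrc has the
 * most failed requests, i.e. the one most likely to unblock the graph.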
4280 static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
4283 int nb_requests, nb_requests_max = 0;
4284 InputFilter *ifilter;
4288 ret = avfilter_graph_request_oldest(graph->graph);
4290 return reap_filters(0);
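/* the graph has reached EOF: flush whatever is still buffered and close all of
 * its output streams */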
4292 if (ret == AVERROR_EOF) {
4293 ret = reap_filters(1);
4294 for (i = 0; i < graph->nb_outputs; i++)
4295 close_output_stream(graph->outputs[i]->ost);
4298 if (ret != AVERROR(EAGAIN))
4301 for (i = 0; i < graph->nb_inputs; i++) {
4302 ifilter = graph->inputs[i];
4304 if (input_files[ist->file_index]->eagain ||
4305 input_files[ist->file_index]->eof_reached)
4307 nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
4308 if (nb_requests > nb_requests_max) {
4309 nb_requests_max = nb_requests;
4315 for (i = 0; i < graph->nb_outputs; i++)
4316 graph->outputs[i]->ost->unavailable = 1;
4322 * Run a single step of transcoding.
4324 * @return 0 for success, <0 for error
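 *
 * Picks an output stream, determines which input stream it needs data from,
 * reads and processes one packet from that input, and finally reaps any
 * frames the filtergraphs have produced.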
4326 static int transcode_step(void)
4332 ost = choose_output();
4339 av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
4344 if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
4349 av_assert0(ost->source_index >= 0);
4350 ist = input_streams[ost->source_index];
4353 ret = process_input(ist->file_index);
4354 if (ret == AVERROR(EAGAIN)) {
4355 if (input_files[ist->file_index]->eagain)
4356 ost->unavailable = 1;
4361 return ret == AVERROR_EOF ? 0 : ret;
4363 return reap_filters(0);
4367 * The following code is the main loop of the file converter
4369 static int transcode(void)