2 * Copyright (c) 2000-2003 Fabrice Bellard
4 * This file is part of FFmpeg.
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 * multimedia converter based on the FFmpeg libraries
33 #include <stdatomic.h>
43 #include "libavformat/avformat.h"
44 #include "libavdevice/avdevice.h"
45 #include "libswresample/swresample.h"
46 #include "libavutil/opt.h"
47 #include "libavutil/channel_layout.h"
48 #include "libavutil/parseutils.h"
49 #include "libavutil/samplefmt.h"
50 #include "libavutil/fifo.h"
51 #include "libavutil/hwcontext.h"
52 #include "libavutil/internal.h"
53 #include "libavutil/intreadwrite.h"
54 #include "libavutil/dict.h"
55 #include "libavutil/display.h"
56 #include "libavutil/mathematics.h"
57 #include "libavutil/pixdesc.h"
58 #include "libavutil/avstring.h"
59 #include "libavutil/libm.h"
60 #include "libavutil/imgutils.h"
61 #include "libavutil/timestamp.h"
62 #include "libavutil/bprint.h"
63 #include "libavutil/time.h"
64 #include "libavutil/threadmessage.h"
65 #include "libavcodec/mathops.h"
66 #include "libavformat/os_support.h"
68 # include "libavfilter/avfilter.h"
69 # include "libavfilter/buffersrc.h"
70 # include "libavfilter/buffersink.h"
72 #if HAVE_SYS_RESOURCE_H
74 #include <sys/types.h>
75 #include <sys/resource.h>
76 #elif HAVE_GETPROCESSTIMES
79 #if HAVE_GETPROCESSMEMORYINFO
83 #if HAVE_SETCONSOLECTRLHANDLER
89 #include <sys/select.h>
94 #include <sys/ioctl.h>
108 #include "cmdutils.h"
110 #include "libavutil/avassert.h"
112 const char program_name[] = "ffmpeg";
113 const int program_birth_year = 2000;
115 static FILE *vstats_file;
117 const char *const forced_keyframes_const_names[] = {
126 static void do_video_stats(OutputStream *ost, int frame_size);
127 static int64_t getutime(void);
128 static int64_t getmaxrss(void);
129 static int ifilter_has_all_input_formats(FilterGraph *fg);
131 static int run_as_daemon = 0;
132 static int nb_frames_dup = 0;
133 static unsigned dup_warning = 1000;
134 static int nb_frames_drop = 0;
135 static int64_t decode_error_stat[2];
137 static int want_sdp = 1;
139 static int current_time;
140 AVIOContext *progress_avio = NULL;
142 static uint8_t *subtitle_out;
144 InputStream **input_streams = NULL;
145 int nb_input_streams = 0;
146 InputFile **input_files = NULL;
147 int nb_input_files = 0;
149 OutputStream **output_streams = NULL;
150 int nb_output_streams = 0;
151 OutputFile **output_files = NULL;
152 int nb_output_files = 0;
154 FilterGraph **filtergraphs;
159 /* init terminal so that we can grab keys */
160 static struct termios oldtty;
161 static int restore_tty;
165 static void free_input_threads(void);
169 Convert subtitles to video with alpha, to insert them in filter graphs.
170 This is a temporary solution until libavfilter gets real subtitle support.
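/* In outline: each AVSubtitle is rendered onto a blank RGB32 frame
   (sub2video_get_blank_frame() + sub2video_copy_rect() below) and the frame
   is pushed into the stream's buffersrc filters, so overlay-style filters can
   consume subtitles as ordinary video frames. */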
173 static int sub2video_get_blank_frame(InputStream *ist)
176 AVFrame *frame = ist->sub2video.frame;
178 av_frame_unref(frame);
179 ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
180 ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
181 ist->sub2video.frame->format = AV_PIX_FMT_RGB32;
182 if ((ret = av_frame_get_buffer(frame, 32)) < 0)
184 memset(frame->data[0], 0, frame->height * frame->linesize[0]);
188 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
191 uint32_t *pal, *dst2;
195 if (r->type != SUBTITLE_BITMAP) {
196 av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
199 if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
200 av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
201 r->x, r->y, r->w, r->h, w, h
206 dst += r->y * dst_linesize + r->x * 4;
208 pal = (uint32_t *)r->data[1];
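/* Expand each PAL8 index of the subtitle rectangle to a 32-bit RGBA pixel
   using the palette stored in r->data[1]. */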
209 for (y = 0; y < r->h; y++) {
210 dst2 = (uint32_t *)dst;
212 for (x = 0; x < r->w; x++)
213 *(dst2++) = pal[*(src2++)];
215 src += r->linesize[0];
219 static void sub2video_push_ref(InputStream *ist, int64_t pts)
221 AVFrame *frame = ist->sub2video.frame;
224 av_assert1(frame->data[0]);
225 ist->sub2video.last_pts = frame->pts = pts;
226 for (i = 0; i < ist->nb_filters; i++)
227 av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
228 AV_BUFFERSRC_FLAG_KEEP_REF |
229 AV_BUFFERSRC_FLAG_PUSH);
232 void sub2video_update(InputStream *ist, AVSubtitle *sub)
234 AVFrame *frame = ist->sub2video.frame;
238 int64_t pts, end_pts;
243 pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
244 AV_TIME_BASE_Q, ist->st->time_base);
245 end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
246 AV_TIME_BASE_Q, ist->st->time_base);
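/* AVSubtitle display times are given in milliseconds, hence the *1000 to get
   microseconds (AV_TIME_BASE units) before rescaling to the stream time base. */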
247 num_rects = sub->num_rects;
249 pts = ist->sub2video.end_pts;
253 if (sub2video_get_blank_frame(ist) < 0) {
254 av_log(ist->dec_ctx, AV_LOG_ERROR,
255 "Impossible to get a blank canvas.\n");
258 dst = frame->data [0];
259 dst_linesize = frame->linesize[0];
260 for (i = 0; i < num_rects; i++)
261 sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
262 sub2video_push_ref(ist, pts);
263 ist->sub2video.end_pts = end_pts;
266 static void sub2video_heartbeat(InputStream *ist, int64_t pts)
268 InputFile *infile = input_files[ist->file_index];
272 /* When a frame is read from a file, examine all sub2video streams in
273 the same file and send the sub2video frame again. Otherwise, decoded
274 video frames could accumulate in the filter graph while a filter
275 (possibly overlay) is desperately waiting for a subtitle frame. */
276 for (i = 0; i < infile->nb_streams; i++) {
277 InputStream *ist2 = input_streams[infile->ist_index + i];
278 if (!ist2->sub2video.frame)
280 /* subtitles are usually muxed ahead of the other streams;
281 if not, a larger time would need to be subtracted here */
282 pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
283 /* do not send the heartbeat frame if the subtitle is already ahead */
284 if (pts2 <= ist2->sub2video.last_pts)
286 if (pts2 >= ist2->sub2video.end_pts || !ist2->sub2video.frame->data[0])
287 sub2video_update(ist2, NULL);
288 for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
289 nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
291 sub2video_push_ref(ist2, pts2);
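/* The previous subtitle frame appears to be re-pushed only when nb_reqs shows
   that a downstream filter actually failed a frame request, which avoids
   flooding the graph with duplicate heartbeat frames. */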
295 static void sub2video_flush(InputStream *ist)
299 if (ist->sub2video.end_pts < INT64_MAX)
300 sub2video_update(ist, NULL);
301 for (i = 0; i < ist->nb_filters; i++)
302 av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
305 /* end of sub2video hack */
307 static void term_exit_sigsafe(void)
311 tcsetattr (0, TCSANOW, &oldtty);
317 av_log(NULL, AV_LOG_QUIET, "%s", "");
321 static volatile int received_sigterm = 0;
322 static volatile int received_nb_signals = 0;
323 static atomic_int transcode_init_done = ATOMIC_VAR_INIT(0);
324 static volatile int ffmpeg_exited = 0;
325 static int main_return_code = 0;
328 sigterm_handler(int sig)
330 received_sigterm = sig;
331 received_nb_signals++;
333 if(received_nb_signals > 3) {
334 write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
335 strlen("Received > 3 system signals, hard exiting\n"));
341 #if HAVE_SETCONSOLECTRLHANDLER
342 static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
344 av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);
349 case CTRL_BREAK_EVENT:
350 sigterm_handler(SIGINT);
353 case CTRL_CLOSE_EVENT:
354 case CTRL_LOGOFF_EVENT:
355 case CTRL_SHUTDOWN_EVENT:
356 sigterm_handler(SIGTERM);
357 /* Basically, with these 3 events, when we return from this method the
358 process is hard terminated, so stall as long as we need to in order
359 to let the main thread(s) clean up and gracefully terminate
360 (we have at most 5 seconds, but should be done far before that). */
361 while (!ffmpeg_exited) {
367 av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
376 if (!run_as_daemon && stdin_interaction) {
378 if (tcgetattr (0, &tty) == 0) {
382 tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
383 |INLCR|IGNCR|ICRNL|IXON);
384 tty.c_oflag |= OPOST;
385 tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
386 tty.c_cflag &= ~(CSIZE|PARENB);
391 tcsetattr (0, TCSANOW, &tty);
393 signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
397 signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
398 signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
400 signal(SIGXCPU, sigterm_handler);
402 #if HAVE_SETCONSOLECTRLHANDLER
403 SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
407 /* read a key without blocking */
408 static int read_key(void)
420 n = select(1, &rfds, NULL, NULL, &tv);
429 # if HAVE_PEEKNAMEDPIPE
431 static HANDLE input_handle;
434 input_handle = GetStdHandle(STD_INPUT_HANDLE);
435 is_pipe = !GetConsoleMode(input_handle, &dw);
439 /* When running under a GUI, you will end up here. */
440 if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
441 // input pipe may have been closed by the program that ran ffmpeg
459 static int decode_interrupt_cb(void *ctx)
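/* Returns non-zero when blocking I/O should be interrupted: before
   transcode_init_done is set, the first signal aborts immediately; after
   initialization one signal is tolerated so the normal shutdown path can run,
   and a second one interrupts I/O. */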
461 return received_nb_signals > atomic_load(&transcode_init_done);
464 const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
466 static void ffmpeg_cleanup(int ret)
471 int maxrss = getmaxrss() / 1024;
472 av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
475 for (i = 0; i < nb_filtergraphs; i++) {
476 FilterGraph *fg = filtergraphs[i];
477 avfilter_graph_free(&fg->graph);
478 for (j = 0; j < fg->nb_inputs; j++) {
479 while (av_fifo_size(fg->inputs[j]->frame_queue)) {
481 av_fifo_generic_read(fg->inputs[j]->frame_queue, &frame,
482 sizeof(frame), NULL);
483 av_frame_free(&frame);
485 av_fifo_freep(&fg->inputs[j]->frame_queue);
486 if (fg->inputs[j]->ist->sub2video.sub_queue) {
487 while (av_fifo_size(fg->inputs[j]->ist->sub2video.sub_queue)) {
489 av_fifo_generic_read(fg->inputs[j]->ist->sub2video.sub_queue,
490 &sub, sizeof(sub), NULL);
491 avsubtitle_free(&sub);
493 av_fifo_freep(&fg->inputs[j]->ist->sub2video.sub_queue);
495 av_buffer_unref(&fg->inputs[j]->hw_frames_ctx);
496 av_freep(&fg->inputs[j]->name);
497 av_freep(&fg->inputs[j]);
499 av_freep(&fg->inputs);
500 for (j = 0; j < fg->nb_outputs; j++) {
501 av_freep(&fg->outputs[j]->name);
502 av_freep(&fg->outputs[j]->formats);
503 av_freep(&fg->outputs[j]->channel_layouts);
504 av_freep(&fg->outputs[j]->sample_rates);
505 av_freep(&fg->outputs[j]);
507 av_freep(&fg->outputs);
508 av_freep(&fg->graph_desc);
510 av_freep(&filtergraphs[i]);
512 av_freep(&filtergraphs);
514 av_freep(&subtitle_out);
517 for (i = 0; i < nb_output_files; i++) {
518 OutputFile *of = output_files[i];
523 if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
525 avformat_free_context(s);
526 av_dict_free(&of->opts);
528 av_freep(&output_files[i]);
530 for (i = 0; i < nb_output_streams; i++) {
531 OutputStream *ost = output_streams[i];
536 for (j = 0; j < ost->nb_bitstream_filters; j++)
537 av_bsf_free(&ost->bsf_ctx[j]);
538 av_freep(&ost->bsf_ctx);
540 av_frame_free(&ost->filtered_frame);
541 av_frame_free(&ost->last_frame);
542 av_dict_free(&ost->encoder_opts);
544 av_parser_close(ost->parser);
545 avcodec_free_context(&ost->parser_avctx);
547 av_freep(&ost->forced_keyframes);
548 av_expr_free(ost->forced_keyframes_pexpr);
549 av_freep(&ost->avfilter);
550 av_freep(&ost->logfile_prefix);
552 av_freep(&ost->audio_channels_map);
553 ost->audio_channels_mapped = 0;
555 av_dict_free(&ost->sws_dict);
557 avcodec_free_context(&ost->enc_ctx);
558 avcodec_parameters_free(&ost->ref_par);
560 if (ost->muxing_queue) {
561 while (av_fifo_size(ost->muxing_queue)) {
563 av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
564 av_packet_unref(&pkt);
566 av_fifo_freep(&ost->muxing_queue);
569 av_freep(&output_streams[i]);
572 free_input_threads();
574 for (i = 0; i < nb_input_files; i++) {
575 avformat_close_input(&input_files[i]->ctx);
576 av_freep(&input_files[i]);
578 for (i = 0; i < nb_input_streams; i++) {
579 InputStream *ist = input_streams[i];
581 av_frame_free(&ist->decoded_frame);
582 av_frame_free(&ist->filter_frame);
583 av_dict_free(&ist->decoder_opts);
584 avsubtitle_free(&ist->prev_sub.subtitle);
585 av_frame_free(&ist->sub2video.frame);
586 av_freep(&ist->filters);
587 av_freep(&ist->hwaccel_device);
588 av_freep(&ist->dts_buffer);
590 avcodec_free_context(&ist->dec_ctx);
592 av_freep(&input_streams[i]);
596 if (fclose(vstats_file))
597 av_log(NULL, AV_LOG_ERROR,
598 "Error closing vstats file, loss of information possible: %s\n",
599 av_err2str(AVERROR(errno)));
601 av_freep(&vstats_filename);
603 av_freep(&input_streams);
604 av_freep(&input_files);
605 av_freep(&output_streams);
606 av_freep(&output_files);
610 avformat_network_deinit();
612 if (received_sigterm) {
613 av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
614 (int) received_sigterm);
615 } else if (ret && atomic_load(&transcode_init_done)) {
616 av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
622 void remove_avoptions(AVDictionary **a, AVDictionary *b)
624 AVDictionaryEntry *t = NULL;
626 while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
627 av_dict_set(a, t->key, NULL, AV_DICT_MATCH_CASE);
631 void assert_avoptions(AVDictionary *m)
633 AVDictionaryEntry *t;
634 if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
635 av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
640 static void abort_codec_experimental(AVCodec *c, int encoder)
645 static void update_benchmark(const char *fmt, ...)
647 if (do_benchmark_all) {
648 int64_t t = getutime();
654 vsnprintf(buf, sizeof(buf), fmt, va);
656 av_log(NULL, AV_LOG_INFO, "bench: %8"PRIu64" %s \n", t - current_time, buf);
662 static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
665 for (i = 0; i < nb_output_streams; i++) {
666 OutputStream *ost2 = output_streams[i];
667 ost2->finished |= ost == ost2 ? this_stream : others;
671 static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int unqueue)
673 AVFormatContext *s = of->ctx;
674 AVStream *st = ost->st;
678 * Audio encoders may split the packets -- #frames in != #packets out.
679 * But there is no reordering, so we can limit the number of output packets
680 * by simply dropping them here.
681 * Counting encoded video frames needs to be done separately because of
682 * reordering, see do_video_out().
683 * Do not count the packet when unqueued because it has been counted when queued.
685 if (!(st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && ost->encoding_needed) && !unqueue) {
686 if (ost->frame_number >= ost->max_frames) {
687 av_packet_unref(pkt);
693 if (!of->header_written) {
694 AVPacket tmp_pkt = {0};
695 /* the muxer is not initialized yet, buffer the packet */
696 if (!av_fifo_space(ost->muxing_queue)) {
697 int new_size = FFMIN(2 * av_fifo_size(ost->muxing_queue),
698 ost->max_muxing_queue_size);
699 if (new_size <= av_fifo_size(ost->muxing_queue)) {
700 av_log(NULL, AV_LOG_ERROR,
701 "Too many packets buffered for output stream %d:%d.\n",
702 ost->file_index, ost->st->index);
705 ret = av_fifo_realloc2(ost->muxing_queue, new_size);
709 ret = av_packet_ref(&tmp_pkt, pkt);
712 av_fifo_generic_write(ost->muxing_queue, &tmp_pkt, sizeof(tmp_pkt), NULL);
713 av_packet_unref(pkt);
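/* Until the muxer header is written, packets are ref'd into tmp_pkt and
   stored by value in ost->muxing_queue; the FIFO grows by doubling, capped at
   ost->max_muxing_queue_size. */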
717 if ((st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && video_sync_method == VSYNC_DROP) ||
718 (st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && audio_sync_method < 0))
719 pkt->pts = pkt->dts = AV_NOPTS_VALUE;
721 if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
723 uint8_t *sd = av_packet_get_side_data(pkt, AV_PKT_DATA_QUALITY_STATS,
725 ost->quality = sd ? AV_RL32(sd) : -1;
726 ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;
728 for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
730 ost->error[i] = AV_RL64(sd + 8 + 8*i);
735 if (ost->frame_rate.num && ost->is_cfr) {
736 if (pkt->duration > 0)
737 av_log(NULL, AV_LOG_WARNING, "Overriding packet duration by frame rate, this should not happen\n");
738 pkt->duration = av_rescale_q(1, av_inv_q(ost->frame_rate),
743 av_packet_rescale_ts(pkt, ost->mux_timebase, ost->st->time_base);
745 if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
746 if (pkt->dts != AV_NOPTS_VALUE &&
747 pkt->pts != AV_NOPTS_VALUE &&
748 pkt->dts > pkt->pts) {
749 av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
751 ost->file_index, ost->st->index);
753 pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
754 - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
755 - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
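/* sum minus min minus max of {pts, dts, last_mux_dts + 1} leaves the median
   of the three values, which becomes the new monotonically increasing dts. */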
757 if ((st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) &&
758 pkt->dts != AV_NOPTS_VALUE &&
759 !(st->codecpar->codec_id == AV_CODEC_ID_VP9 && ost->stream_copy) &&
760 ost->last_mux_dts != AV_NOPTS_VALUE) {
761 int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
762 if (pkt->dts < max) {
763 int loglevel = max - pkt->dts > 2 || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
764 av_log(s, loglevel, "Non-monotonic DTS in output stream "
765 "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
766 ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
768 av_log(NULL, AV_LOG_FATAL, "aborting.\n");
771 av_log(s, loglevel, "changing to %"PRId64". This may result "
772 "in incorrect timestamps in the output file.\n",
774 if (pkt->pts >= pkt->dts)
775 pkt->pts = FFMAX(pkt->pts, max);
780 ost->last_mux_dts = pkt->dts;
782 ost->data_size += pkt->size;
783 ost->packets_written++;
785 pkt->stream_index = ost->index;
788 av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
789 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
790 av_get_media_type_string(ost->enc_ctx->codec_type),
791 av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
792 av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
797 ret = av_interleaved_write_frame(s, pkt);
799 print_error("av_interleaved_write_frame()", ret);
800 main_return_code = 1;
801 close_all_output_streams(ost, MUXER_FINISHED | ENCODER_FINISHED, ENCODER_FINISHED);
803 av_packet_unref(pkt);
806 static void close_output_stream(OutputStream *ost)
808 OutputFile *of = output_files[ost->file_index];
810 ost->finished |= ENCODER_FINISHED;
812 int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
813 of->recording_time = FFMIN(of->recording_time, end);
818 * Send a single packet to the output, applying any bitstream filters
819 * associated with the output stream. This may result in any number
820 * of packets actually being written, depending on what bitstream
821 * filters are applied. The supplied packet is consumed and will be
822 * blank (as if newly-allocated) when this function returns.
824 * If eof is set, instead indicate EOF to all bitstream filters and
825 * therefore flush any delayed packets to the output. A blank packet
826 * must be supplied in this case.
828 static void output_packet(OutputFile *of, AVPacket *pkt,
829 OutputStream *ost, int eof)
833 /* apply the output bitstream filters, if any */
834 if (ost->nb_bitstream_filters) {
837 ret = av_bsf_send_packet(ost->bsf_ctx[0], eof ? NULL : pkt);
844 /* get a packet from the previous filter up the chain */
845 ret = av_bsf_receive_packet(ost->bsf_ctx[idx - 1], pkt);
846 if (ret == AVERROR(EAGAIN)) {
850 } else if (ret == AVERROR_EOF) {
855 /* send it to the next filter down the chain or to the muxer */
856 if (idx < ost->nb_bitstream_filters) {
857 ret = av_bsf_send_packet(ost->bsf_ctx[idx], eof ? NULL : pkt);
865 write_packet(of, pkt, ost, 0);
868 write_packet(of, pkt, ost, 0);
871 if (ret < 0 && ret != AVERROR_EOF) {
872 av_log(NULL, AV_LOG_ERROR, "Error applying bitstream filters to an output "
873 "packet for stream #%d:%d.\n", ost->file_index, ost->index);
879 static int check_recording_time(OutputStream *ost)
881 OutputFile *of = output_files[ost->file_index];
883 if (of->recording_time != INT64_MAX &&
884 av_compare_ts(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, of->recording_time,
885 AV_TIME_BASE_Q) >= 0) {
886 close_output_stream(ost);
892 static void do_audio_out(OutputFile *of, OutputStream *ost,
895 AVCodecContext *enc = ost->enc_ctx;
899 av_init_packet(&pkt);
903 if (!check_recording_time(ost))
906 if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
907 frame->pts = ost->sync_opts;
908 ost->sync_opts = frame->pts + frame->nb_samples;
909 ost->samples_encoded += frame->nb_samples;
910 ost->frames_encoded++;
912 av_assert0(pkt.size || !pkt.data);
913 update_benchmark(NULL);
915 av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
916 "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
917 av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
918 enc->time_base.num, enc->time_base.den);
921 ret = avcodec_send_frame(enc, frame);
926 ret = avcodec_receive_packet(enc, &pkt);
927 if (ret == AVERROR(EAGAIN))
932 update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
934 av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
937 av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
938 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
939 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
940 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
943 output_packet(of, &pkt, ost, 0);
948 av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
952 static void do_subtitle_out(OutputFile *of,
956 int subtitle_out_max_size = 1024 * 1024;
957 int subtitle_out_size, nb, i;
962 if (sub->pts == AV_NOPTS_VALUE) {
963 av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
972 subtitle_out = av_malloc(subtitle_out_max_size);
974 av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
979 /* Note: DVB subtitles need one packet to draw them and another
980 packet to clear them */
981 /* XXX: signal it in the codec context? */
982 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
987 /* shift timestamp to honor -ss and make check_recording_time() work with -t */
989 if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
990 pts -= output_files[ost->file_index]->start_time;
991 for (i = 0; i < nb; i++) {
992 unsigned save_num_rects = sub->num_rects;
994 ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
995 if (!check_recording_time(ost))
999 // start_display_time is required to be 0
1000 sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
1001 sub->end_display_time -= sub->start_display_time;
1002 sub->start_display_time = 0;
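/* Example: a subtitle with start_display_time = 500 ms has its pts advanced
   by 500 ms and end_display_time reduced by the same amount, so display times
   become relative to the new pts with start_display_time == 0. */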
1006 ost->frames_encoded++;
1008 subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
1009 subtitle_out_max_size, sub);
1011 sub->num_rects = save_num_rects;
1012 if (subtitle_out_size < 0) {
1013 av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
1017 av_init_packet(&pkt);
1018 pkt.data = subtitle_out;
1019 pkt.size = subtitle_out_size;
1020 pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->mux_timebase);
1021 pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1022 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
1023 /* XXX: the pts correction is handled here. Maybe handling
1024 it in the codec would be better */
1026 pkt.pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1028 pkt.pts += av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
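/* For DVB the two adjustments above presumably time the first (draw) packet
   at the display start and the second (clear) packet at the display end, so
   the subtitle is removed when it should disappear. */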
1031 output_packet(of, &pkt, ost, 0);
1035 static void do_video_out(OutputFile *of,
1037 AVFrame *next_picture,
1040 int ret, format_video_sync;
1042 AVCodecContext *enc = ost->enc_ctx;
1043 AVCodecParameters *mux_par = ost->st->codecpar;
1044 AVRational frame_rate;
1045 int nb_frames, nb0_frames, i;
1046 double delta, delta0;
1047 double duration = 0;
1049 InputStream *ist = NULL;
1050 AVFilterContext *filter = ost->filter->filter;
1052 if (ost->source_index >= 0)
1053 ist = input_streams[ost->source_index];
1055 frame_rate = av_buffersink_get_frame_rate(filter);
1056 if (frame_rate.num > 0 && frame_rate.den > 0)
1057 duration = 1/(av_q2d(frame_rate) * av_q2d(enc->time_base));
1059 if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
1060 duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
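/* duration is counted in encoder time_base ticks: e.g. a 25 fps filter output
   with enc->time_base = 1/25 gives 1/(25 * 1/25) = 1 tick per frame. */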
1062 if (!ost->filters_script &&
1066 lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
1067 duration = lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
1070 if (!next_picture) {
1072 nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
1073 ost->last_nb0_frames[1],
1074 ost->last_nb0_frames[2]);
1076 delta0 = sync_ipts - ost->sync_opts; // delta0 is the "drift" between the input frame (next_picture) and where it would fall in the output.
1077 delta = delta0 + duration;
1079 /* by default, we output a single frame */
1080 nb0_frames = 0; // tracks the number of times the PREVIOUS frame should be duplicated, mostly for variable framerate (VFR)
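/* nb_frames is how many frames will be emitted below; the first nb0_frames of
   them repeat the previous frame instead of next_picture. */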
1083 format_video_sync = video_sync_method;
1084 if (format_video_sync == VSYNC_AUTO) {
1085 if(!strcmp(of->ctx->oformat->name, "avi")) {
1086 format_video_sync = VSYNC_VFR;
1088 format_video_sync = (of->ctx->oformat->flags & AVFMT_VARIABLE_FPS) ? ((of->ctx->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
1090 && format_video_sync == VSYNC_CFR
1091 && input_files[ist->file_index]->ctx->nb_streams == 1
1092 && input_files[ist->file_index]->input_ts_offset == 0) {
1093 format_video_sync = VSYNC_VSCFR;
1095 if (format_video_sync == VSYNC_CFR && copy_ts) {
1096 format_video_sync = VSYNC_VSCFR;
1099 ost->is_cfr = (format_video_sync == VSYNC_CFR || format_video_sync == VSYNC_VSCFR);
1103 format_video_sync != VSYNC_PASSTHROUGH &&
1104 format_video_sync != VSYNC_DROP) {
1105 if (delta0 < -0.6) {
1106 av_log(NULL, AV_LOG_WARNING, "Past duration %f too large\n", -delta0);
1108 av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
1109 sync_ipts = ost->sync_opts;
1114 switch (format_video_sync) {
1116 if (ost->frame_number == 0 && delta0 >= 0.5) {
1117 av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
1120 ost->sync_opts = lrint(sync_ipts);
1123 // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
1124 if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
1126 } else if (delta < -1.1)
1128 else if (delta > 1.1) {
1129 nb_frames = lrintf(delta);
1131 nb0_frames = lrintf(delta0 - 0.6);
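/* Example: delta ~ 2.4 output intervals gives nb_frames = lrintf(2.4) = 2,
   i.e. one duplicated frame to keep the output constant frame rate. */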
1137 else if (delta > 0.6)
1138 ost->sync_opts = lrint(sync_ipts);
1141 case VSYNC_PASSTHROUGH:
1142 ost->sync_opts = lrint(sync_ipts);
1149 nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
1150 nb0_frames = FFMIN(nb0_frames, nb_frames);
1152 memmove(ost->last_nb0_frames + 1,
1153 ost->last_nb0_frames,
1154 sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
1155 ost->last_nb0_frames[0] = nb0_frames;
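/* Keep a three-entry history of nb0_frames; mid_pred() over it is used above
   to guess the repeat count when the filters produced no frame
   (next_picture == NULL). */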
1157 if (nb0_frames == 0 && ost->last_dropped) {
1159 av_log(NULL, AV_LOG_VERBOSE,
1160 "*** dropping frame %d from stream %d at ts %"PRId64"\n",
1161 ost->frame_number, ost->st->index, ost->last_frame->pts);
1163 if (nb_frames > (nb0_frames && ost->last_dropped) + (nb_frames > nb0_frames)) {
1164 if (nb_frames > dts_error_threshold * 30) {
1165 av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
1169 nb_frames_dup += nb_frames - (nb0_frames && ost->last_dropped) - (nb_frames > nb0_frames);
1170 av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
1171 if (nb_frames_dup > dup_warning) {
1172 av_log(NULL, AV_LOG_WARNING, "More than %d frames duplicated\n", dup_warning);
1176 ost->last_dropped = nb_frames == nb0_frames && next_picture;
1178 /* duplicate the frame if needed */
1179 for (i = 0; i < nb_frames; i++) {
1180 AVFrame *in_picture;
1181 av_init_packet(&pkt);
1185 if (i < nb0_frames && ost->last_frame) {
1186 in_picture = ost->last_frame;
1188 in_picture = next_picture;
1193 in_picture->pts = ost->sync_opts;
1196 if (!check_recording_time(ost))
1198 if (ost->frame_number >= ost->max_frames)
1202 #if FF_API_LAVF_FMT_RAWPICTURE
1203 if (of->ctx->oformat->flags & AVFMT_RAWPICTURE &&
1204 enc->codec->id == AV_CODEC_ID_RAWVIDEO) {
1205 /* raw pictures are written as an AVPicture structure to
1206 avoid any copies. We temporarily support the older
1208 if (in_picture->interlaced_frame)
1209 mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1211 mux_par->field_order = AV_FIELD_PROGRESSIVE;
1212 pkt.data = (uint8_t *)in_picture;
1213 pkt.size = sizeof(AVPicture);
1214 pkt.pts = av_rescale_q(in_picture->pts, enc->time_base, ost->mux_timebase);
1215 pkt.flags |= AV_PKT_FLAG_KEY;
1217 output_packet(of, &pkt, ost, 0);
1221 int forced_keyframe = 0;
1224 if (enc->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME) &&
1225 ost->top_field_first >= 0)
1226 in_picture->top_field_first = !!ost->top_field_first;
1228 if (in_picture->interlaced_frame) {
1229 if (enc->codec->id == AV_CODEC_ID_MJPEG)
1230 mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
1232 mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1234 mux_par->field_order = AV_FIELD_PROGRESSIVE;
1236 in_picture->quality = enc->global_quality;
1237 in_picture->pict_type = 0;
1239 pts_time = in_picture->pts != AV_NOPTS_VALUE ?
1240 in_picture->pts * av_q2d(enc->time_base) : NAN;
1241 if (ost->forced_kf_index < ost->forced_kf_count &&
1242 in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1243 ost->forced_kf_index++;
1244 forced_keyframe = 1;
1245 } else if (ost->forced_keyframes_pexpr) {
1247 ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
1248 res = av_expr_eval(ost->forced_keyframes_pexpr,
1249 ost->forced_keyframes_expr_const_values, NULL);
1250 ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
1251 ost->forced_keyframes_expr_const_values[FKF_N],
1252 ost->forced_keyframes_expr_const_values[FKF_N_FORCED],
1253 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N],
1254 ost->forced_keyframes_expr_const_values[FKF_T],
1255 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T],
1258 forced_keyframe = 1;
1259 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] =
1260 ost->forced_keyframes_expr_const_values[FKF_N];
1261 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] =
1262 ost->forced_keyframes_expr_const_values[FKF_T];
1263 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] += 1;
1266 ost->forced_keyframes_expr_const_values[FKF_N] += 1;
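/* These FKF_* values back the constants available to the -force_key_frames
   expression: n, t, n_forced, prev_forced_n and prev_forced_t. */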
1267 } else if ( ost->forced_keyframes
1268 && !strncmp(ost->forced_keyframes, "source", 6)
1269 && in_picture->key_frame==1) {
1270 forced_keyframe = 1;
1273 if (forced_keyframe) {
1274 in_picture->pict_type = AV_PICTURE_TYPE_I;
1275 av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
1278 update_benchmark(NULL);
1280 av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
1281 "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1282 av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
1283 enc->time_base.num, enc->time_base.den);
1286 ost->frames_encoded++;
1288 ret = avcodec_send_frame(enc, in_picture);
1293 ret = avcodec_receive_packet(enc, &pkt);
1294 update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
1295 if (ret == AVERROR(EAGAIN))
1301 av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1302 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1303 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
1304 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
1307 if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY))
1308 pkt.pts = ost->sync_opts;
1310 av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
1313 av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1314 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1315 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->mux_timebase),
1316 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->mux_timebase));
1319 frame_size = pkt.size;
1320 output_packet(of, &pkt, ost, 0);
1322 /* if two pass, output log */
1323 if (ost->logfile && enc->stats_out) {
1324 fprintf(ost->logfile, "%s", enc->stats_out);
1330 * For video, number of frames in == number of packets out.
1331 * But there may be reordering, so we cannot throw away frames on encoder
1332 * flush; we need to limit them here, before they go into the encoder.
1334 ost->frame_number++;
1336 if (vstats_filename && frame_size)
1337 do_video_stats(ost, frame_size);
1340 if (!ost->last_frame)
1341 ost->last_frame = av_frame_alloc();
1342 av_frame_unref(ost->last_frame);
1343 if (next_picture && ost->last_frame)
1344 av_frame_ref(ost->last_frame, next_picture);
1346 av_frame_free(&ost->last_frame);
1350 av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
1354 static double psnr(double d)
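/* d is a mean squared error already normalized by 255^2 (see the callers),
   so -10 * log10(d) is the usual PSNR in dB. */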
1356 return -10.0 * log10(d);
1359 static void do_video_stats(OutputStream *ost, int frame_size)
1361 AVCodecContext *enc;
1363 double ti1, bitrate, avg_bitrate;
1365 /* this is executed just the first time do_video_stats is called */
1367 vstats_file = fopen(vstats_filename, "w");
1375 if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1376 frame_number = ost->st->nb_frames;
1377 if (vstats_version <= 1) {
1378 fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
1379 ost->quality / (float)FF_QP2LAMBDA);
1381 fprintf(vstats_file, "out= %2d st= %2d frame= %5d q= %2.1f ", ost->file_index, ost->index, frame_number,
1382 ost->quality / (float)FF_QP2LAMBDA);
1385 if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
1386 fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
1388 fprintf(vstats_file,"f_size= %6d ", frame_size);
1389 /* compute pts value */
1390 ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
1394 bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
1395 avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
1396 fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
1397 (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
1398 fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
1402 static int init_output_stream(OutputStream *ost, char *error, int error_len);
1404 static void finish_output_stream(OutputStream *ost)
1406 OutputFile *of = output_files[ost->file_index];
1409 ost->finished = ENCODER_FINISHED | MUXER_FINISHED;
1412 for (i = 0; i < of->ctx->nb_streams; i++)
1413 output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED;
1418 * Get and encode new output from any of the filtergraphs, without causing
1421 * @return 0 for success, <0 for severe errors
1423 static int reap_filters(int flush)
1425 AVFrame *filtered_frame = NULL;
1428 /* Reap all buffers present in the buffer sinks */
1429 for (i = 0; i < nb_output_streams; i++) {
1430 OutputStream *ost = output_streams[i];
1431 OutputFile *of = output_files[ost->file_index];
1432 AVFilterContext *filter;
1433 AVCodecContext *enc = ost->enc_ctx;
1436 if (!ost->filter || !ost->filter->graph->graph)
1438 filter = ost->filter->filter;
1440 if (!ost->initialized) {
1441 char error[1024] = "";
1442 ret = init_output_stream(ost, error, sizeof(error));
1444 av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
1445 ost->file_index, ost->index, error);
1450 if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
1451 return AVERROR(ENOMEM);
1453 filtered_frame = ost->filtered_frame;
1456 double float_pts = AV_NOPTS_VALUE; // this is identical to filtered_frame.pts but with higher precision
1457 ret = av_buffersink_get_frame_flags(filter, filtered_frame,
1458 AV_BUFFERSINK_FLAG_NO_REQUEST);
1460 if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
1461 av_log(NULL, AV_LOG_WARNING,
1462 "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
1463 } else if (flush && ret == AVERROR_EOF) {
1464 if (av_buffersink_get_type(filter) == AVMEDIA_TYPE_VIDEO)
1465 do_video_out(of, ost, NULL, AV_NOPTS_VALUE);
1469 if (ost->finished) {
1470 av_frame_unref(filtered_frame);
1473 if (filtered_frame->pts != AV_NOPTS_VALUE) {
1474 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1475 AVRational filter_tb = av_buffersink_get_time_base(filter);
1476 AVRational tb = enc->time_base;
1477 int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
1479 tb.den <<= extra_bits;
1481 av_rescale_q(filtered_frame->pts, filter_tb, tb) -
1482 av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
1483 float_pts /= 1 << extra_bits;
1484 // avoid exact midpoints to reduce the chance of rounding differences; this can be removed once the fps code is changed to work with integers
1485 float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
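/* The pts is rescaled into a time base whose denominator is scaled up by
   2^extra_bits and then divided back down, keeping a fractional pts
   (float_pts) for the frame rate conversion code in do_video_out(). */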
1487 filtered_frame->pts =
1488 av_rescale_q(filtered_frame->pts, filter_tb, enc->time_base) -
1489 av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
1491 //if (ost->source_index >= 0)
1492 // *filtered_frame= *input_streams[ost->source_index]->decoded_frame; //for me_threshold
1494 switch (av_buffersink_get_type(filter)) {
1495 case AVMEDIA_TYPE_VIDEO:
1496 if (!ost->frame_aspect_ratio.num)
1497 enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
1500 av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
1501 av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base),
1503 enc->time_base.num, enc->time_base.den);
1506 do_video_out(of, ost, filtered_frame, float_pts);
1508 case AVMEDIA_TYPE_AUDIO:
1509 if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
1510 enc->channels != filtered_frame->channels) {
1511 av_log(NULL, AV_LOG_ERROR,
1512 "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
1515 do_audio_out(of, ost, filtered_frame);
1518 // TODO support subtitle filters
1522 av_frame_unref(filtered_frame);
1529 static void print_final_stats(int64_t total_size)
1531 uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
1532 uint64_t subtitle_size = 0;
1533 uint64_t data_size = 0;
1534 float percent = -1.0;
1538 for (i = 0; i < nb_output_streams; i++) {
1539 OutputStream *ost = output_streams[i];
1540 switch (ost->enc_ctx->codec_type) {
1541 case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
1542 case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
1543 case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
1544 default: other_size += ost->data_size; break;
1546 extra_size += ost->enc_ctx->extradata_size;
1547 data_size += ost->data_size;
1548 if ( (ost->enc_ctx->flags & (AV_CODEC_FLAG_PASS1 | AV_CODEC_FLAG_PASS2))
1549 != AV_CODEC_FLAG_PASS1)
1553 if (data_size && total_size>0 && total_size >= data_size)
1554 percent = 100.0 * (total_size - data_size) / data_size;
1556 av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
1557 video_size / 1024.0,
1558 audio_size / 1024.0,
1559 subtitle_size / 1024.0,
1560 other_size / 1024.0,
1561 extra_size / 1024.0);
1563 av_log(NULL, AV_LOG_INFO, "%f%%", percent);
1565 av_log(NULL, AV_LOG_INFO, "unknown");
1566 av_log(NULL, AV_LOG_INFO, "\n");
1568 /* print verbose per-stream stats */
1569 for (i = 0; i < nb_input_files; i++) {
1570 InputFile *f = input_files[i];
1571 uint64_t total_packets = 0, total_size = 0;
1573 av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
1574 i, f->ctx->filename);
1576 for (j = 0; j < f->nb_streams; j++) {
1577 InputStream *ist = input_streams[f->ist_index + j];
1578 enum AVMediaType type = ist->dec_ctx->codec_type;
1580 total_size += ist->data_size;
1581 total_packets += ist->nb_packets;
1583 av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
1584 i, j, media_type_string(type));
1585 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
1586 ist->nb_packets, ist->data_size);
1588 if (ist->decoding_needed) {
1589 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
1590 ist->frames_decoded);
1591 if (type == AVMEDIA_TYPE_AUDIO)
1592 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
1593 av_log(NULL, AV_LOG_VERBOSE, "; ");
1596 av_log(NULL, AV_LOG_VERBOSE, "\n");
1599 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
1600 total_packets, total_size);
1603 for (i = 0; i < nb_output_files; i++) {
1604 OutputFile *of = output_files[i];
1605 uint64_t total_packets = 0, total_size = 0;
1607 av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
1608 i, of->ctx->filename);
1610 for (j = 0; j < of->ctx->nb_streams; j++) {
1611 OutputStream *ost = output_streams[of->ost_index + j];
1612 enum AVMediaType type = ost->enc_ctx->codec_type;
1614 total_size += ost->data_size;
1615 total_packets += ost->packets_written;
1617 av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
1618 i, j, media_type_string(type));
1619 if (ost->encoding_needed) {
1620 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
1621 ost->frames_encoded);
1622 if (type == AVMEDIA_TYPE_AUDIO)
1623 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
1624 av_log(NULL, AV_LOG_VERBOSE, "; ");
1627 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
1628 ost->packets_written, ost->data_size);
1630 av_log(NULL, AV_LOG_VERBOSE, "\n");
1633 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
1634 total_packets, total_size);
1636 if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
1637 av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
1639 av_log(NULL, AV_LOG_WARNING, "\n");
1641 av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
1646 static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
1649 AVBPrint buf_script;
1651 AVFormatContext *oc;
1653 AVCodecContext *enc;
1654 int frame_number, vid, i;
1657 int64_t pts = INT64_MIN + 1;
1658 static int64_t last_time = -1;
1659 static int qp_histogram[52];
1660 int hours, mins, secs, us;
1664 if (!print_stats && !is_last_report && !progress_avio)
1667 if (!is_last_report) {
1668 if (last_time == -1) {
1669 last_time = cur_time;
1672 if ((cur_time - last_time) < 500000)
1674 last_time = cur_time;
1677 t = (cur_time-timer_start) / 1000000.0;
1680 oc = output_files[0]->ctx;
1682 total_size = avio_size(oc->pb);
1683 if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
1684 total_size = avio_tell(oc->pb);
1688 av_bprint_init(&buf_script, 0, 1);
1689 for (i = 0; i < nb_output_streams; i++) {
1691 ost = output_streams[i];
1693 if (!ost->stream_copy)
1694 q = ost->quality / (float) FF_QP2LAMBDA;
1696 if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1697 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
1698 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1699 ost->file_index, ost->index, q);
1701 if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1704 frame_number = ost->frame_number;
1705 fps = t > 1 ? frame_number / t : 0;
1706 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3.*f q=%3.1f ",
1707 frame_number, fps < 9.95, fps, q);
1708 av_bprintf(&buf_script, "frame=%d\n", frame_number);
1709 av_bprintf(&buf_script, "fps=%.1f\n", fps);
1710 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1711 ost->file_index, ost->index, q);
1713 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
1717 if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
1719 for (j = 0; j < 32; j++)
1720 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", av_log2(qp_histogram[j] + 1));
1723 if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
1725 double error, error_sum = 0;
1726 double scale, scale_sum = 0;
1728 char type[3] = { 'Y','U','V' };
1729 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
1730 for (j = 0; j < 3; j++) {
1731 if (is_last_report) {
1732 error = enc->error[j];
1733 scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1735 error = ost->error[j];
1736 scale = enc->width * enc->height * 255.0 * 255.0;
1742 p = psnr(error / scale);
1743 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], p);
1744 av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
1745 ost->file_index, ost->index, type[j] | 32, p);
1747 p = psnr(error_sum / scale_sum);
1748 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum / scale_sum));
1749 av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
1750 ost->file_index, ost->index, p);
1754 /* compute min output value */
1755 if (av_stream_get_end_pts(ost->st) != AV_NOPTS_VALUE)
1756 pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
1757 ost->st->time_base, AV_TIME_BASE_Q));
1759 nb_frames_drop += ost->last_dropped;
1762 secs = FFABS(pts) / AV_TIME_BASE;
1763 us = FFABS(pts) % AV_TIME_BASE;
1769 bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
1770 speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;
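/* bitrate: muxed bytes so far over the output timestamp reached, in kbit/s;
   speed: output time advanced per second of wall clock, e.g. 2x means
   transcoding twice as fast as realtime. */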
1772 if (total_size < 0) snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1774 else snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1775 "size=%8.0fkB time=", total_size / 1024.0);
1777 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "-");
1778 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1779 "%02d:%02d:%02d.%02d ", hours, mins, secs,
1780 (100 * us) / AV_TIME_BASE);
1783 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=N/A");
1784 av_bprintf(&buf_script, "bitrate=N/A\n");
1786 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=%6.1fkbits/s", bitrate);
1787 av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
1790 if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
1791 else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
1792 av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
1793 av_bprintf(&buf_script, "out_time=%02d:%02d:%02d.%06d\n",
1794 hours, mins, secs, us);
1796 if (nb_frames_dup || nb_frames_drop)
1797 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " dup=%d drop=%d",
1798 nb_frames_dup, nb_frames_drop);
1799 av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
1800 av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
1803 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=N/A");
1804 av_bprintf(&buf_script, "speed=N/A\n");
1806 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=%4.3gx", speed);
1807 av_bprintf(&buf_script, "speed=%4.3gx\n", speed);
1810 if (print_stats || is_last_report) {
1811 const char end = is_last_report ? '\n' : '\r';
1812 if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
1813 fprintf(stderr, "%s %c", buf, end);
1815 av_log(NULL, AV_LOG_INFO, "%s %c", buf, end);
1820 if (progress_avio) {
1821 av_bprintf(&buf_script, "progress=%s\n",
1822 is_last_report ? "end" : "continue");
1823 avio_write(progress_avio, buf_script.str,
1824 FFMIN(buf_script.len, buf_script.size - 1));
1825 avio_flush(progress_avio);
1826 av_bprint_finalize(&buf_script, NULL);
1827 if (is_last_report) {
1828 if ((ret = avio_closep(&progress_avio)) < 0)
1829 av_log(NULL, AV_LOG_ERROR,
1830 "Error closing progress log, loss of information possible: %s\n", av_err2str(ret));
1835 print_final_stats(total_size);
1838 static void flush_encoders(void)
1842 for (i = 0; i < nb_output_streams; i++) {
1843 OutputStream *ost = output_streams[i];
1844 AVCodecContext *enc = ost->enc_ctx;
1845 OutputFile *of = output_files[ost->file_index];
1847 if (!ost->encoding_needed)
1850 // Try to enable encoding with no input frames.
1851 // Maybe we should just let encoding fail instead.
1852 if (!ost->initialized) {
1853 FilterGraph *fg = ost->filter->graph;
1854 char error[1024] = "";
1856 av_log(NULL, AV_LOG_WARNING,
1857 "Finishing stream %d:%d without any data written to it.\n",
1858 ost->file_index, ost->st->index);
1860 if (ost->filter && !fg->graph) {
1862 for (x = 0; x < fg->nb_inputs; x++) {
1863 InputFilter *ifilter = fg->inputs[x];
1864 if (ifilter->format < 0) {
1865 AVCodecParameters *par = ifilter->ist->st->codecpar;
1866 // We never got any input. Set a fake format, which will
1867 // come from libavformat.
1868 ifilter->format = par->format;
1869 ifilter->sample_rate = par->sample_rate;
1870 ifilter->channels = par->channels;
1871 ifilter->channel_layout = par->channel_layout;
1872 ifilter->width = par->width;
1873 ifilter->height = par->height;
1874 ifilter->sample_aspect_ratio = par->sample_aspect_ratio;
1878 if (!ifilter_has_all_input_formats(fg))
1881 ret = configure_filtergraph(fg);
1883 av_log(NULL, AV_LOG_ERROR, "Error configuring filter graph\n");
1887 finish_output_stream(ost);
1890 ret = init_output_stream(ost, error, sizeof(error));
1892 av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
1893 ost->file_index, ost->index, error);
1898 if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
1900 #if FF_API_LAVF_FMT_RAWPICTURE
1901 if (enc->codec_type == AVMEDIA_TYPE_VIDEO && (of->ctx->oformat->flags & AVFMT_RAWPICTURE) && enc->codec->id == AV_CODEC_ID_RAWVIDEO)
1905 if (enc->codec_type != AVMEDIA_TYPE_VIDEO && enc->codec_type != AVMEDIA_TYPE_AUDIO)
1909 const char *desc = NULL;
1913 switch (enc->codec_type) {
1914 case AVMEDIA_TYPE_AUDIO:
1917 case AVMEDIA_TYPE_VIDEO:
1924 av_init_packet(&pkt);
1928 update_benchmark(NULL);
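/* Flush the encoder: sending a NULL frame enters drain mode, after which
   packets are pulled until the encoder returns AVERROR_EOF. */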
1930 while ((ret = avcodec_receive_packet(enc, &pkt)) == AVERROR(EAGAIN)) {
1931 ret = avcodec_send_frame(enc, NULL);
1933 av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
1940 update_benchmark("flush_%s %d.%d", desc, ost->file_index, ost->index);
1941 if (ret < 0 && ret != AVERROR_EOF) {
1942 av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
1947 if (ost->logfile && enc->stats_out) {
1948 fprintf(ost->logfile, "%s", enc->stats_out);
1950 if (ret == AVERROR_EOF) {
1951 output_packet(of, &pkt, ost, 1);
1954 if (ost->finished & MUXER_FINISHED) {
1955 av_packet_unref(&pkt);
1958 av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
1959 pkt_size = pkt.size;
1960 output_packet(of, &pkt, ost, 0);
1961 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO && vstats_filename) {
1962 do_video_stats(ost, pkt_size);
1969 * Check whether a packet from ist should be written into ost at this time
1971 static int check_output_constraints(InputStream *ist, OutputStream *ost)
1973 OutputFile *of = output_files[ost->file_index];
1974 int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
1976 if (ost->source_index != ist_index)
1982 if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
1988 static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
1990 OutputFile *of = output_files[ost->file_index];
1991 InputFile *f = input_files [ist->file_index];
1992 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1993 int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->mux_timebase);
1997 av_init_packet(&opkt);
1999 if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
2000 !ost->copy_initial_nonkeyframes)
2003 if (!ost->frame_number && !ost->copy_prior_start) {
2004 int64_t comp_start = start_time;
2005 if (copy_ts && f->start_time != AV_NOPTS_VALUE)
2006 comp_start = FFMAX(start_time, f->start_time + f->ts_offset);
2007 if (pkt->pts == AV_NOPTS_VALUE ?
2008 ist->pts < comp_start :
2009 pkt->pts < av_rescale_q(comp_start, AV_TIME_BASE_Q, ist->st->time_base))
2013 if (of->recording_time != INT64_MAX &&
2014 ist->pts >= of->recording_time + start_time) {
2015 close_output_stream(ost);
2019 if (f->recording_time != INT64_MAX) {
2020 start_time = f->ctx->start_time;
2021 if (f->start_time != AV_NOPTS_VALUE && copy_ts)
2022 start_time += f->start_time;
2023 if (ist->pts >= f->recording_time + start_time) {
2024 close_output_stream(ost);
2029 /* force the input stream PTS */
2030 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
2033 if (pkt->pts != AV_NOPTS_VALUE)
2034 opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->mux_timebase) - ost_tb_start_time;
2036 opkt.pts = AV_NOPTS_VALUE;
2038 if (pkt->dts == AV_NOPTS_VALUE)
2039 opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->mux_timebase);
2041 opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->mux_timebase);
2042 opkt.dts -= ost_tb_start_time;
2044 if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && pkt->dts != AV_NOPTS_VALUE) {
2045 int duration = av_get_audio_frame_duration(ist->dec_ctx, pkt->size);
2047 duration = ist->dec_ctx->frame_size;
2048 opkt.dts = opkt.pts = av_rescale_delta(ist->st->time_base, pkt->dts,
2049 (AVRational){1, ist->dec_ctx->sample_rate}, duration, &ist->filter_in_rescale_delta_last,
2050 ost->mux_timebase) - ost_tb_start_time;
2053 opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->mux_timebase);
2055 opkt.flags = pkt->flags;
2056 // FIXME: remove the following 2 lines; they should be replaced by the bitstream filters
2057 if ( ost->st->codecpar->codec_id != AV_CODEC_ID_H264
2058 && ost->st->codecpar->codec_id != AV_CODEC_ID_MPEG1VIDEO
2059 && ost->st->codecpar->codec_id != AV_CODEC_ID_MPEG2VIDEO
2060 && ost->st->codecpar->codec_id != AV_CODEC_ID_VC1
2062 int ret = av_parser_change(ost->parser, ost->parser_avctx,
2063 &opkt.data, &opkt.size,
2064 pkt->data, pkt->size,
2065 pkt->flags & AV_PKT_FLAG_KEY);
2067 av_log(NULL, AV_LOG_FATAL, "av_parser_change failed: %s\n",
2072 opkt.buf = av_buffer_create(opkt.data, opkt.size, av_buffer_default_free, NULL, 0);
2077 opkt.data = pkt->data;
2078 opkt.size = pkt->size;
2080 av_copy_packet_side_data(&opkt, pkt);
2082 #if FF_API_LAVF_FMT_RAWPICTURE
2083 if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO &&
2084 ost->st->codecpar->codec_id == AV_CODEC_ID_RAWVIDEO &&
2085 (of->ctx->oformat->flags & AVFMT_RAWPICTURE)) {
2086 /* store AVPicture in AVPacket, as expected by the output format */
2087 int ret = avpicture_fill(&pict, opkt.data, ost->st->codecpar->format, ost->st->codecpar->width, ost->st->codecpar->height);
2089 av_log(NULL, AV_LOG_FATAL, "avpicture_fill failed: %s\n",
2093 opkt.data = (uint8_t *)&pict;
2094 opkt.size = sizeof(AVPicture);
2095 opkt.flags |= AV_PKT_FLAG_KEY;
2099 output_packet(of, &opkt, ost, 0);
2102 int guess_input_channel_layout(InputStream *ist)
2104 AVCodecContext *dec = ist->dec_ctx;
2106 if (!dec->channel_layout) {
2107 char layout_name[256];
2109 if (dec->channels > ist->guess_layout_max)
2111 dec->channel_layout = av_get_default_channel_layout(dec->channels);
2112 if (!dec->channel_layout)
2114 av_get_channel_layout_string(layout_name, sizeof(layout_name),
2115 dec->channels, dec->channel_layout);
2116 av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
2117 "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
2122 static void check_decode_result(InputStream *ist, int *got_output, int ret)
2124 if (*got_output || ret<0)
2125 decode_error_stat[ret<0] ++;
2127 if (ret < 0 && exit_on_error)
2130 if (exit_on_error && *got_output && ist) {
2131 if (ist->decoded_frame->decode_error_flags || (ist->decoded_frame->flags & AV_FRAME_FLAG_CORRUPT)) {
2132 av_log(NULL, AV_LOG_FATAL, "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->filename, ist->st->index);
2138 // Filters can be configured only if the formats of all inputs are known.
2139 static int ifilter_has_all_input_formats(FilterGraph *fg)
2142 for (i = 0; i < fg->nb_inputs; i++) {
2143 if (fg->inputs[i]->format < 0 && (fg->inputs[i]->type == AVMEDIA_TYPE_AUDIO ||
2144 fg->inputs[i]->type == AVMEDIA_TYPE_VIDEO))
2150 static int ifilter_send_frame(InputFilter *ifilter, AVFrame *frame)
2152 FilterGraph *fg = ifilter->graph;
2153 int need_reinit, ret, i;
2155 /* determine if the parameters for this input changed */
2156 need_reinit = ifilter->format != frame->format;
2157 if (!!ifilter->hw_frames_ctx != !!frame->hw_frames_ctx ||
2158 (ifilter->hw_frames_ctx && ifilter->hw_frames_ctx->data != frame->hw_frames_ctx->data))
2161 switch (ifilter->ist->st->codecpar->codec_type) {
2162 case AVMEDIA_TYPE_AUDIO:
2163 need_reinit |= ifilter->sample_rate != frame->sample_rate ||
2164 ifilter->channels != frame->channels ||
2165 ifilter->channel_layout != frame->channel_layout;
2167 case AVMEDIA_TYPE_VIDEO:
2168 need_reinit |= ifilter->width != frame->width ||
2169 ifilter->height != frame->height;
2174 ret = ifilter_parameters_from_frame(ifilter, frame);
2179 /* (re)init the graph if possible, otherwise buffer the frame and return */
2180 if (need_reinit || !fg->graph) {
2181 for (i = 0; i < fg->nb_inputs; i++) {
2182 if (!ifilter_has_all_input_formats(fg)) {
2183 AVFrame *tmp = av_frame_clone(frame);
2185 return AVERROR(ENOMEM);
2186 av_frame_unref(frame);
2188 if (!av_fifo_space(ifilter->frame_queue)) {
2189 ret = av_fifo_realloc2(ifilter->frame_queue, 2 * av_fifo_size(ifilter->frame_queue));
2191 av_frame_free(&tmp);
2195 av_fifo_generic_write(ifilter->frame_queue, &tmp, sizeof(tmp), NULL);
2200 ret = reap_filters(1);
2201 if (ret < 0 && ret != AVERROR_EOF) {
2203 av_strerror(ret, errbuf, sizeof(errbuf));
2205 av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", errbuf);
2209 ret = configure_filtergraph(fg);
2211 av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
2216 ret = av_buffersrc_add_frame_flags(ifilter->filter, frame, AV_BUFFERSRC_FLAG_PUSH);
2218 if (ret != AVERROR_EOF)
2219 av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
2226 static int ifilter_send_eof(InputFilter *ifilter, int64_t pts)
2232 if (ifilter->filter) {
2233 ret = av_buffersrc_close(ifilter->filter, pts, AV_BUFFERSRC_FLAG_PUSH);
2237 // the filtergraph was never configured
2238 FilterGraph *fg = ifilter->graph;
2239 for (i = 0; i < fg->nb_inputs; i++)
2240 if (!fg->inputs[i]->eof)
2242 if (i == fg->nb_inputs) {
2243 // All the input streams have finished without the filtergraph
2244 // ever being configured.
2245 // Mark the output streams as finished.
2246 for (j = 0; j < fg->nb_outputs; j++)
2247 finish_output_stream(fg->outputs[j]->ost);
2254 // This does not quite work like avcodec_decode_audio4/avcodec_decode_video2.
2255 // There is the following difference: if you got a frame, you must call
2256 // it again with pkt=NULL. pkt==NULL is treated differently from pkt->size==0
2257 // (pkt==NULL means get more output, pkt->size==0 is a flush/drain packet)
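// A rough sketch of the resulting calling convention, as used by
// decode_audio()/decode_video() below:
//
//     ret = decode(avctx, frame, &got_frame, pkt);       // first call: with the packet
//     while (ret >= 0 && got_frame) {
//         /* ... consume frame ... */
//         ret = decode(avctx, frame, &got_frame, NULL);  // then drain with pkt == NULL
//     }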
2258 static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
2265 ret = avcodec_send_packet(avctx, pkt);
2266 // In particular, we don't expect AVERROR(EAGAIN), because we read all
2267 // decoded frames with avcodec_receive_frame() until done.
2268 if (ret < 0 && ret != AVERROR_EOF)
2272 ret = avcodec_receive_frame(avctx, frame);
2273 if (ret < 0 && ret != AVERROR(EAGAIN))
2281 static int send_frame_to_filters(InputStream *ist, AVFrame *decoded_frame)
2286 av_assert1(ist->nb_filters > 0); /* ensure ret is initialized */
2287 for (i = 0; i < ist->nb_filters; i++) {
2288 if (i < ist->nb_filters - 1) {
2289 f = ist->filter_frame;
2290 ret = av_frame_ref(f, decoded_frame);
2295 ret = ifilter_send_frame(ist->filters[i], f);
2296 if (ret == AVERROR_EOF)
2297 ret = 0; /* ignore */
2299 av_log(NULL, AV_LOG_ERROR,
2300 "Failed to inject frame into filter network: %s\n", av_err2str(ret));
2307 static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output,
2310 AVFrame *decoded_frame;
2311 AVCodecContext *avctx = ist->dec_ctx;
2313 AVRational decoded_frame_tb;
2315 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2316 return AVERROR(ENOMEM);
2317 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2318 return AVERROR(ENOMEM);
2319 decoded_frame = ist->decoded_frame;
2321 update_benchmark(NULL);
2322 ret = decode(avctx, decoded_frame, got_output, pkt);
2323 update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
2327 if (ret >= 0 && avctx->sample_rate <= 0) {
2328 av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
2329 ret = AVERROR_INVALIDDATA;
2332 if (ret != AVERROR_EOF)
2333 check_decode_result(ist, got_output, ret);
2335 if (!*got_output || ret < 0)
2338 ist->samples_decoded += decoded_frame->nb_samples;
2339 ist->frames_decoded++;
2342 /* increment next_dts to use for the case where the input stream does not
2343 have timestamps or there are multiple frames in the packet */
2344 ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2346 ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2350 if (decoded_frame->pts != AV_NOPTS_VALUE) {
2351 decoded_frame_tb = ist->st->time_base;
2352 } else if (pkt && pkt->pts != AV_NOPTS_VALUE) {
2353 decoded_frame->pts = pkt->pts;
2354 decoded_frame_tb = ist->st->time_base;
2356 decoded_frame->pts = ist->dts;
2357 decoded_frame_tb = AV_TIME_BASE_Q;
2359 if (decoded_frame->pts != AV_NOPTS_VALUE)
2360 decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
2361 (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
2362 (AVRational){1, avctx->sample_rate});
2363 ist->nb_samples = decoded_frame->nb_samples;
2364 err = send_frame_to_filters(ist, decoded_frame);
2366 av_frame_unref(ist->filter_frame);
2367 av_frame_unref(decoded_frame);
2368 return err < 0 ? err : ret;
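/* Decode one video packet (pkt == NULL drains the decoder at EOF). Packet
 * DTS values are remembered in ist->dts_buffer so that frames emitted while
 * draining, which may lack a best-effort timestamp, can still be assigned one. */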
2371 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_t *duration_pts, int eof,
2374 AVFrame *decoded_frame;
2375 int i, ret = 0, err = 0;
2376 int64_t best_effort_timestamp;
2377 int64_t dts = AV_NOPTS_VALUE;
2380 // With fate-indeo3-2, we're getting 0-sized packets before EOF for some
2381 // reason. This seems like a semi-critical bug. Don't trigger EOF, and skip such packets.
2383 if (!eof && pkt && pkt->size == 0)
2386 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2387 return AVERROR(ENOMEM);
2388 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2389 return AVERROR(ENOMEM);
2390 decoded_frame = ist->decoded_frame;
2391 if (ist->dts != AV_NOPTS_VALUE)
2392 dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
2395 avpkt.dts = dts; // ffmpeg.c probably shouldn't do this
2398 // The old code used to set dts on the drain packet, which does not work
2399 // with the new API anymore.
2401 void *new = av_realloc_array(ist->dts_buffer, ist->nb_dts_buffer + 1, sizeof(ist->dts_buffer[0]));
2403 return AVERROR(ENOMEM);
2404 ist->dts_buffer = new;
2405 ist->dts_buffer[ist->nb_dts_buffer++] = dts;
2408 update_benchmark(NULL);
2409 ret = decode(ist->dec_ctx, decoded_frame, got_output, pkt ? &avpkt : NULL);
2410 update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
2414 // The following line may be required in some cases where there is no parser
2415 // or the parser does not set has_b_frames correctly
2416 if (ist->st->codecpar->video_delay < ist->dec_ctx->has_b_frames) {
2417 if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
2418 ist->st->codecpar->video_delay = ist->dec_ctx->has_b_frames;
2420 av_log(ist->dec_ctx, AV_LOG_WARNING,
2421 "video_delay is larger in the decoder than in the demuxer (%d > %d).\n"
2422 "If you want to help, upload a sample "
2423 "of this file to ftp://upload.ffmpeg.org/incoming/ "
2424 "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)\n",
2425 ist->dec_ctx->has_b_frames,
2426 ist->st->codecpar->video_delay);
2429 if (ret != AVERROR_EOF)
2430 check_decode_result(ist, got_output, ret);
2432 if (*got_output && ret >= 0) {
2433 if (ist->dec_ctx->width != decoded_frame->width ||
2434 ist->dec_ctx->height != decoded_frame->height ||
2435 ist->dec_ctx->pix_fmt != decoded_frame->format) {
2436 av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
2437 decoded_frame->width,
2438 decoded_frame->height,
2439 decoded_frame->format,
2440 ist->dec_ctx->width,
2441 ist->dec_ctx->height,
2442 ist->dec_ctx->pix_fmt);
2446 if (!*got_output || ret < 0)
2449 if(ist->top_field_first>=0)
2450 decoded_frame->top_field_first = ist->top_field_first;
2452 ist->frames_decoded++;
2454 if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
2455 err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
2459 ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
2461 best_effort_timestamp= decoded_frame->best_effort_timestamp;
2462 *duration_pts = decoded_frame->pkt_duration;
2464 if (ist->framerate.num)
2465 best_effort_timestamp = ist->cfr_next_pts++;
2467 if (eof && best_effort_timestamp == AV_NOPTS_VALUE && ist->nb_dts_buffer > 0) {
2468 best_effort_timestamp = ist->dts_buffer[0];
2470 for (i = 0; i < ist->nb_dts_buffer - 1; i++)
2471 ist->dts_buffer[i] = ist->dts_buffer[i + 1];
2472 ist->nb_dts_buffer--;
2475 if(best_effort_timestamp != AV_NOPTS_VALUE) {
2476 int64_t ts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
2478 if (ts != AV_NOPTS_VALUE)
2479 ist->next_pts = ist->pts = ts;
2483 av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
2484 "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
2485 ist->st->index, av_ts2str(decoded_frame->pts),
2486 av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
2487 best_effort_timestamp,
2488 av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
2489 decoded_frame->key_frame, decoded_frame->pict_type,
2490 ist->st->time_base.num, ist->st->time_base.den);
2493 if (ist->st->sample_aspect_ratio.num)
2494 decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
2496 err = send_frame_to_filters(ist, decoded_frame);
2499 av_frame_unref(ist->filter_frame);
2500 av_frame_unref(decoded_frame);
2501 return err < 0 ? err : ret;
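/* Decode one subtitle packet. With -fix_sub_duration the previous subtitle's
 * display time is clipped so it ends when the new one starts; decoded
 * subtitles are fed to sub2video (when the stream drives a filtergraph) and
 * to every subtitle encoder that uses this stream as its source. */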
2504 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output,
2507 AVSubtitle subtitle;
2509 int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
2510 &subtitle, got_output, pkt);
2512 check_decode_result(NULL, got_output, ret);
2514 if (ret < 0 || !*got_output) {
2517 sub2video_flush(ist);
2521 if (ist->fix_sub_duration) {
2523 if (ist->prev_sub.got_output) {
2524 end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
2525 1000, AV_TIME_BASE);
2526 if (end < ist->prev_sub.subtitle.end_display_time) {
2527 av_log(ist->dec_ctx, AV_LOG_DEBUG,
2528 "Subtitle duration reduced from %"PRId32" to %d%s\n",
2529 ist->prev_sub.subtitle.end_display_time, end,
2530 end <= 0 ? ", dropping it" : "");
2531 ist->prev_sub.subtitle.end_display_time = end;
2534 FFSWAP(int, *got_output, ist->prev_sub.got_output);
2535 FFSWAP(int, ret, ist->prev_sub.ret);
2536 FFSWAP(AVSubtitle, subtitle, ist->prev_sub.subtitle);
2544 if (ist->sub2video.frame) {
2545 sub2video_update(ist, &subtitle);
2546 } else if (ist->nb_filters) {
2547 if (!ist->sub2video.sub_queue)
2548 ist->sub2video.sub_queue = av_fifo_alloc(8 * sizeof(AVSubtitle));
2549 if (!ist->sub2video.sub_queue)
2551 if (!av_fifo_space(ist->sub2video.sub_queue)) {
2552 ret = av_fifo_realloc2(ist->sub2video.sub_queue, 2 * av_fifo_size(ist->sub2video.sub_queue));
2556 av_fifo_generic_write(ist->sub2video.sub_queue, &subtitle, sizeof(subtitle), NULL);
2560 if (!subtitle.num_rects)
2563 ist->frames_decoded++;
2565 for (i = 0; i < nb_output_streams; i++) {
2566 OutputStream *ost = output_streams[i];
2568 if (!check_output_constraints(ist, ost) || !ost->encoding_needed
2569 || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
2572 do_subtitle_out(output_files[ost->file_index], ost, &subtitle);
2577 avsubtitle_free(&subtitle);
2581 static int send_filter_eof(InputStream *ist)
2584 /* TODO keep pts also in stream time base to avoid converting back */
2585 int64_t pts = av_rescale_q_rnd(ist->pts, AV_TIME_BASE_Q, ist->st->time_base,
2586 AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX);
2588 for (i = 0; i < ist->nb_filters; i++) {
2589 ret = ifilter_send_eof(ist->filters[i], pts);
2596 /* pkt = NULL means EOF (needed to flush decoder buffers) */
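/* Timestamp bookkeeping below (ist->pts/dts, next_pts/next_dts) is kept in
 * AV_TIME_BASE units (microseconds). For example, a dts of 9000 in a
 * 1/90000 stream time base becomes
 *     av_rescale_q(9000, (AVRational){1, 90000}, AV_TIME_BASE_Q) == 100000
 * i.e. 0.1 seconds. */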
2597 static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
2601 int eof_reached = 0;
2604 if (!ist->saw_first_ts) {
2605 ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2607 if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
2608 ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2609 ist->pts = ist->dts; // unused, but better to set it to a value that's not totally wrong
2611 ist->saw_first_ts = 1;
2614 if (ist->next_dts == AV_NOPTS_VALUE)
2615 ist->next_dts = ist->dts;
2616 if (ist->next_pts == AV_NOPTS_VALUE)
2617 ist->next_pts = ist->pts;
2621 av_init_packet(&avpkt);
2628 if (pkt && pkt->dts != AV_NOPTS_VALUE) {
2629 ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2630 if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
2631 ist->next_pts = ist->pts = ist->dts;
2634 // while we have more to decode or while the decoder did output something on EOF
2635 while (ist->decoding_needed) {
2636 int64_t duration_dts = 0;
2637 int64_t duration_pts = 0;
2639 int decode_failed = 0;
2641 ist->pts = ist->next_pts;
2642 ist->dts = ist->next_dts;
2644 switch (ist->dec_ctx->codec_type) {
2645 case AVMEDIA_TYPE_AUDIO:
2646 ret = decode_audio (ist, repeating ? NULL : &avpkt, &got_output,
2649 case AVMEDIA_TYPE_VIDEO:
2650 ret = decode_video (ist, repeating ? NULL : &avpkt, &got_output, &duration_pts, !pkt,
2652 if (!repeating || !pkt || got_output) {
2653 if (pkt && pkt->duration) {
2654 duration_dts = av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2655 } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
2656 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict+1 : ist->dec_ctx->ticks_per_frame;
2657 duration_dts = ((int64_t)AV_TIME_BASE *
2658 ist->dec_ctx->framerate.den * ticks) /
2659 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2662 if(ist->dts != AV_NOPTS_VALUE && duration_dts) {
2663 ist->next_dts += duration_dts;
2665 ist->next_dts = AV_NOPTS_VALUE;
2669 if (duration_pts > 0) {
2670 ist->next_pts += av_rescale_q(duration_pts, ist->st->time_base, AV_TIME_BASE_Q);
2672 ist->next_pts += duration_dts;
2676 case AVMEDIA_TYPE_SUBTITLE:
2679 ret = transcode_subtitles(ist, &avpkt, &got_output, &decode_failed);
2680 if (!pkt && ret >= 0)
2687 if (ret == AVERROR_EOF) {
2693 if (decode_failed) {
2694 av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
2695 ist->file_index, ist->st->index, av_err2str(ret));
2697 av_log(NULL, AV_LOG_FATAL, "Error while processing the decoded "
2698 "data for stream #%d:%d\n", ist->file_index, ist->st->index);
2700 if (!decode_failed || exit_on_error)
2706 ist->got_output = 1;
2711 // During draining, we might get multiple output frames in this loop.
2712 // ffmpeg.c does not drain the filter chain on configuration changes,
2713 // which means if we send multiple frames at once to the filters, and
2714 // one of those frames changes configuration, the buffered frames will
2715 // be lost. This can upset certain FATE tests.
2716 // Decode only 1 frame per call on EOF to appease these FATE tests.
2717 // The ideal solution would be to rewrite decoding to use the new
2718 // decoding API in a better way.
2725 /* after flushing, send an EOF on all the filter inputs attached to the stream */
2726 /* except when looping, where we need to flush but not send an EOF */
2727 if (!pkt && ist->decoding_needed && eof_reached && !no_eof) {
2728 int ret = send_filter_eof(ist);
2730 av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
2735 /* handle stream copy */
2736 if (!ist->decoding_needed) {
2737 ist->dts = ist->next_dts;
2738 switch (ist->dec_ctx->codec_type) {
2739 case AVMEDIA_TYPE_AUDIO:
2740 ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
2741 ist->dec_ctx->sample_rate;
2743 case AVMEDIA_TYPE_VIDEO:
2744 if (ist->framerate.num) {
2745 // TODO: Remove work-around for c99-to-c89 issue 7
2746 AVRational time_base_q = AV_TIME_BASE_Q;
2747 int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
2748 ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
2749 } else if (pkt->duration) {
2750 ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2751 } else if(ist->dec_ctx->framerate.num != 0) {
2752 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2753 ist->next_dts += ((int64_t)AV_TIME_BASE *
2754 ist->dec_ctx->framerate.den * ticks) /
2755 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2759 ist->pts = ist->dts;
2760 ist->next_pts = ist->next_dts;
2762 for (i = 0; pkt && i < nb_output_streams; i++) {
2763 OutputStream *ost = output_streams[i];
2765 if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2768 do_streamcopy(ist, ost, pkt);
2771 return !eof_reached;
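/* Print a single SDP description covering every RTP output (or write it to
 * the file given with -sdp_file) once all of those outputs have written
 * their headers. */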
2774 static void print_sdp(void)
2779 AVIOContext *sdp_pb;
2780 AVFormatContext **avc;
2782 for (i = 0; i < nb_output_files; i++) {
2783 if (!output_files[i]->header_written)
2787 avc = av_malloc_array(nb_output_files, sizeof(*avc));
2790 for (i = 0, j = 0; i < nb_output_files; i++) {
2791 if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
2792 avc[j] = output_files[i]->ctx;
2800 av_sdp_create(avc, j, sdp, sizeof(sdp));
2802 if (!sdp_filename) {
2803 printf("SDP:\n%s\n", sdp);
2806 if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
2807 av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
2809 avio_printf(sdp_pb, "SDP:\n%s", sdp);
2810 avio_closep(&sdp_pb);
2811 av_freep(&sdp_filename);
2819 static const HWAccel *get_hwaccel(enum AVPixelFormat pix_fmt)
2822 for (i = 0; hwaccels[i].name; i++)
2823 if (hwaccels[i].pix_fmt == pix_fmt)
2824 return &hwaccels[i];
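/* AVCodecContext.get_format callback: scan the decoder's candidate pixel
 * formats for a hwaccel that matches the user's -hwaccel selection and can
 * be initialized for this stream, remembering the chosen hwaccel and pixel
 * format on the InputStream. */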
2828 static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
2830 InputStream *ist = s->opaque;
2831 const enum AVPixelFormat *p;
2834 for (p = pix_fmts; *p != -1; p++) {
2835 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p);
2836 const HWAccel *hwaccel;
2838 if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
2841 hwaccel = get_hwaccel(*p);
2843 (ist->active_hwaccel_id && ist->active_hwaccel_id != hwaccel->id) ||
2844 (ist->hwaccel_id != HWACCEL_AUTO && ist->hwaccel_id != hwaccel->id))
2847 ret = hwaccel->init(s);
2849 if (ist->hwaccel_id == hwaccel->id) {
2850 av_log(NULL, AV_LOG_FATAL,
2851 "%s hwaccel requested for input stream #%d:%d, "
2852 "but cannot be initialized.\n", hwaccel->name,
2853 ist->file_index, ist->st->index);
2854 return AV_PIX_FMT_NONE;
2859 if (ist->hw_frames_ctx) {
2860 s->hw_frames_ctx = av_buffer_ref(ist->hw_frames_ctx);
2861 if (!s->hw_frames_ctx)
2862 return AV_PIX_FMT_NONE;
2865 ist->active_hwaccel_id = hwaccel->id;
2866 ist->hwaccel_pix_fmt = *p;
2873 static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
2875 InputStream *ist = s->opaque;
2877 if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
2878 return ist->hwaccel_get_buffer(s, frame, flags);
2880 return avcodec_default_get_buffer2(s, frame, flags);
2883 static int init_input_stream(int ist_index, char *error, int error_len)
2886 InputStream *ist = input_streams[ist_index];
2888 if (ist->decoding_needed) {
2889 AVCodec *codec = ist->dec;
2891 snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
2892 avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
2893 return AVERROR(EINVAL);
2896 ist->dec_ctx->opaque = ist;
2897 ist->dec_ctx->get_format = get_format;
2898 ist->dec_ctx->get_buffer2 = get_buffer;
2899 ist->dec_ctx->thread_safe_callbacks = 1;
2901 av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
2902 if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
2903 (ist->decoding_needed & DECODING_FOR_OST)) {
2904 av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
2905 if (ist->decoding_needed & DECODING_FOR_FILTER)
2906 av_log(NULL, AV_LOG_WARNING, "Warning: using DVB subtitles for filtering and output at the same time is not fully supported; also see -compute_edt [0|1]\n");
2909 av_dict_set(&ist->decoder_opts, "sub_text_format", "ass", AV_DICT_DONT_OVERWRITE);
2911 /* Useful for subtitles retiming by lavf (FIXME), skipping samples in
2912 * audio, and video decoders such as cuvid or mediacodec */
2913 av_codec_set_pkt_timebase(ist->dec_ctx, ist->st->time_base);
2915 if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
2916 av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
2917 /* Attached pics are sparse, therefore we would not want to delay their decoding till EOF. */
2918 if (ist->st->disposition & AV_DISPOSITION_ATTACHED_PIC)
2919 av_dict_set(&ist->decoder_opts, "threads", "1", 0);
2921 ret = hw_device_setup_for_decode(ist);
2923 snprintf(error, error_len, "Device setup failed for "
2924 "decoder on input stream #%d:%d : %s",
2925 ist->file_index, ist->st->index, av_err2str(ret));
2929 if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
2930 if (ret == AVERROR_EXPERIMENTAL)
2931 abort_codec_experimental(codec, 0);
2933 snprintf(error, error_len,
2934 "Error while opening decoder for input stream "
2936 ist->file_index, ist->st->index, av_err2str(ret));
2939 assert_avoptions(ist->decoder_opts);
2942 ist->next_pts = AV_NOPTS_VALUE;
2943 ist->next_dts = AV_NOPTS_VALUE;
2948 static InputStream *get_input_stream(OutputStream *ost)
2950 if (ost->source_index >= 0)
2951 return input_streams[ost->source_index];
2955 static int compare_int64(const void *a, const void *b)
2957 return FFDIFFSIGN(*(const int64_t *)a, *(const int64_t *)b);
2960 /* open the muxer when all the streams are initialized */
2961 static int check_init_output_file(OutputFile *of, int file_index)
2965 for (i = 0; i < of->ctx->nb_streams; i++) {
2966 OutputStream *ost = output_streams[of->ost_index + i];
2967 if (!ost->initialized)
2971 of->ctx->interrupt_callback = int_cb;
2973 ret = avformat_write_header(of->ctx, &of->opts);
2975 av_log(NULL, AV_LOG_ERROR,
2976 "Could not write header for output file #%d "
2977 "(incorrect codec parameters?): %s\n",
2978 file_index, av_err2str(ret));
2981 //assert_avoptions(of->opts);
2982 of->header_written = 1;
2984 av_dump_format(of->ctx, file_index, of->ctx->filename, 1);
2986 if (sdp_filename || want_sdp)
2989 /* flush the muxing queues */
2990 for (i = 0; i < of->ctx->nb_streams; i++) {
2991 OutputStream *ost = output_streams[of->ost_index + i];
2993 /* try to improve muxing time_base (only possible if nothing has been written yet) */
2994 if (!av_fifo_size(ost->muxing_queue))
2995 ost->mux_timebase = ost->st->time_base;
2997 while (av_fifo_size(ost->muxing_queue)) {
2999 av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
3000 write_packet(of, &pkt, ost, 1);
3007 static int init_output_bsfs(OutputStream *ost)
3012 if (!ost->nb_bitstream_filters)
3015 for (i = 0; i < ost->nb_bitstream_filters; i++) {
3016 ctx = ost->bsf_ctx[i];
3018 ret = avcodec_parameters_copy(ctx->par_in,
3019 i ? ost->bsf_ctx[i - 1]->par_out : ost->st->codecpar);
3023 ctx->time_base_in = i ? ost->bsf_ctx[i - 1]->time_base_out : ost->st->time_base;
3025 ret = av_bsf_init(ctx);
3027 av_log(NULL, AV_LOG_ERROR, "Error initializing bitstream filter: %s\n",
3028 ost->bsf_ctx[i]->filter->name);
3033 ctx = ost->bsf_ctx[ost->nb_bitstream_filters - 1];
3034 ret = avcodec_parameters_copy(ost->st->codecpar, ctx->par_out);
3038 ost->st->time_base = ctx->time_base_out;
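/* Set up an output stream in streamcopy mode: copy the codec parameters from
 * the input stream, keep the codec tag only if the output container can
 * represent it, and carry over frame rate, timing information, disposition
 * and stream side data, plus a few container-specific fixups below. */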
3043 static int init_output_stream_streamcopy(OutputStream *ost)
3045 OutputFile *of = output_files[ost->file_index];
3046 InputStream *ist = get_input_stream(ost);
3047 AVCodecParameters *par_dst = ost->st->codecpar;
3048 AVCodecParameters *par_src = ost->ref_par;
3051 uint32_t codec_tag = par_dst->codec_tag;
3053 av_assert0(ist && !ost->filter);
3055 ret = avcodec_parameters_to_context(ost->enc_ctx, ist->st->codecpar);
3057 ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
3059 av_log(NULL, AV_LOG_FATAL,
3060 "Error setting up codec context options.\n");
3063 avcodec_parameters_from_context(par_src, ost->enc_ctx);
3066 unsigned int codec_tag_tmp;
3067 if (!of->ctx->oformat->codec_tag ||
3068 av_codec_get_id (of->ctx->oformat->codec_tag, par_src->codec_tag) == par_src->codec_id ||
3069 !av_codec_get_tag2(of->ctx->oformat->codec_tag, par_src->codec_id, &codec_tag_tmp))
3070 codec_tag = par_src->codec_tag;
3073 ret = avcodec_parameters_copy(par_dst, par_src);
3077 par_dst->codec_tag = codec_tag;
3079 if (!ost->frame_rate.num)
3080 ost->frame_rate = ist->framerate;
3081 ost->st->avg_frame_rate = ost->frame_rate;
3083 ret = avformat_transfer_internal_stream_timing_info(of->ctx->oformat, ost->st, ist->st, copy_tb);
3087 // copy timebase while removing common factors
3088 if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
3089 ost->st->time_base = av_add_q(av_stream_get_codec_timebase(ost->st), (AVRational){0, 1});
3091 // copy estimated duration as a hint to the muxer
3092 if (ost->st->duration <= 0 && ist->st->duration > 0)
3093 ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3096 ost->st->disposition = ist->st->disposition;
3098 if (ist->st->nb_side_data) {
3099 for (i = 0; i < ist->st->nb_side_data; i++) {
3100 const AVPacketSideData *sd_src = &ist->st->side_data[i];
3103 dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
3105 return AVERROR(ENOMEM);
3106 memcpy(dst_data, sd_src->data, sd_src->size);
3110 if (ost->rotate_overridden) {
3111 uint8_t *sd = av_stream_new_side_data(ost->st, AV_PKT_DATA_DISPLAYMATRIX,
3112 sizeof(int32_t) * 9);
3114 av_display_rotation_set((int32_t *)sd, -ost->rotate_override_value);
3117 ost->parser = av_parser_init(par_dst->codec_id);
3118 ost->parser_avctx = avcodec_alloc_context3(NULL);
3119 if (!ost->parser_avctx)
3120 return AVERROR(ENOMEM);
3122 switch (par_dst->codec_type) {
3123 case AVMEDIA_TYPE_AUDIO:
3124 if (audio_volume != 256) {
3125 av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
3128 if((par_dst->block_align == 1 || par_dst->block_align == 1152 || par_dst->block_align == 576) && par_dst->codec_id == AV_CODEC_ID_MP3)
3129 par_dst->block_align= 0;
3130 if(par_dst->codec_id == AV_CODEC_ID_AC3)
3131 par_dst->block_align= 0;
3133 case AVMEDIA_TYPE_VIDEO:
3134 if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
3136 av_mul_q(ost->frame_aspect_ratio,
3137 (AVRational){ par_dst->height, par_dst->width });
3138 av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
3139 "with stream copy may produce invalid files\n");
3141 else if (ist->st->sample_aspect_ratio.num)
3142 sar = ist->st->sample_aspect_ratio;
3144 sar = par_src->sample_aspect_ratio;
3145 ost->st->sample_aspect_ratio = par_dst->sample_aspect_ratio = sar;
3146 ost->st->avg_frame_rate = ist->st->avg_frame_rate;
3147 ost->st->r_frame_rate = ist->st->r_frame_rate;
3151 ost->mux_timebase = ist->st->time_base;
3156 static void set_encoder_id(OutputFile *of, OutputStream *ost)
3158 AVDictionaryEntry *e;
3160 uint8_t *encoder_string;
3161 int encoder_string_len;
3162 int format_flags = 0;
3163 int codec_flags = 0;
3165 if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
3168 e = av_dict_get(of->opts, "fflags", NULL, 0);
3170 const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
3173 av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
3175 e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
3177 const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
3180 av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
3183 encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
3184 encoder_string = av_mallocz(encoder_string_len);
3185 if (!encoder_string)
3188 if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
3189 av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
3191 av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
3192 av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
3193 av_dict_set(&ost->st->metadata, "encoder", encoder_string,
3194 AV_DICT_DONT_STRDUP_VAL | AV_DICT_DONT_OVERWRITE);
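/* Parse the -force_key_frames time list into a sorted array of pts values
 * (in the encoder time base) stored on the OutputStream. An entry starting
 * with "chapters" expands to one forced keyframe per chapter start, with an
 * optional offset, e.g. "chapters-0.1". */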
3197 static void parse_forced_key_frames(char *kf, OutputStream *ost,
3198 AVCodecContext *avctx)
3201 int n = 1, i, size, index = 0;
3204 for (p = kf; *p; p++)
3208 pts = av_malloc_array(size, sizeof(*pts));
3210 av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
3215 for (i = 0; i < n; i++) {
3216 char *next = strchr(p, ',');
3221 if (!memcmp(p, "chapters", 8)) {
3223 AVFormatContext *avf = output_files[ost->file_index]->ctx;
3226 if (avf->nb_chapters > INT_MAX - size ||
3227 !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
3229 av_log(NULL, AV_LOG_FATAL,
3230 "Could not allocate forced key frames array.\n");
3233 t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
3234 t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3236 for (j = 0; j < avf->nb_chapters; j++) {
3237 AVChapter *c = avf->chapters[j];
3238 av_assert1(index < size);
3239 pts[index++] = av_rescale_q(c->start, c->time_base,
3240 avctx->time_base) + t;
3245 t = parse_time_or_die("force_key_frames", p, 1);
3246 av_assert1(index < size);
3247 pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3254 av_assert0(index == size);
3255 qsort(pts, size, sizeof(*pts), compare_int64);
3256 ost->forced_kf_count = size;
3257 ost->forced_kf_pts = pts;
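/* Pick the encoder time base: an explicit -enc_time_base value wins, a
 * negative value means "copy the input stream's time base", and otherwise
 * the caller-supplied default is used (1/sample_rate for audio,
 * 1/frame_rate for video; see the call sites below). */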
3260 static void init_encoder_time_base(OutputStream *ost, AVRational default_time_base)
3262 InputStream *ist = get_input_stream(ost);
3263 AVCodecContext *enc_ctx = ost->enc_ctx;
3264 AVFormatContext *oc;
3266 if (ost->enc_timebase.num > 0) {
3267 enc_ctx->time_base = ost->enc_timebase;
3271 if (ost->enc_timebase.num < 0) {
3273 enc_ctx->time_base = ist->st->time_base;
3277 oc = output_files[ost->file_index]->ctx;
3278 av_log(oc, AV_LOG_WARNING, "Input stream data not available, using default time base\n");
3281 enc_ctx->time_base = default_time_base;
3284 static int init_output_stream_encode(OutputStream *ost)
3286 InputStream *ist = get_input_stream(ost);
3287 AVCodecContext *enc_ctx = ost->enc_ctx;
3288 AVCodecContext *dec_ctx = NULL;
3289 AVFormatContext *oc = output_files[ost->file_index]->ctx;
3292 set_encoder_id(output_files[ost->file_index], ost);
3294 // Muxers use AV_PKT_DATA_DISPLAYMATRIX to signal rotation. On the other
3295 // hand, the legacy API makes demuxers set "rotate" metadata entries,
3296 // which have to be filtered out to prevent leaking them to output files.
3297 av_dict_set(&ost->st->metadata, "rotate", NULL, 0);
3300 ost->st->disposition = ist->st->disposition;
3302 dec_ctx = ist->dec_ctx;
3304 enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
3306 for (j = 0; j < oc->nb_streams; j++) {
3307 AVStream *st = oc->streams[j];
3308 if (st != ost->st && st->codecpar->codec_type == ost->st->codecpar->codec_type)
3311 if (j == oc->nb_streams)
3312 if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO ||
3313 ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
3314 ost->st->disposition = AV_DISPOSITION_DEFAULT;
3317 if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
3318 if (!ost->frame_rate.num)
3319 ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
3320 if (ist && !ost->frame_rate.num)
3321 ost->frame_rate = ist->framerate;
3322 if (ist && !ost->frame_rate.num)
3323 ost->frame_rate = ist->st->r_frame_rate;
3324 if (ist && !ost->frame_rate.num) {
3325 ost->frame_rate = (AVRational){25, 1};
3326 av_log(NULL, AV_LOG_WARNING,
3328 "No information about the input framerate is available. Falling "
3329 "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
3330 "if you want a different framerate.\n",
3331 ost->file_index, ost->index);
3333 // ost->frame_rate = ist->st->avg_frame_rate.num ? ist->st->avg_frame_rate : (AVRational){25, 1};
3334 if (ost->enc->supported_framerates && !ost->force_fps) {
3335 int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
3336 ost->frame_rate = ost->enc->supported_framerates[idx];
3338 // reduce frame rate for mpeg4 to be within the spec limits
3339 if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
3340 av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
3341 ost->frame_rate.num, ost->frame_rate.den, 65535);
3345 switch (enc_ctx->codec_type) {
3346 case AVMEDIA_TYPE_AUDIO:
3347 enc_ctx->sample_fmt = av_buffersink_get_format(ost->filter->filter);
3349 enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3350 av_get_bytes_per_sample(enc_ctx->sample_fmt) << 3);
3351 enc_ctx->sample_rate = av_buffersink_get_sample_rate(ost->filter->filter);
3352 enc_ctx->channel_layout = av_buffersink_get_channel_layout(ost->filter->filter);
3353 enc_ctx->channels = av_buffersink_get_channels(ost->filter->filter);
3355 init_encoder_time_base(ost, av_make_q(1, enc_ctx->sample_rate));
3358 case AVMEDIA_TYPE_VIDEO:
3359 init_encoder_time_base(ost, av_inv_q(ost->frame_rate));
3361 if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
3362 enc_ctx->time_base = av_buffersink_get_time_base(ost->filter->filter);
3363 if ( av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
3364 && (video_sync_method == VSYNC_CFR || video_sync_method == VSYNC_VSCFR || (video_sync_method == VSYNC_AUTO && !(oc->oformat->flags & AVFMT_VARIABLE_FPS)))){
3365 av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer that does not support it efficiently.\n"
3366 "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
3368 for (j = 0; j < ost->forced_kf_count; j++)
3369 ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j],
3371 enc_ctx->time_base);
3373 enc_ctx->width = av_buffersink_get_w(ost->filter->filter);
3374 enc_ctx->height = av_buffersink_get_h(ost->filter->filter);
3375 enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
3376 ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
3377 av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
3378 av_buffersink_get_sample_aspect_ratio(ost->filter->filter);
3380 enc_ctx->pix_fmt = av_buffersink_get_format(ost->filter->filter);
3382 enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3383 av_pix_fmt_desc_get(enc_ctx->pix_fmt)->comp[0].depth);
3385 enc_ctx->framerate = ost->frame_rate;
3387 ost->st->avg_frame_rate = ost->frame_rate;
3390 enc_ctx->width != dec_ctx->width ||
3391 enc_ctx->height != dec_ctx->height ||
3392 enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
3393 enc_ctx->bits_per_raw_sample = frame_bits_per_raw_sample;
3396 if (ost->forced_keyframes) {
3397 if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
3398 ret = av_expr_parse(&ost->forced_keyframes_pexpr, ost->forced_keyframes+5,
3399 forced_keyframes_const_names, NULL, NULL, NULL, NULL, 0, NULL);
3401 av_log(NULL, AV_LOG_ERROR,
3402 "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
3405 ost->forced_keyframes_expr_const_values[FKF_N] = 0;
3406 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] = 0;
3407 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] = NAN;
3408 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] = NAN;
3410 // Don't parse 'forced_keyframes' when source keyframes are kept ('source');
3411 // parse it only for static keyframe timings.
3412 } else if(strncmp(ost->forced_keyframes, "source", 6)) {
3413 parse_forced_key_frames(ost->forced_keyframes, ost, ost->enc_ctx);
3417 case AVMEDIA_TYPE_SUBTITLE:
3418 enc_ctx->time_base = AV_TIME_BASE_Q;
3419 if (!enc_ctx->width) {
3420 enc_ctx->width = input_streams[ost->source_index]->st->codecpar->width;
3421 enc_ctx->height = input_streams[ost->source_index]->st->codecpar->height;
3424 case AVMEDIA_TYPE_DATA:
3431 ost->mux_timebase = enc_ctx->time_base;
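/* Fully initialize one output stream: open the encoder (or run the
 * streamcopy setup above), propagate side data, apply any user-supplied
 * disposition, initialize the bitstream filters and finally try to write the
 * output file header once every stream of that file has been initialized. */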
3436 static int init_output_stream(OutputStream *ost, char *error, int error_len)
3440 if (ost->encoding_needed) {
3441 AVCodec *codec = ost->enc;
3442 AVCodecContext *dec = NULL;
3445 ret = init_output_stream_encode(ost);
3449 if ((ist = get_input_stream(ost)))
3451 if (dec && dec->subtitle_header) {
3452 /* ASS code assumes this buffer is null terminated so add extra byte. */
3453 ost->enc_ctx->subtitle_header = av_mallocz(dec->subtitle_header_size + 1);
3454 if (!ost->enc_ctx->subtitle_header)
3455 return AVERROR(ENOMEM);
3456 memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
3457 ost->enc_ctx->subtitle_header_size = dec->subtitle_header_size;
3459 if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
3460 av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
3461 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3463 !av_dict_get(ost->encoder_opts, "b", NULL, 0) &&
3464 !av_dict_get(ost->encoder_opts, "ab", NULL, 0))
3465 av_dict_set(&ost->encoder_opts, "b", "128000", 0);
3467 if (ost->filter && av_buffersink_get_hw_frames_ctx(ost->filter->filter) &&
3468 ((AVHWFramesContext*)av_buffersink_get_hw_frames_ctx(ost->filter->filter)->data)->format ==
3469 av_buffersink_get_format(ost->filter->filter)) {
3470 ost->enc_ctx->hw_frames_ctx = av_buffer_ref(av_buffersink_get_hw_frames_ctx(ost->filter->filter));
3471 if (!ost->enc_ctx->hw_frames_ctx)
3472 return AVERROR(ENOMEM);
3474 ret = hw_device_setup_for_encode(ost);
3476 snprintf(error, error_len, "Device setup failed for "
3477 "encoder on output stream #%d:%d : %s",
3478 ost->file_index, ost->index, av_err2str(ret));
3483 if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
3484 if (ret == AVERROR_EXPERIMENTAL)
3485 abort_codec_experimental(codec, 1);
3486 snprintf(error, error_len,
3487 "Error while opening encoder for output stream #%d:%d - "
3488 "maybe incorrect parameters such as bit_rate, rate, width or height",
3489 ost->file_index, ost->index);
3492 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3493 !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
3494 av_buffersink_set_frame_size(ost->filter->filter,
3495 ost->enc_ctx->frame_size);
3496 assert_avoptions(ost->encoder_opts);
3497 if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000)
3498 av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
3499 " It takes bits/s as argument, not kbits/s\n");
3501 ret = avcodec_parameters_from_context(ost->st->codecpar, ost->enc_ctx);
3503 av_log(NULL, AV_LOG_FATAL,
3504 "Error initializing the output stream codec context.\n");
3508 * FIXME: ost->st->codec shouldn't be needed here anymore.
3510 ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
3514 if (ost->enc_ctx->nb_coded_side_data) {
3517 for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
3518 const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
3521 dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
3523 return AVERROR(ENOMEM);
3524 memcpy(dst_data, sd_src->data, sd_src->size);
3529 * Add global input side data. For now this is naive, and copies it
3530 * from the input stream's global side data. All side data should
3531 * really be funneled over AVFrame and libavfilter, then added back to
3532 * packet side data, and then potentially using the first packet for
3537 for (i = 0; i < ist->st->nb_side_data; i++) {
3538 AVPacketSideData *sd = &ist->st->side_data[i];
3539 uint8_t *dst = av_stream_new_side_data(ost->st, sd->type, sd->size);
3541 return AVERROR(ENOMEM);
3542 memcpy(dst, sd->data, sd->size);
3543 if (ist->autorotate && sd->type == AV_PKT_DATA_DISPLAYMATRIX)
3544 av_display_rotation_set((uint32_t *)dst, 0);
3548 // copy timebase while removing common factors
3549 if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
3550 ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
3552 // copy estimated duration as a hint to the muxer
3553 if (ost->st->duration <= 0 && ist && ist->st->duration > 0)
3554 ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3556 ost->st->codec->codec= ost->enc_ctx->codec;
3557 } else if (ost->stream_copy) {
3558 ret = init_output_stream_streamcopy(ost);
3563 * FIXME: the codec context used by the parser during streamcopy is filled
3564 * from the stream's codecpar below; this should go away with the new parser API.
3566 ret = avcodec_parameters_to_context(ost->parser_avctx, ost->st->codecpar);
3571 // parse user provided disposition, and update stream values
3572 if (ost->disposition) {
3573 static const AVOption opts[] = {
3574 { "disposition" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
3575 { "default" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT }, .unit = "flags" },
3576 { "dub" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB }, .unit = "flags" },
3577 { "original" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL }, .unit = "flags" },
3578 { "comment" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT }, .unit = "flags" },
3579 { "lyrics" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS }, .unit = "flags" },
3580 { "karaoke" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE }, .unit = "flags" },
3581 { "forced" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED }, .unit = "flags" },
3582 { "hearing_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED }, .unit = "flags" },
3583 { "visual_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED }, .unit = "flags" },
3584 { "clean_effects" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS }, .unit = "flags" },
3585 { "captions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS }, .unit = "flags" },
3586 { "descriptions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS }, .unit = "flags" },
3587 { "metadata" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA }, .unit = "flags" },
3590 static const AVClass class = {
3592 .item_name = av_default_item_name,
3594 .version = LIBAVUTIL_VERSION_INT,
3596 const AVClass *pclass = &class;
3598 ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
3603 /* initialize bitstream filters for the output stream
3604 * needs to be done here, because the codec id for streamcopy is not
3605 * known until now */
3606 ret = init_output_bsfs(ost);
3610 ost->initialized = 1;
3612 ret = check_init_output_file(output_files[ost->file_index], ost->file_index);
3619 static void report_new_stream(int input_index, AVPacket *pkt)
3621 InputFile *file = input_files[input_index];
3622 AVStream *st = file->ctx->streams[pkt->stream_index];
3624 if (pkt->stream_index < file->nb_streams_warn)
3626 av_log(file->ctx, AV_LOG_WARNING,
3627 "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
3628 av_get_media_type_string(st->codecpar->codec_type),
3629 input_index, pkt->stream_index,
3630 pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
3631 file->nb_streams_warn = pkt->stream_index + 1;
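/* Global initialization before the main transcoding loop: bind complex
 * filtergraph outputs to their source streams, set up frame-rate emulation,
 * open the decoders and those encoders not fed by a filtergraph, discard
 * unused programs, write headers for stream-less outputs and dump the
 * stream mapping. */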
3634 static int transcode_init(void)
3636 int ret = 0, i, j, k;
3637 AVFormatContext *oc;
3640 char error[1024] = {0};
3642 for (i = 0; i < nb_filtergraphs; i++) {
3643 FilterGraph *fg = filtergraphs[i];
3644 for (j = 0; j < fg->nb_outputs; j++) {
3645 OutputFilter *ofilter = fg->outputs[j];
3646 if (!ofilter->ost || ofilter->ost->source_index >= 0)
3648 if (fg->nb_inputs != 1)
3650 for (k = nb_input_streams-1; k >= 0 ; k--)
3651 if (fg->inputs[0]->ist == input_streams[k])
3653 ofilter->ost->source_index = k;
3657 /* init framerate emulation */
3658 for (i = 0; i < nb_input_files; i++) {
3659 InputFile *ifile = input_files[i];
3660 if (ifile->rate_emu)
3661 for (j = 0; j < ifile->nb_streams; j++)
3662 input_streams[j + ifile->ist_index]->start = av_gettime_relative();
3665 /* init input streams */
3666 for (i = 0; i < nb_input_streams; i++)
3667 if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
3668 for (i = 0; i < nb_output_streams; i++) {
3669 ost = output_streams[i];
3670 avcodec_close(ost->enc_ctx);
3675 /* open each encoder */
3676 for (i = 0; i < nb_output_streams; i++) {
3677 // skip streams fed from filtergraphs until we have a frame for them
3678 if (output_streams[i]->filter)
3681 ret = init_output_stream(output_streams[i], error, sizeof(error));
3686 /* discard unused programs */
3687 for (i = 0; i < nb_input_files; i++) {
3688 InputFile *ifile = input_files[i];
3689 for (j = 0; j < ifile->ctx->nb_programs; j++) {
3690 AVProgram *p = ifile->ctx->programs[j];
3691 int discard = AVDISCARD_ALL;
3693 for (k = 0; k < p->nb_stream_indexes; k++)
3694 if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
3695 discard = AVDISCARD_DEFAULT;
3698 p->discard = discard;
3702 /* write headers for files with no streams */
3703 for (i = 0; i < nb_output_files; i++) {
3704 oc = output_files[i]->ctx;
3705 if (oc->oformat->flags & AVFMT_NOSTREAMS && oc->nb_streams == 0) {
3706 ret = check_init_output_file(output_files[i], i);
3713 /* dump the stream mapping */
3714 av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
3715 for (i = 0; i < nb_input_streams; i++) {
3716 ist = input_streams[i];
3718 for (j = 0; j < ist->nb_filters; j++) {
3719 if (!filtergraph_is_simple(ist->filters[j]->graph)) {
3720 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
3721 ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
3722 ist->filters[j]->name);
3723 if (nb_filtergraphs > 1)
3724 av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
3725 av_log(NULL, AV_LOG_INFO, "\n");
3730 for (i = 0; i < nb_output_streams; i++) {
3731 ost = output_streams[i];
3733 if (ost->attachment_filename) {
3734 /* an attached file */
3735 av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
3736 ost->attachment_filename, ost->file_index, ost->index);
3740 if (ost->filter && !filtergraph_is_simple(ost->filter->graph)) {
3741 /* output from a complex graph */
3742 av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
3743 if (nb_filtergraphs > 1)
3744 av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
3746 av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
3747 ost->index, ost->enc ? ost->enc->name : "?");
3751 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
3752 input_streams[ost->source_index]->file_index,
3753 input_streams[ost->source_index]->st->index,
3756 if (ost->sync_ist != input_streams[ost->source_index])
3757 av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
3758 ost->sync_ist->file_index,
3759 ost->sync_ist->st->index);
3760 if (ost->stream_copy)
3761 av_log(NULL, AV_LOG_INFO, " (copy)");
3763 const AVCodec *in_codec = input_streams[ost->source_index]->dec;
3764 const AVCodec *out_codec = ost->enc;
3765 const char *decoder_name = "?";
3766 const char *in_codec_name = "?";
3767 const char *encoder_name = "?";
3768 const char *out_codec_name = "?";
3769 const AVCodecDescriptor *desc;
3772 decoder_name = in_codec->name;
3773 desc = avcodec_descriptor_get(in_codec->id);
3775 in_codec_name = desc->name;
3776 if (!strcmp(decoder_name, in_codec_name))
3777 decoder_name = "native";
3781 encoder_name = out_codec->name;
3782 desc = avcodec_descriptor_get(out_codec->id);
3784 out_codec_name = desc->name;
3785 if (!strcmp(encoder_name, out_codec_name))
3786 encoder_name = "native";
3789 av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
3790 in_codec_name, decoder_name,
3791 out_codec_name, encoder_name);
3793 av_log(NULL, AV_LOG_INFO, "\n");
3797 av_log(NULL, AV_LOG_ERROR, "%s\n", error);
3801 atomic_store(&transcode_init_done, 1);
3806 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
3807 static int need_output(void)
3811 for (i = 0; i < nb_output_streams; i++) {
3812 OutputStream *ost = output_streams[i];
3813 OutputFile *of = output_files[ost->file_index];
3814 AVFormatContext *os = output_files[ost->file_index]->ctx;
3816 if (ost->finished ||
3817 (os->pb && avio_tell(os->pb) >= of->limit_filesize))
3819 if (ost->frame_number >= ost->max_frames) {
3821 for (j = 0; j < of->ctx->nb_streams; j++)
3822 close_output_stream(output_streams[of->ost_index + j]);
3833 * Select the output stream to process.
3835 * @return selected output stream, or NULL if none available
3837 static OutputStream *choose_output(void)
3840 int64_t opts_min = INT64_MAX;
3841 OutputStream *ost_min = NULL;
3843 for (i = 0; i < nb_output_streams; i++) {
3844 OutputStream *ost = output_streams[i];
3845 int64_t opts = ost->st->cur_dts == AV_NOPTS_VALUE ? INT64_MIN :
3846 av_rescale_q(ost->st->cur_dts, ost->st->time_base,
3848 if (ost->st->cur_dts == AV_NOPTS_VALUE)
3849 av_log(NULL, AV_LOG_DEBUG, "cur_dts is invalid (this is harmless if it occurs once at the start per stream)\n");
3851 if (!ost->initialized && !ost->inputs_done)
3854 if (!ost->finished && opts < opts_min) {
3856 ost_min = ost->unavailable ? NULL : ost;
3862 static void set_tty_echo(int on)
3866 if (tcgetattr(0, &tty) == 0) {
3867 if (on) tty.c_lflag |= ECHO;
3868 else tty.c_lflag &= ~ECHO;
3869 tcsetattr(0, TCSANOW, &tty);
3874 static int check_keyboard_interaction(int64_t cur_time)
3877 static int64_t last_time;
3878 if (received_nb_signals)
3879 return AVERROR_EXIT;
3880 /* read_key() returns 0 on EOF */
3881 if(cur_time - last_time >= 100000 && !run_as_daemon){
3883 last_time = cur_time;
3887 return AVERROR_EXIT;
3888 if (key == '+') av_log_set_level(av_log_get_level()+10);
3889 if (key == '-') av_log_set_level(av_log_get_level()-10);
3890 if (key == 's') qp_hist ^= 1;
3893 do_hex_dump = do_pkt_dump = 0;
3894 } else if(do_pkt_dump){
3898 av_log_set_level(AV_LOG_DEBUG);
3900 if (key == 'c' || key == 'C'){
3901 char buf[4096], target[64], command[256], arg[256] = {0};
3904 fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
3907 while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3912 fprintf(stderr, "\n");
3914 (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
3915 av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
3916 target, time, command, arg);
3917 for (i = 0; i < nb_filtergraphs; i++) {
3918 FilterGraph *fg = filtergraphs[i];
3921 ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
3922 key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
3923 fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
3924 } else if (key == 'c') {
3925 fprintf(stderr, "Queuing a command only on the filters that support it is unsupported\n");
3926 ret = AVERROR_PATCHWELCOME;
3928 ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
3930 fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
3935 av_log(NULL, AV_LOG_ERROR,
3936 "Parse error, at least 3 arguments were expected, "
3937 "only %d given in string '%s'\n", n, buf);
3940 if (key == 'd' || key == 'D'){
3943 debug = input_streams[0]->st->codec->debug<<1;
3944 if(!debug) debug = 1;
3945 while(debug & (FF_DEBUG_DCT_COEFF
3947 |FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE
3949 )) //unsupported, would just crash
3956 while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3961 fprintf(stderr, "\n");
3962 if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
3963 fprintf(stderr,"error parsing debug value\n");
3965 for(i=0;i<nb_input_streams;i++) {
3966 input_streams[i]->st->codec->debug = debug;
3968 for(i=0;i<nb_output_streams;i++) {
3969 OutputStream *ost = output_streams[i];
3970 ost->enc_ctx->debug = debug;
3972 if(debug) av_log_set_level(AV_LOG_DEBUG);
3973 fprintf(stderr,"debug=%d\n", debug);
3976 fprintf(stderr, "key function\n"
3977 "? show this help\n"
3978 "+ increase verbosity\n"
3979 "- decrease verbosity\n"
3980 "c Send command to first matching filter supporting it\n"
3981 "C Send/Queue command to all matching filters\n"
3982 "D cycle through available debug modes\n"
3983 "h dump packets/hex; press again to cycle through the 3 states\n"
3985 "s Show QP histogram\n"
3992 static void *input_thread(void *arg)
3995 unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
4000 ret = av_read_frame(f->ctx, &pkt);
4002 if (ret == AVERROR(EAGAIN)) {
4007 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
4010 ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
4011 if (flags && ret == AVERROR(EAGAIN)) {
4013 ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
4014 av_log(f->ctx, AV_LOG_WARNING,
4015 "Thread message queue blocking; consider raising the "
4016 "thread_queue_size option (current value: %d)\n",
4017 f->thread_queue_size);
4020 if (ret != AVERROR_EOF)
4021 av_log(f->ctx, AV_LOG_ERROR,
4022 "Unable to send packet to main thread: %s\n",
4024 av_packet_unref(&pkt);
4025 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
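/* Tear down the per-input reader threads: signal EOF on the send side of
 * each message queue, drain any packets still queued, join the thread and
 * free the queue. */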
4033 static void free_input_threads(void)
4037 for (i = 0; i < nb_input_files; i++) {
4038 InputFile *f = input_files[i];
4041 if (!f || !f->in_thread_queue)
4043 av_thread_message_queue_set_err_send(f->in_thread_queue, AVERROR_EOF);
4044 while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
4045 av_packet_unref(&pkt);
4047 pthread_join(f->thread, NULL);
4049 av_thread_message_queue_free(&f->in_thread_queue);
4053 static int init_input_threads(void)
4057 if (nb_input_files == 1)
4060 for (i = 0; i < nb_input_files; i++) {
4061 InputFile *f = input_files[i];
4063 if (f->ctx->pb ? !f->ctx->pb->seekable :
4064 strcmp(f->ctx->iformat->name, "lavfi"))
4065 f->non_blocking = 1;
4066 ret = av_thread_message_queue_alloc(&f->in_thread_queue,
4067 f->thread_queue_size, sizeof(AVPacket));
4071 if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
4072 av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
4073 av_thread_message_queue_free(&f->in_thread_queue);
4074 return AVERROR(ret);
4080 static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
4082 return av_thread_message_queue_recv(f->in_thread_queue, pkt,
4084 AV_THREAD_MESSAGE_NONBLOCK : 0);
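/* Fetch the next packet from an input file. With -re (rate emulation) the
 * read is postponed (AVERROR(EAGAIN)) while a stream's last dts is still
 * ahead of the wall-clock time elapsed since that stream started. */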
4088 static int get_input_packet(InputFile *f, AVPacket *pkt)
4092 for (i = 0; i < f->nb_streams; i++) {
4093 InputStream *ist = input_streams[f->ist_index + i];
4094 int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
4095 int64_t now = av_gettime_relative() - ist->start;
4097 return AVERROR(EAGAIN);
4102 if (nb_input_files > 1)
4103 return get_input_packet_mt(f, pkt);
4105 return av_read_frame(f->ctx, pkt);
4108 static int got_eagain(void)
4111 for (i = 0; i < nb_output_streams; i++)
4112 if (output_streams[i]->unavailable)
4117 static void reset_eagain(void)
4120 for (i = 0; i < nb_input_files; i++)
4121 input_files[i]->eagain = 0;
4122 for (i = 0; i < nb_output_streams; i++)
4123 output_streams[i]->unavailable = 0;
4126 // set duration to max(tmp, duration) in a proper time base and return duration's time_base
4127 static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base,
4128 AVRational time_base)
4134 return tmp_time_base;
4137 ret = av_compare_ts(*duration, time_base, tmp, tmp_time_base);
4140 return tmp_time_base;
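/* Implements -stream_loop: seek the input back to its start, flush the
 * decoders, and add the duration of the pass that just ended to
 * ifile->duration so that timestamps can keep increasing across loops. */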
4146 static int seek_to_start(InputFile *ifile, AVFormatContext *is)
4149 AVCodecContext *avctx;
4150 int i, ret, has_audio = 0;
4151 int64_t duration = 0;
4153 ret = av_seek_frame(is, -1, is->start_time, 0);
4157 for (i = 0; i < ifile->nb_streams; i++) {
4158 ist = input_streams[ifile->ist_index + i];
4159 avctx = ist->dec_ctx;
4162 if (ist->decoding_needed) {
4163 process_input_packet(ist, NULL, 1);
4164 avcodec_flush_buffers(avctx);
4167 /* duration is the length of the last frame in a stream;
4168 * when an audio stream is present we don't care about the
4169 * last video frame's length because it is not defined exactly */
4170 if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples)
4174 for (i = 0; i < ifile->nb_streams; i++) {
4175 ist = input_streams[ifile->ist_index + i];
4176 avctx = ist->dec_ctx;
4179 if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples) {
4180 AVRational sample_rate = {1, avctx->sample_rate};
4182 duration = av_rescale_q(ist->nb_samples, sample_rate, ist->st->time_base);
4186 if (ist->framerate.num) {
4187 duration = av_rescale_q(1, ist->framerate, ist->st->time_base);
4188 } else if (ist->st->avg_frame_rate.num) {
4189 duration = av_rescale_q(1, ist->st->avg_frame_rate, ist->st->time_base);
4190 } else duration = 1;
4192 if (!ifile->duration)
4193 ifile->time_base = ist->st->time_base;
4194 /* the total duration of the stream, max_pts - min_pts is
4195 * the duration of the stream without the last frame */
4196 duration += ist->max_pts - ist->min_pts;
4197 ifile->time_base = duration_max(duration, &ifile->duration, ist->st->time_base,
4201 if (ifile->loop > 0)
4209 * - 0 -- one packet was read and processed
4210 * - AVERROR(EAGAIN) -- no packets were available for selected file,
4211 * this function should be called again
4212 * - AVERROR_EOF -- this function should not be called again
4214 static int process_input(int file_index)
4216 InputFile *ifile = input_files[file_index];
4217 AVFormatContext *is;
4225 ret = get_input_packet(ifile, &pkt);
4227 if (ret == AVERROR(EAGAIN)) {
4231 if (ret < 0 && ifile->loop) {