#include "ffmpeg.h"
#include "cmdutils.h"
+#include "sync_queue.h"
#include "libavutil/avassert.h"
static int64_t decode_error_stat[2];
unsigned nb_output_dumped = 0;
-int want_sdp = 1;
-
static BenchmarkTimeStamps current_time;
AVIOContext *progress_avio = NULL;
-static uint8_t *subtitle_out;
-
InputStream **input_streams = NULL;
int nb_input_streams = 0;
InputFile **input_files = NULL;
static int restore_tty;
#endif
-#if HAVE_THREADS
-static void free_input_threads(void);
-#endif
-
/* sub2video hack:
Convert subtitles to video with alpha to insert them in filter graphs.
This is a temporary solution until libavfilter gets real subtitles support.
num_rects = 0;
}
if (sub2video_get_blank_frame(ist) < 0) {
- av_log(ist->dec_ctx, AV_LOG_ERROR,
+ av_log(NULL, AV_LOG_ERROR,
"Impossible to get a blank canvas.\n");
return;
}
}
av_freep(&filtergraphs);
- av_freep(&subtitle_out);
-
/* close files */
for (i = 0; i < nb_output_files; i++)
of_close(&output_files[i]);
av_bsf_free(&ost->bsf_ctx);
av_frame_free(&ost->filtered_frame);
+ av_frame_free(&ost->sq_frame);
av_frame_free(&ost->last_frame);
av_packet_free(&ost->pkt);
av_dict_free(&ost->encoder_opts);
av_freep(&ost->avfilter);
av_freep(&ost->logfile_prefix);
+#if FFMPEG_OPT_MAP_CHANNEL
av_freep(&ost->audio_channels_map);
ost->audio_channels_mapped = 0;
+#endif
av_dict_free(&ost->sws_dict);
av_dict_free(&ost->swr_opts);
+ if (ost->enc_ctx)
+ av_freep(&ost->enc_ctx->stats_in);
avcodec_free_context(&ost->enc_ctx);
- avcodec_parameters_free(&ost->ref_par);
-
- if (ost->muxing_queue) {
- AVPacket *pkt;
- while (av_fifo_read(ost->muxing_queue, &pkt, 1) >= 0)
- av_packet_free(&pkt);
- av_fifo_freep2(&ost->muxing_queue);
- }
av_freep(&output_streams[i]);
}
-#if HAVE_THREADS
free_input_threads();
-#endif
for (i = 0; i < nb_input_files; i++) {
avformat_close_input(&input_files[i]->ctx);
- av_packet_free(&input_files[i]->pkt);
av_freep(&input_files[i]);
}
for (i = 0; i < nb_input_streams; i++) {
av_freep(&ist->dts_buffer);
avcodec_free_context(&ist->dec_ctx);
+ avcodec_parameters_free(&ist->par);
av_freep(&input_streams[i]);
}
static void close_output_stream(OutputStream *ost)
{
OutputFile *of = output_files[ost->file_index];
- AVRational time_base = ost->stream_copy ? ost->mux_timebase : ost->enc_ctx->time_base;
-
ost->finished |= ENCODER_FINISHED;
- if (of->shortest) {
- int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, time_base, AV_TIME_BASE_Q);
- of->recording_time = FFMIN(of->recording_time, end);
- }
+
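+    /* signal EOF on this stream to the encoding sync queue, if it uses one */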
+ if (ost->sq_idx_encode >= 0)
+ sq_send(of->sq_encode, ost->sq_idx_encode, SQFRAME(NULL));
}
/*
static void output_packet(OutputFile *of, AVPacket *pkt,
OutputStream *ost, int eof)
{
+ const char *err_msg;
int ret = 0;
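+    /* remember the last DTS handed to the muxer, rescaled to AV_TIME_BASE_Q;
+     * print_report() now uses it instead of av_stream_get_end_pts() */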
+ if (!eof && pkt->dts != AV_NOPTS_VALUE)
+ ost->last_mux_dts = av_rescale_q(pkt->dts, ost->mux_timebase, AV_TIME_BASE_Q);
+
/* apply the output bitstream filters */
if (ost->bsf_ctx) {
+ int bsf_eof = 0;
+
ret = av_bsf_send_packet(ost->bsf_ctx, eof ? NULL : pkt);
+ if (ret < 0) {
+ err_msg = "submitting a packet for bitstream filtering";
+ goto fail;
+ }
+
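+        /* drain the bitstream filter until it needs more input (EAGAIN) or
+         * signals EOF, forwarding each filtered packet to the muxer */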
+ while (!bsf_eof) {
+ ret = av_bsf_receive_packet(ost->bsf_ctx, pkt);
+ if (ret == AVERROR(EAGAIN))
+ return;
+ else if (ret == AVERROR_EOF)
+ bsf_eof = 1;
+ else if (ret < 0) {
+ err_msg = "applying bitstream filters to a packet";
+ goto fail;
+ }
+
+ ret = of_submit_packet(of, bsf_eof ? NULL : pkt, ost);
+ if (ret < 0)
+ goto mux_fail;
+ }
+ } else {
+ ret = of_submit_packet(of, eof ? NULL : pkt, ost);
if (ret < 0)
- goto finish;
- while ((ret = av_bsf_receive_packet(ost->bsf_ctx, pkt)) >= 0)
- of_write_packet(of, pkt, ost, 0);
- if (ret == AVERROR(EAGAIN))
- ret = 0;
- } else if (!eof)
- of_write_packet(of, pkt, ost, 0);
-
-finish:
- if (ret < 0 && ret != AVERROR_EOF) {
- av_log(NULL, AV_LOG_ERROR, "Error applying bitstream filters to an output "
- "packet for stream #%d:%d.\n", ost->file_index, ost->index);
- if(exit_on_error)
- exit_program(1);
+ goto mux_fail;
}
+
+ return;
+
+mux_fail:
+ err_msg = "submitting a packet to the muxer";
+
+fail:
+ av_log(NULL, AV_LOG_ERROR, "Error %s for output stream #%d:%d.\n",
+ err_msg, ost->file_index, ost->index);
+ if (exit_on_error)
+ exit_program(1);
+
}
static int check_recording_time(OutputStream *ost)
AVFrame *frame)
{
double float_pts = AV_NOPTS_VALUE; // this is identical to frame.pts but with higher precision
+ int64_t orig_pts = AV_NOPTS_VALUE;
AVCodecContext *enc = ost->enc_ctx;
+ AVRational filter_tb = (AVRational){ -1, -1 };
if (!frame || frame->pts == AV_NOPTS_VALUE ||
!enc || !ost->filter || !ost->filter->graph->graph)
goto early_exit;
AVFilterContext *filter = ost->filter->filter;
int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
- AVRational filter_tb = av_buffersink_get_time_base(filter);
AVRational tb = enc->time_base;
int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
+ filter_tb = av_buffersink_get_time_base(filter);
+ orig_pts = frame->pts;
tb.den <<= extra_bits;
float_pts =
early_exit:
if (debug_ts) {
+ av_log(NULL, AV_LOG_INFO, "filter_raw -> pts:%s pts_time:%s time_base:%d/%d\n",
+ frame ? av_ts2str(orig_pts) : "NULL",
+ frame ? av_ts2timestr(orig_pts, &filter_tb) : "NULL",
+ filter_tb.num, filter_tb.den);
+
av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
frame ? av_ts2str(frame->pts) : "NULL",
- frame ? av_ts2timestr(frame->pts, &enc->time_base) : "NULL",
+ (enc && frame) ? av_ts2timestr(frame->pts, &enc->time_base) : "NULL",
float_pts,
enc ? enc->time_base.num : -1,
enc ? enc->time_base.den : -1);
if (frame) {
ost->frames_encoded++;
+ ost->samples_encoded += frame->nb_samples;
if (debug_ts) {
av_log(NULL, AV_LOG_INFO, "encoder <- type:%s "
av_assert0(0);
}
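+/* Submit a frame for encoding. When the stream is attached to an encoding
+ * sync queue (sq_idx_encode >= 0), the frame is referenced into ost->sq_frame
+ * and pushed with sq_send(); any frames the queue releases are then drained
+ * with sq_receive() and passed to encode_frame(). Streams without a sync
+ * queue are encoded directly. */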
+static int submit_encode_frame(OutputFile *of, OutputStream *ost,
+ AVFrame *frame)
+{
+ int ret;
+
+ if (ost->sq_idx_encode < 0)
+ return encode_frame(of, ost, frame);
+
+ if (frame) {
+ ret = av_frame_ref(ost->sq_frame, frame);
+ if (ret < 0)
+ return ret;
+ frame = ost->sq_frame;
+ }
+
+ ret = sq_send(of->sq_encode, ost->sq_idx_encode,
+ SQFRAME(frame));
+ if (ret < 0) {
+ if (frame)
+ av_frame_unref(frame);
+ if (ret != AVERROR_EOF)
+ return ret;
+ }
+
+ while (1) {
+ AVFrame *enc_frame = ost->sq_frame;
+
+ ret = sq_receive(of->sq_encode, ost->sq_idx_encode,
+ SQFRAME(enc_frame));
+ if (ret == AVERROR_EOF) {
+ enc_frame = NULL;
+ } else if (ret < 0) {
+ return (ret == AVERROR(EAGAIN)) ? 0 : ret;
+ }
+
+ ret = encode_frame(of, ost, enc_frame);
+ if (enc_frame)
+ av_frame_unref(enc_frame);
+ if (ret < 0) {
+ if (ret == AVERROR_EOF)
+ close_output_stream(ost);
+ return ret;
+ }
+ }
+}
+
static void do_audio_out(OutputFile *of, OutputStream *ost,
AVFrame *frame)
{
if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
frame->pts = ost->sync_opts;
ost->sync_opts = frame->pts + frame->nb_samples;
- ost->samples_encoded += frame->nb_samples;
- ret = encode_frame(of, ost, frame);
- if (ret < 0)
+ ret = submit_encode_frame(of, ost, frame);
+ if (ret < 0 && ret != AVERROR_EOF)
exit_program(1);
}
AVSubtitle *sub)
{
int subtitle_out_max_size = 1024 * 1024;
- int subtitle_out_size, nb, i;
+ int subtitle_out_size, nb, i, ret;
AVCodecContext *enc;
AVPacket *pkt = ost->pkt;
int64_t pts;
enc = ost->enc_ctx;
- if (!subtitle_out) {
- subtitle_out = av_malloc(subtitle_out_max_size);
- if (!subtitle_out) {
- av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
- exit_program(1);
- }
- }
-
    /* Note: DVB subtitles need one packet to draw them and another
       packet to clear them */
/* XXX: signal it in the codec context ? */
if (!check_recording_time(ost))
return;
+ ret = av_new_packet(pkt, subtitle_out_max_size);
+ if (ret < 0) {
+ av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle encode buffer\n");
+ exit_program(1);
+ }
+
sub->pts = pts;
// start_display_time is required to be 0
sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
ost->frames_encoded++;
- subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
- subtitle_out_max_size, sub);
+ subtitle_out_size = avcodec_encode_subtitle(enc, pkt->data, pkt->size, sub);
if (i == 1)
sub->num_rects = save_num_rects;
if (subtitle_out_size < 0) {
exit_program(1);
}
- av_packet_unref(pkt);
- pkt->data = subtitle_out;
- pkt->size = subtitle_out_size;
+ av_shrink_packet(pkt, subtitle_out_size);
pkt->pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->mux_timebase);
pkt->duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
(nb_filtergraphs == 0 || !filtergraphs[0]->graph_desc) &&
next_picture &&
ist &&
- lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
- duration = lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
+ lrintf(next_picture->duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
+ duration = lrintf(next_picture->duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
}
if (!next_picture) {
switch (ost->vsync_method) {
case VSYNC_VSCFR:
- if (ost->frame_number == 0 && delta0 >= 0.5) {
+ if (ost->vsync_frame_number == 0 && delta0 >= 0.5) {
av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
delta = duration;
delta0 = 0;
}
case VSYNC_CFR:
// FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
- if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
+ if (frame_drop_threshold && delta < frame_drop_threshold && ost->vsync_frame_number) {
nb_frames = 0;
} else if (delta < -1.1)
nb_frames = 0;
if (delta0 > 1.1)
nb0_frames = llrintf(delta0 - 0.6);
}
+ next_picture->duration = 1;
break;
case VSYNC_VFR:
if (delta <= -0.6)
nb_frames = 0;
else if (delta > 0.6)
ost->sync_opts = llrint(sync_ipts);
+ next_picture->duration = duration;
break;
case VSYNC_DROP:
case VSYNC_PASSTHROUGH:
+ next_picture->duration = duration;
ost->sync_opts = llrint(sync_ipts);
break;
default:
* But there may be reordering, so we can't throw away frames on encoder
* flush, we need to limit them here, before they go into encoder.
*/
- nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
+ nb_frames = FFMIN(nb_frames, ost->max_frames - ost->vsync_frame_number);
nb0_frames = FFMIN(nb0_frames, nb_frames);
memmove(ost->last_nb0_frames + 1,
nb_frames_drop++;
av_log(NULL, AV_LOG_VERBOSE,
"*** dropping frame %"PRId64" from stream %d at ts %"PRId64"\n",
- ost->frame_number, ost->st->index, ost->last_frame->pts);
+ ost->vsync_frame_number, ost->st->index, ost->last_frame->pts);
}
if (nb_frames > (nb0_frames && ost->last_dropped) + (nb_frames > nb0_frames)) {
if (nb_frames > dts_error_threshold * 30) {
av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
}
- ret = encode_frame(of, ost, in_picture);
- if (ret < 0)
+ ret = submit_encode_frame(of, ost, in_picture);
+ if (ret < 0 && ret != AVERROR_EOF)
exit_program(1);
ost->sync_opts++;
- ost->frame_number++;
+ ost->vsync_frame_number++;
}
av_frame_unref(ost->last_frame);
av_frame_move_ref(ost->last_frame, next_picture);
}
-static void finish_output_stream(OutputStream *ost)
-{
- OutputFile *of = output_files[ost->file_index];
- AVRational time_base = ost->stream_copy ? ost->mux_timebase : ost->enc_ctx->time_base;
-
- ost->finished = ENCODER_FINISHED | MUXER_FINISHED;
-
- if (of->shortest) {
- int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, time_base, AV_TIME_BASE_Q);
- of->recording_time = FFMIN(of->recording_time, end);
- }
-}
-
/**
* Get and encode new output from any of the filtergraphs, without causing
* activity.
continue;
}
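+        /* remember the timestamp of the last frame received from this filter,
+         * in AV_TIME_BASE_Q; choose_output() prefers it over the last muxed
+         * DTS when picking the next stream to process */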
+ if (filtered_frame->pts != AV_NOPTS_VALUE) {
+ AVRational tb = av_buffersink_get_time_base(filter);
+ ost->last_filter_pts = av_rescale_q(filtered_frame->pts, tb,
+ AV_TIME_BASE_Q);
+ }
+
switch (av_buffersink_get_type(filter)) {
case AVMEDIA_TYPE_VIDEO:
if (!ost->frame_aspect_ratio.num)
for (i = 0; i < nb_output_streams; i++) {
OutputStream *ost = output_streams[i];
- switch (ost->enc_ctx->codec_type) {
+ AVCodecParameters *par = ost->st->codecpar;
+ switch (par->codec_type) {
case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
default: other_size += ost->data_size; break;
}
- extra_size += ost->enc_ctx->extradata_size;
+ extra_size += par->extradata_size;
data_size += ost->data_size;
- if ( (ost->enc_ctx->flags & (AV_CODEC_FLAG_PASS1 | AV_CODEC_FLAG_PASS2))
+ if (ost->enc_ctx &&
+ (ost->enc_ctx->flags & (AV_CODEC_FLAG_PASS1 | AV_CODEC_FLAG_PASS2))
!= AV_CODEC_FLAG_PASS1)
pass1_used = 0;
}
for (j = 0; j < f->nb_streams; j++) {
InputStream *ist = input_streams[f->ist_index + j];
- enum AVMediaType type = ist->dec_ctx->codec_type;
+ enum AVMediaType type = ist->par->codec_type;
total_size += ist->data_size;
total_packets += ist->nb_packets;
uint64_t total_packets = 0, total_size = 0;
av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
- i, of->ctx->url);
+ i, of->url);
- for (j = 0; j < of->ctx->nb_streams; j++) {
+ for (j = 0; j < of->nb_streams; j++) {
OutputStream *ost = output_streams[of->ost_index + j];
- enum AVMediaType type = ost->enc_ctx->codec_type;
+ enum AVMediaType type = ost->st->codecpar->codec_type;
total_size += ost->data_size;
- total_packets += ost->packets_written;
+ total_packets += atomic_load(&ost->packets_written);
av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
i, j, av_get_media_type_string(type));
- if (ost->encoding_needed) {
+ if (ost->enc_ctx) {
av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
ost->frames_encoded);
if (type == AVMEDIA_TYPE_AUDIO)
}
av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
- ost->packets_written, ost->data_size);
+ atomic_load(&ost->packets_written), ost->data_size);
av_log(NULL, AV_LOG_VERBOSE, "\n");
}
static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
{
AVBPrint buf, buf_script;
- OutputStream *ost;
- AVFormatContext *oc;
- int64_t total_size;
- AVCodecContext *enc;
+ int64_t total_size = of_filesize(output_files[0]);
int vid, i;
double bitrate;
double speed;
t = (cur_time-timer_start) / 1000000.0;
-
- oc = output_files[0]->ctx;
-
- total_size = avio_size(oc->pb);
- if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
- total_size = avio_tell(oc->pb);
-
vid = 0;
av_bprint_init(&buf, 0, AV_BPRINT_SIZE_AUTOMATIC);
av_bprint_init(&buf_script, 0, AV_BPRINT_SIZE_AUTOMATIC);
for (i = 0; i < nb_output_streams; i++) {
- float q = -1;
- ost = output_streams[i];
- enc = ost->enc_ctx;
- if (!ost->stream_copy)
- q = ost->quality / (float) FF_QP2LAMBDA;
+ OutputStream * const ost = output_streams[i];
+ const AVCodecContext * const enc = ost->enc_ctx;
+ const float q = enc ? ost->quality / (float) FF_QP2LAMBDA : -1;
- if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
+ if (vid && ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
av_bprintf(&buf, "q=%2.1f ", q);
av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
ost->file_index, ost->index, q);
}
- if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
+ if (!vid && ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
float fps;
- int64_t frame_number = ost->frame_number;
+ uint64_t frame_number = atomic_load(&ost->packets_written);
fps = t > 1 ? frame_number / t : 0;
av_bprintf(&buf, "frame=%5"PRId64" fps=%3.*f q=%3.1f ",
av_bprintf(&buf, "%X", av_log2(qp_histogram[j] + 1));
}
- if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
+ if (enc && (enc->flags & AV_CODEC_FLAG_PSNR) &&
+ (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
int j;
double error, error_sum = 0;
double scale, scale_sum = 0;
vid = 1;
}
/* compute min output value */
- if (av_stream_get_end_pts(ost->st) != AV_NOPTS_VALUE) {
- pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
- ost->st->time_base, AV_TIME_BASE_Q));
+ if (ost->last_mux_dts != AV_NOPTS_VALUE) {
+ pts = FFMAX(pts, ost->last_mux_dts);
if (copy_ts) {
if (copy_ts_first_pts == AV_NOPTS_VALUE && pts > 1)
copy_ts_first_pts = pts;
{
int i, ret;
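+    /* send EOF to all encoding sync queues first, so that frames they still
+     * hold back are released to the encoders being flushed below */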
+ for (i = 0; i < nb_output_streams; i++) {
+ OutputStream *ost = output_streams[i];
+ OutputFile *of = output_files[ost->file_index];
+ if (ost->sq_idx_encode >= 0)
+ sq_send(of->sq_encode, ost->sq_idx_encode, SQFRAME(NULL));
+ }
+
for (i = 0; i < nb_output_streams; i++) {
OutputStream *ost = output_streams[i];
AVCodecContext *enc = ost->enc_ctx;
OutputFile *of = output_files[ost->file_index];
- if (!ost->encoding_needed)
+ if (!enc)
continue;
// Try to enable encoding with no input frames.
for (x = 0; x < fg->nb_inputs; x++) {
InputFilter *ifilter = fg->inputs[x];
if (ifilter->format < 0 &&
- ifilter_parameters_from_codecpar(ifilter, ifilter->ist->st->codecpar) < 0) {
+ ifilter_parameters_from_codecpar(ifilter, ifilter->ist->par) < 0) {
av_log(NULL, AV_LOG_ERROR, "Error copying parameters from input stream\n");
exit_program(1);
}
exit_program(1);
}
- finish_output_stream(ost);
+ output_packet(of, ost->pkt, ost, 1);
}
init_output_stream_wrapper(ost, NULL, 1);
if (enc->codec_type != AVMEDIA_TYPE_VIDEO && enc->codec_type != AVMEDIA_TYPE_AUDIO)
continue;
- ret = encode_frame(of, ost, NULL);
+ ret = submit_encode_frame(of, ost, NULL);
if (ret != AVERROR_EOF)
exit_program(1);
}
return;
if (!ost->streamcopy_started && !ost->copy_prior_start) {
- int64_t comp_start = start_time;
- if (copy_ts && f->start_time != AV_NOPTS_VALUE)
- comp_start = FFMAX(start_time, f->start_time + f->ts_offset);
if (pkt->pts == AV_NOPTS_VALUE ?
- ist->pts < comp_start :
- pkt->pts < av_rescale_q(comp_start, AV_TIME_BASE_Q, ist->st->time_base))
+ ist->pts < ost->ts_copy_start :
+ pkt->pts < av_rescale_q(ost->ts_copy_start, AV_TIME_BASE_Q, ist->st->time_base))
return;
}
if (pkt->dts == AV_NOPTS_VALUE) {
opkt->dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->mux_timebase);
} else if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
- int duration = av_get_audio_frame_duration(ist->dec_ctx, pkt->size);
+ int duration = av_get_audio_frame_duration2(ist->par, pkt->size);
if(!duration)
- duration = ist->dec_ctx->frame_size;
+ duration = ist->par->frame_size;
opkt->dts = av_rescale_delta(ist->st->time_base, pkt->dts,
- (AVRational){1, ist->dec_ctx->sample_rate}, duration,
+ (AVRational){1, ist->par->sample_rate}, duration,
&ist->filter_in_rescale_delta_last, ost->mux_timebase);
/* dts will be set immediately afterwards to what pts is now */
opkt->pts = opkt->dts - ost_tb_start_time;
ost->streamcopy_started = 1;
}
-int guess_input_channel_layout(InputStream *ist)
-{
- AVCodecContext *dec = ist->dec_ctx;
-
- if (dec->ch_layout.order == AV_CHANNEL_ORDER_UNSPEC) {
- char layout_name[256];
-
- if (dec->ch_layout.nb_channels > ist->guess_layout_max)
- return 0;
- av_channel_layout_default(&dec->ch_layout, dec->ch_layout.nb_channels);
- if (dec->ch_layout.order == AV_CHANNEL_ORDER_UNSPEC)
- return 0;
- av_channel_layout_describe(&dec->ch_layout, layout_name, sizeof(layout_name));
- av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
- "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
- }
- return 1;
-}
-
static void check_decode_result(InputStream *ist, int *got_output, int ret)
{
if (*got_output || ret<0)
/* determine if the parameters for this input changed */
need_reinit = ifilter->format != frame->format;
- switch (ifilter->ist->st->codecpar->codec_type) {
+ switch (ifilter->ist->par->codec_type) {
case AVMEDIA_TYPE_AUDIO:
need_reinit |= ifilter->sample_rate != frame->sample_rate ||
av_channel_layout_compare(&ifilter->ch_layout, &frame->ch_layout);
} else {
// the filtergraph was never configured
if (ifilter->format < 0) {
- ret = ifilter_parameters_from_codecpar(ifilter, ifilter->ist->st->codecpar);
+ ret = ifilter_parameters_from_codecpar(ifilter, ifilter->ist->par);
if (ret < 0)
return ret;
}
// The following line may be required in some cases where there is no parser
// or the parser does not set has_b_frames correctly
- if (ist->st->codecpar->video_delay < ist->dec_ctx->has_b_frames) {
+ if (ist->par->video_delay < ist->dec_ctx->has_b_frames) {
if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
- ist->st->codecpar->video_delay = ist->dec_ctx->has_b_frames;
+ ist->par->video_delay = ist->dec_ctx->has_b_frames;
} else
av_log(ist->dec_ctx, AV_LOG_WARNING,
"video_delay is larger in decoder than demuxer %d > %d.\n"
"of this file to https://streams.videolan.org/upload/ "
"and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)\n",
ist->dec_ctx->has_b_frames,
- ist->st->codecpar->video_delay);
+ ist->par->video_delay);
}
if (ret != AVERROR_EOF)
if (err < 0)
goto fail;
}
- ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
best_effort_timestamp= decoded_frame->best_effort_timestamp;
- *duration_pts = decoded_frame->pkt_duration;
+ *duration_pts = decoded_frame->duration;
if (ist->framerate.num)
best_effort_timestamp = ist->cfr_next_pts++;
end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
1000, AV_TIME_BASE);
if (end < ist->prev_sub.subtitle.end_display_time) {
- av_log(ist->dec_ctx, AV_LOG_DEBUG,
+ av_log(NULL, AV_LOG_DEBUG,
"Subtitle duration reduced from %"PRId32" to %d%s\n",
ist->prev_sub.subtitle.end_display_time, end,
end <= 0 ? ", dropping it" : "");
for (i = 0; i < nb_output_streams; i++) {
OutputStream *ost = output_streams[i];
- if (!check_output_constraints(ist, ost) || !ost->encoding_needed
+ if (!check_output_constraints(ist, ost) || !ost->enc_ctx
|| ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
continue;
/* pkt = NULL means EOF (needed to flush decoder buffers) */
static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
{
+ const AVCodecParameters *par = ist->par;
int ret = 0, i;
int repeating = 0;
int eof_reached = 0;
if (pkt && pkt->dts != AV_NOPTS_VALUE) {
ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
- if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
+ if (par->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
ist->next_pts = ist->pts = ist->dts;
}
ist->pts = ist->next_pts;
ist->dts = ist->next_dts;
- switch (ist->dec_ctx->codec_type) {
+ switch (par->codec_type) {
case AVMEDIA_TYPE_AUDIO:
ret = decode_audio (ist, repeating ? NULL : avpkt, &got_output,
&decode_failed);
/* handle stream copy */
if (!ist->decoding_needed && pkt) {
ist->dts = ist->next_dts;
- switch (ist->dec_ctx->codec_type) {
+ switch (par->codec_type) {
case AVMEDIA_TYPE_AUDIO:
av_assert1(pkt->duration >= 0);
- if (ist->dec_ctx->sample_rate) {
- ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
- ist->dec_ctx->sample_rate;
+ if (par->sample_rate) {
+ ist->next_dts += ((int64_t)AV_TIME_BASE * par->frame_size) /
+ par->sample_rate;
} else {
ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
}
for (i = 0; i < nb_output_streams; i++) {
OutputStream *ost = output_streams[i];
- if (!check_output_constraints(ist, ost) || ost->encoding_needed)
+ if (!check_output_constraints(ist, ost) || ost->enc_ctx ||
+ (!pkt && no_eof))
continue;
do_streamcopy(ist, ost, pkt);
{
OutputFile *of = output_files[ost->file_index];
InputStream *ist = get_input_stream(ost);
- AVCodecParameters *par_dst = ost->st->codecpar;
- AVCodecParameters *par_src = ost->ref_par;
+ InputFile *ifile = input_files[ist->file_index];
+ AVCodecParameters *par = ost->st->codecpar;
+ AVCodecContext *codec_ctx;
AVRational sar;
int i, ret;
- uint32_t codec_tag = par_dst->codec_tag;
+ uint32_t codec_tag = par->codec_tag;
av_assert0(ist && !ost->filter);
- ret = avcodec_parameters_to_context(ost->enc_ctx, ist->st->codecpar);
+ codec_ctx = avcodec_alloc_context3(NULL);
+ if (!codec_ctx)
+ return AVERROR(ENOMEM);
+
+ ret = avcodec_parameters_to_context(codec_ctx, ist->par);
if (ret >= 0)
- ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
+ ret = av_opt_set_dict(codec_ctx, &ost->encoder_opts);
if (ret < 0) {
av_log(NULL, AV_LOG_FATAL,
"Error setting up codec context options.\n");
+ avcodec_free_context(&codec_ctx);
return ret;
}
- ret = avcodec_parameters_from_context(par_src, ost->enc_ctx);
+ ret = avcodec_parameters_from_context(par, codec_ctx);
+ avcodec_free_context(&codec_ctx);
if (ret < 0) {
av_log(NULL, AV_LOG_FATAL,
"Error getting reference codec parameters.\n");
if (!codec_tag) {
unsigned int codec_tag_tmp;
if (!of->format->codec_tag ||
- av_codec_get_id (of->format->codec_tag, par_src->codec_tag) == par_src->codec_id ||
- !av_codec_get_tag2(of->format->codec_tag, par_src->codec_id, &codec_tag_tmp))
- codec_tag = par_src->codec_tag;
+ av_codec_get_id (of->format->codec_tag, par->codec_tag) == par->codec_id ||
+ !av_codec_get_tag2(of->format->codec_tag, par->codec_id, &codec_tag_tmp))
+ codec_tag = par->codec_tag;
}
- ret = avcodec_parameters_copy(par_dst, par_src);
- if (ret < 0)
- return ret;
-
- par_dst->codec_tag = codec_tag;
+ par->codec_tag = codec_tag;
if (!ost->frame_rate.num)
ost->frame_rate = ist->framerate;
if (ost->st->duration <= 0 && ist->st->duration > 0)
ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
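+    /* compute the timestamp before which copied packets are dropped when
+     * -copy_prior_start is disabled; with copy_ts, the input file's start
+     * time (plus its ts_offset) is also taken into account */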
+ if (!ost->copy_prior_start) {
+ ost->ts_copy_start = (of->start_time == AV_NOPTS_VALUE) ?
+ 0 : of->start_time;
+ if (copy_ts && ifile->start_time != AV_NOPTS_VALUE) {
+ ost->ts_copy_start = FFMAX(ost->ts_copy_start,
+ ifile->start_time + ifile->ts_offset);
+ }
+ }
+
if (ist->st->nb_side_data) {
for (i = 0; i < ist->st->nb_side_data; i++) {
const AVPacketSideData *sd_src = &ist->st->side_data[i];
av_display_rotation_set((int32_t *)sd, -ost->rotate_override_value);
}
- switch (par_dst->codec_type) {
+ switch (par->codec_type) {
case AVMEDIA_TYPE_AUDIO:
- if (audio_volume != 256) {
- av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
- exit_program(1);
- }
- if((par_dst->block_align == 1 || par_dst->block_align == 1152 || par_dst->block_align == 576) && par_dst->codec_id == AV_CODEC_ID_MP3)
- par_dst->block_align= 0;
- if(par_dst->codec_id == AV_CODEC_ID_AC3)
- par_dst->block_align= 0;
+ if ((par->block_align == 1 || par->block_align == 1152 || par->block_align == 576) &&
+ par->codec_id == AV_CODEC_ID_MP3)
+ par->block_align = 0;
+ if (par->codec_id == AV_CODEC_ID_AC3)
+ par->block_align = 0;
break;
case AVMEDIA_TYPE_VIDEO:
if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
sar =
av_mul_q(ost->frame_aspect_ratio,
- (AVRational){ par_dst->height, par_dst->width });
+ (AVRational){ par->height, par->width });
av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
"with stream copy may produce invalid files\n");
}
else if (ist->st->sample_aspect_ratio.num)
sar = ist->st->sample_aspect_ratio;
else
- sar = par_src->sample_aspect_ratio;
- ost->st->sample_aspect_ratio = par_dst->sample_aspect_ratio = sar;
+ sar = par->sample_aspect_ratio;
+ ost->st->sample_aspect_ratio = par->sample_aspect_ratio = sar;
ost->st->avg_frame_rate = ist->st->avg_frame_rate;
ost->st->r_frame_rate = ist->st->r_frame_rate;
break;
static void set_encoder_id(OutputFile *of, OutputStream *ost)
{
- const AVDictionaryEntry *e;
-
uint8_t *encoder_string;
int encoder_string_len;
- int format_flags = 0;
- int codec_flags = ost->enc_ctx->flags;
if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
return;
- e = av_dict_get(of->opts, "fflags", NULL, 0);
- if (e) {
- const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
- if (!o)
- return;
- av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
- }
- e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
- if (e) {
- const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
- if (!o)
- return;
- av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
- }
-
encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
encoder_string = av_mallocz(encoder_string_len);
if (!encoder_string)
exit_program(1);
- if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
+ if (!of->bitexact && !ost->bitexact)
av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
else
av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
*next++ = 0;
if (!memcmp(p, "chapters", 8)) {
-
- AVFormatContext *avf = output_files[ost->file_index]->ctx;
+ OutputFile *of = output_files[ost->file_index];
+ AVChapter * const *ch;
+ unsigned int nb_ch;
int j;
- if (avf->nb_chapters > INT_MAX - size ||
- !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
+ ch = of_get_chapters(of, &nb_ch);
+
+ if (nb_ch > INT_MAX - size ||
+ !(pts = av_realloc_f(pts, size += nb_ch - 1,
sizeof(*pts)))) {
av_log(NULL, AV_LOG_FATAL,
"Could not allocate forced key frames array.\n");
t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
- for (j = 0; j < avf->nb_chapters; j++) {
- AVChapter *c = avf->chapters[j];
+ for (j = 0; j < nb_ch; j++) {
+ const AVChapter *c = ch[j];
av_assert1(index < size);
pts[index++] = av_rescale_q(c->start, c->time_base,
avctx->time_base) + t;
{
InputStream *ist = get_input_stream(ost);
AVCodecContext *enc_ctx = ost->enc_ctx;
- AVFormatContext *oc;
if (ost->enc_timebase.num > 0) {
enc_ctx->time_base = ost->enc_timebase;
return;
}
- oc = output_files[ost->file_index]->ctx;
- av_log(oc, AV_LOG_WARNING, "Input stream data not available, using default time base\n");
+ av_log(NULL, AV_LOG_WARNING,
+ "Input stream data for output stream #%d:%d not available, "
+ "using default time base\n", ost->file_index, ost->index);
}
enc_ctx->time_base = default_time_base;
AVCodecContext *enc_ctx = ost->enc_ctx;
AVCodecContext *dec_ctx = NULL;
OutputFile *of = output_files[ost->file_index];
- AVFormatContext *oc = of->ctx;
int ret;
set_encoder_id(output_files[ost->file_index], ost);
if ( av_q2d(enc_ctx->time_base) < 0.001 && ost->vsync_method != VSYNC_PASSTHROUGH
&& (ost->vsync_method == VSYNC_CFR || ost->vsync_method == VSYNC_VSCFR ||
(ost->vsync_method == VSYNC_AUTO && !(of->format->flags & AVFMT_VARIABLE_FPS)))){
- av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
+ av_log(NULL, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
"Please consider specifying a lower framerate, a different muxer or "
"setting vsync/fps_mode to vfr\n");
}
case AVMEDIA_TYPE_SUBTITLE:
enc_ctx->time_base = AV_TIME_BASE_Q;
if (!enc_ctx->width) {
- enc_ctx->width = input_streams[ost->source_index]->st->codecpar->width;
- enc_ctx->height = input_streams[ost->source_index]->st->codecpar->height;
+ enc_ctx->width = input_streams[ost->source_index]->par->width;
+ enc_ctx->height = input_streams[ost->source_index]->par->height;
}
break;
case AVMEDIA_TYPE_DATA:
break;
}
+ if (ost->bitexact)
+ enc_ctx->flags |= AV_CODEC_FLAG_BITEXACT;
+
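+    /* let the encoding sync queue know the time base of this stream's frames */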
+ if (ost->sq_idx_encode >= 0)
+ sq_set_tb(of->sq_encode, ost->sq_idx_encode, enc_ctx->time_base);
+
ost->mux_timebase = enc_ctx->time_base;
return 0;
static int init_output_stream(OutputStream *ost, AVFrame *frame,
char *error, int error_len)
{
+ OutputFile *of = output_files[ost->file_index];
int ret = 0;
- if (ost->encoding_needed) {
+ if (ost->enc_ctx) {
const AVCodec *codec = ost->enc;
AVCodecContext *dec = NULL;
InputStream *ist;
// copy estimated duration as a hint to the muxer
if (ost->st->duration <= 0 && ist && ist->st->duration > 0)
ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
- } else if (ost->stream_copy) {
+ } else if (ost->source_index >= 0) {
ret = init_output_stream_streamcopy(ost);
if (ret < 0)
return ret;
if (ret < 0)
return ret;
+ if (ost->sq_idx_mux >= 0)
+ sq_set_tb(of->sq_mux, ost->sq_idx_mux, ost->mux_timebase);
+
ost->initialized = 1;
ret = of_check_init(output_files[ost->file_index]);
return ret;
}
-static void report_new_stream(int input_index, AVPacket *pkt)
-{
- InputFile *file = input_files[input_index];
- AVStream *st = file->ctx->streams[pkt->stream_index];
-
- if (pkt->stream_index < file->nb_streams_warn)
- return;
- av_log(file->ctx, AV_LOG_WARNING,
- "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
- av_get_media_type_string(st->codecpar->codec_type),
- input_index, pkt->stream_index,
- pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
- file->nb_streams_warn = pkt->stream_index + 1;
-}
-
static int transcode_init(void)
{
int ret = 0, i, j, k;
- AVFormatContext *oc;
OutputStream *ost;
InputStream *ist;
char error[1024] = {0};
input_streams[j + ifile->ist_index]->start = av_gettime_relative();
}
+    // Correct the start time based on the enabled streams
+ for (i = 0; i < nb_input_files; i++) {
+ InputFile *ifile = input_files[i];
+ AVFormatContext *is = ifile->ctx;
+ int64_t new_start_time = INT64_MAX;
+
+ if (is->start_time == AV_NOPTS_VALUE ||
+ !(is->iformat->flags & AVFMT_TS_DISCONT))
+ continue;
+
+ for (int j = 0; j < is->nb_streams; j++) {
+ AVStream *st = is->streams[j];
+ if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
+ continue;
+ new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
+ }
+ if (new_start_time > is->start_time) {
+ av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
+ ifile->ts_offset = -new_start_time;
+ }
+ }
+
/* init input streams */
for (i = 0; i < nb_input_streams; i++)
- if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
- for (i = 0; i < nb_output_streams; i++) {
- ost = output_streams[i];
- avcodec_close(ost->enc_ctx);
- }
+ if ((ret = init_input_stream(i, error, sizeof(error))) < 0)
goto dump_format;
- }
/*
* initialize stream copy and subtitle/data streams.
* known after the encoder is initialized.
*/
for (i = 0; i < nb_output_streams; i++) {
- if (!output_streams[i]->stream_copy &&
- (output_streams[i]->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
- output_streams[i]->enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO))
+ if (output_streams[i]->enc_ctx &&
+ (output_streams[i]->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ||
+ output_streams[i]->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO))
continue;
ret = init_output_stream_wrapper(output_streams[i], NULL, 0);
}
}
- /* write headers for files with no streams */
- for (i = 0; i < nb_output_files; i++) {
- oc = output_files[i]->ctx;
- if (output_files[i]->format->flags & AVFMT_NOSTREAMS && oc->nb_streams == 0) {
- ret = of_check_init(output_files[i]);
- if (ret < 0)
- goto dump_format;
- }
- }
-
dump_format:
/* dump the stream mapping */
av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
input_streams[ost->source_index]->st->index,
ost->file_index,
ost->index);
- if (ost->sync_ist != input_streams[ost->source_index])
- av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
- ost->sync_ist->file_index,
- ost->sync_ist->st->index);
- if (ost->stream_copy)
- av_log(NULL, AV_LOG_INFO, " (copy)");
- else {
+ if (ost->enc_ctx) {
const AVCodec *in_codec = input_streams[ost->source_index]->dec;
const AVCodec *out_codec = ost->enc;
const char *decoder_name = "?";
av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
in_codec_name, decoder_name,
out_codec_name, encoder_name);
- }
+ } else
+ av_log(NULL, AV_LOG_INFO, " (copy)");
av_log(NULL, AV_LOG_INFO, "\n");
}
for (i = 0; i < nb_output_streams; i++) {
OutputStream *ost = output_streams[i];
- OutputFile *of = output_files[ost->file_index];
- AVFormatContext *os = output_files[ost->file_index]->ctx;
- if (ost->finished ||
- (os->pb && avio_tell(os->pb) >= of->limit_filesize))
- continue;
- if (ost->frame_number >= ost->max_frames) {
- int j;
- for (j = 0; j < of->ctx->nb_streams; j++)
- close_output_stream(output_streams[of->ost_index + j]);
+ if (ost->finished)
continue;
- }
return 1;
}
for (i = 0; i < nb_output_streams; i++) {
OutputStream *ost = output_streams[i];
- int64_t opts = ost->last_mux_dts == AV_NOPTS_VALUE ? INT64_MIN :
- av_rescale_q(ost->last_mux_dts, ost->st->time_base,
- AV_TIME_BASE_Q);
- if (ost->last_mux_dts == AV_NOPTS_VALUE)
- av_log(NULL, AV_LOG_DEBUG,
- "cur_dts is invalid st:%d (%d) [init:%d i_done:%d finish:%d] (this is harmless if it occurs once at the start per stream)\n",
- ost->st->index, ost->st->id, ost->initialized, ost->inputs_done, ost->finished);
+ int64_t opts;
+
+ if (ost->filter && ost->last_filter_pts != AV_NOPTS_VALUE) {
+ opts = ost->last_filter_pts;
+ } else {
+ opts = ost->last_mux_dts == AV_NOPTS_VALUE ?
+ INT64_MIN : ost->last_mux_dts;
+ if (ost->last_mux_dts == AV_NOPTS_VALUE)
+ av_log(NULL, AV_LOG_DEBUG,
+ "cur_dts is invalid st:%d (%d) [init:%d i_done:%d finish:%d] (this is harmless if it occurs once at the start per stream)\n",
+ ost->st->index, ost->st->id, ost->initialized, ost->inputs_done, ost->finished);
+ }
if (!ost->initialized && !ost->inputs_done)
return ost->unavailable ? NULL : ost;
if (key == '+') av_log_set_level(av_log_get_level()+10);
if (key == '-') av_log_set_level(av_log_get_level()-10);
if (key == 's') qp_hist ^= 1;
- if (key == 'h'){
- if (do_hex_dump){
- do_hex_dump = do_pkt_dump = 0;
- } else if(do_pkt_dump){
- do_hex_dump = 1;
- } else
- do_pkt_dump = 1;
- av_log_set_level(AV_LOG_DEBUG);
- }
if (key == 'c' || key == 'C'){
char buf[4096], target[64], command[256], arg[256] = {0};
double time;
}
for(i=0;i<nb_output_streams;i++) {
OutputStream *ost = output_streams[i];
- ost->enc_ctx->debug = debug;
+ if (ost->enc_ctx)
+ ost->enc_ctx->debug = debug;
}
if(debug) av_log_set_level(AV_LOG_DEBUG);
fprintf(stderr,"debug=%d\n", debug);
return 0;
}
-#if HAVE_THREADS
-static void *input_thread(void *arg)
-{
- InputFile *f = arg;
- AVPacket *pkt = f->pkt, *queue_pkt;
- unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
- int ret = 0;
-
- while (1) {
- ret = av_read_frame(f->ctx, pkt);
-
- if (ret == AVERROR(EAGAIN)) {
- av_usleep(10000);
- continue;
- }
- if (ret < 0) {
- av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
- break;
- }
- queue_pkt = av_packet_alloc();
- if (!queue_pkt) {
- av_packet_unref(pkt);
- av_thread_message_queue_set_err_recv(f->in_thread_queue, AVERROR(ENOMEM));
- break;
- }
- av_packet_move_ref(queue_pkt, pkt);
- ret = av_thread_message_queue_send(f->in_thread_queue, &queue_pkt, flags);
- if (flags && ret == AVERROR(EAGAIN)) {
- flags = 0;
- ret = av_thread_message_queue_send(f->in_thread_queue, &queue_pkt, flags);
- av_log(f->ctx, AV_LOG_WARNING,
- "Thread message queue blocking; consider raising the "
- "thread_queue_size option (current value: %d)\n",
- f->thread_queue_size);
- }
- if (ret < 0) {
- if (ret != AVERROR_EOF)
- av_log(f->ctx, AV_LOG_ERROR,
- "Unable to send packet to main thread: %s\n",
- av_err2str(ret));
- av_packet_free(&queue_pkt);
- av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
- break;
- }
- }
-
- return NULL;
-}
-
-static void free_input_thread(int i)
-{
- InputFile *f = input_files[i];
- AVPacket *pkt;
-
- if (!f || !f->in_thread_queue)
- return;
- av_thread_message_queue_set_err_send(f->in_thread_queue, AVERROR_EOF);
- while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
- av_packet_free(&pkt);
-
- pthread_join(f->thread, NULL);
- f->joined = 1;
- av_thread_message_queue_free(&f->in_thread_queue);
-}
-
-static void free_input_threads(void)
-{
- int i;
-
- for (i = 0; i < nb_input_files; i++)
- free_input_thread(i);
-}
-
-static int init_input_thread(int i)
-{
- int ret;
- InputFile *f = input_files[i];
-
- if (f->thread_queue_size < 0)
- f->thread_queue_size = (nb_input_files > 1 ? 8 : 0);
- if (!f->thread_queue_size)
- return 0;
-
- if (f->ctx->pb ? !f->ctx->pb->seekable :
- strcmp(f->ctx->iformat->name, "lavfi"))
- f->non_blocking = 1;
- ret = av_thread_message_queue_alloc(&f->in_thread_queue,
- f->thread_queue_size, sizeof(f->pkt));
- if (ret < 0)
- return ret;
-
- if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
- av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
- av_thread_message_queue_free(&f->in_thread_queue);
- return AVERROR(ret);
- }
-
- return 0;
-}
-
-static int init_input_threads(void)
-{
- int i, ret;
-
- for (i = 0; i < nb_input_files; i++) {
- ret = init_input_thread(i);
- if (ret < 0)
- return ret;
- }
- return 0;
-}
-
-static int get_input_packet_mt(InputFile *f, AVPacket **pkt)
-{
- return av_thread_message_queue_recv(f->in_thread_queue, pkt,
- f->non_blocking ?
- AV_THREAD_MESSAGE_NONBLOCK : 0);
-}
-#endif
-
-static int get_input_packet(InputFile *f, AVPacket **pkt)
-{
- if (f->readrate || f->rate_emu) {
- int i;
- int64_t file_start = copy_ts * (
- (f->ctx->start_time != AV_NOPTS_VALUE ? f->ctx->start_time * !start_at_zero : 0) +
- (f->start_time != AV_NOPTS_VALUE ? f->start_time : 0)
- );
- float scale = f->rate_emu ? 1.0 : f->readrate;
- for (i = 0; i < f->nb_streams; i++) {
- InputStream *ist = input_streams[f->ist_index + i];
- int64_t stream_ts_offset, pts, now;
- if (!ist->nb_packets || (ist->decoding_needed && !ist->got_output)) continue;
- stream_ts_offset = FFMAX(ist->first_dts != AV_NOPTS_VALUE ? ist->first_dts : 0, file_start);
- pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
- now = (av_gettime_relative() - ist->start) * scale + stream_ts_offset;
- if (pts > now)
- return AVERROR(EAGAIN);
- }
- }
-
-#if HAVE_THREADS
- if (f->thread_queue_size)
- return get_input_packet_mt(f, pkt);
-#endif
- *pkt = f->pkt;
- return av_read_frame(f->ctx, *pkt);
-}
-
static int got_eagain(void)
{
int i;
output_streams[i]->unavailable = 0;
}
-// set duration to max(tmp, duration) in a proper time base and return duration's time_base
-static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base,
- AVRational time_base)
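+/* Flush the decoders of an input file whose input has looped back to the
+ * start: drain every stream that needs processing with a NULL packet, report
+ * the duration of the last decoded audio frame back to the demuxer thread via
+ * audio_duration_queue, and reset decoder state with avcodec_flush_buffers(). */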
+static void decode_flush(InputFile *ifile)
{
- int ret;
+ for (int i = 0; i < ifile->nb_streams; i++) {
+ InputStream *ist = input_streams[ifile->ist_index + i];
+ int ret;
- if (!*duration) {
- *duration = tmp;
- return tmp_time_base;
- }
-
- ret = av_compare_ts(*duration, time_base, tmp, tmp_time_base);
- if (ret < 0) {
- *duration = tmp;
- return tmp_time_base;
- }
+ if (!ist->processing_needed)
+ continue;
- return time_base;
-}
+ do {
+ ret = process_input_packet(ist, NULL, 1);
+ } while (ret > 0);
-static int seek_to_start(InputFile *ifile, AVFormatContext *is)
-{
- InputStream *ist;
- AVCodecContext *avctx;
- int i, ret, has_audio = 0;
- int64_t duration = 0;
+ if (ist->decoding_needed) {
+ /* report last frame duration to the demuxer thread */
+ if (ist->par->codec_type == AVMEDIA_TYPE_AUDIO) {
+ LastFrameDuration dur;
- ret = avformat_seek_file(is, -1, INT64_MIN, is->start_time, is->start_time, 0);
- if (ret < 0)
- return ret;
+ dur.stream_idx = i;
+ dur.duration = av_rescale_q(ist->nb_samples,
+ (AVRational){ 1, ist->dec_ctx->sample_rate},
+ ist->st->time_base);
- for (i = 0; i < ifile->nb_streams; i++) {
- ist = input_streams[ifile->ist_index + i];
- avctx = ist->dec_ctx;
+ av_thread_message_queue_send(ifile->audio_duration_queue, &dur, 0);
+ }
- /* duration is the length of the last frame in a stream
- * when audio stream is present we don't care about
- * last video frame length because it's not defined exactly */
- if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples)
- has_audio = 1;
+ avcodec_flush_buffers(ist->dec_ctx);
+ }
}
+}
- for (i = 0; i < ifile->nb_streams; i++) {
- ist = input_streams[ifile->ist_index + i];
- avctx = ist->dec_ctx;
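+/* Detect timestamp discontinuities: on formats flagged AVFMT_TS_DISCONT, large
+ * jumps between a packet's DTS and the stream's expected next DTS are folded
+ * into ifile->ts_offset_discont; on other formats, timestamps too far from the
+ * expected value are discarded (set to AV_NOPTS_VALUE) instead. */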
+static void ts_discontinuity_detect(InputFile *ifile, InputStream *ist,
+ AVPacket *pkt)
+{
+ const int fmt_is_discont = ifile->ctx->iformat->flags & AVFMT_TS_DISCONT;
+ int disable_discontinuity_correction = copy_ts;
+ int64_t pkt_dts = av_rescale_q_rnd(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q,
+ AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX);
- if (has_audio) {
- if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples) {
- AVRational sample_rate = {1, avctx->sample_rate};
+ if (copy_ts && ist->next_dts != AV_NOPTS_VALUE &&
+ fmt_is_discont && ist->st->pts_wrap_bits < 60) {
+ int64_t wrap_dts = av_rescale_q_rnd(pkt->dts + (1LL<<ist->st->pts_wrap_bits),
+ ist->st->time_base, AV_TIME_BASE_Q,
+ AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
+ if (FFABS(wrap_dts - ist->next_dts) < FFABS(pkt_dts - ist->next_dts)/10)
+ disable_discontinuity_correction = 0;
+ }
- duration = av_rescale_q(ist->nb_samples, sample_rate, ist->st->time_base);
- } else {
- continue;
+ if (ist->next_dts != AV_NOPTS_VALUE && !disable_discontinuity_correction) {
+ int64_t delta = pkt_dts - ist->next_dts;
+ if (fmt_is_discont) {
+ if (FFABS(delta) > 1LL * dts_delta_threshold * AV_TIME_BASE ||
+ pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
+ ifile->ts_offset_discont -= delta;
+ av_log(NULL, AV_LOG_DEBUG,
+ "timestamp discontinuity for stream #%d:%d "
+ "(id=%d, type=%s): %"PRId64", new offset= %"PRId64"\n",
+ ist->file_index, ist->st->index, ist->st->id,
+ av_get_media_type_string(ist->par->codec_type),
+ delta, ifile->ts_offset_discont);
+ pkt->dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
+ if (pkt->pts != AV_NOPTS_VALUE)
+ pkt->pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
}
} else {
- if (ist->framerate.num) {
- duration = av_rescale_q(1, av_inv_q(ist->framerate), ist->st->time_base);
- } else if (ist->st->avg_frame_rate.num) {
- duration = av_rescale_q(1, av_inv_q(ist->st->avg_frame_rate), ist->st->time_base);
- } else {
- duration = 1;
+ if (FFABS(delta) > 1LL * dts_error_threshold * AV_TIME_BASE) {
+ av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt->dts, ist->next_dts, pkt->stream_index);
+ pkt->dts = AV_NOPTS_VALUE;
+ }
+ if (pkt->pts != AV_NOPTS_VALUE){
+ int64_t pkt_pts = av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
+ delta = pkt_pts - ist->next_dts;
+ if (FFABS(delta) > 1LL * dts_error_threshold * AV_TIME_BASE) {
+ av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt->pts, ist->next_dts, pkt->stream_index);
+ pkt->pts = AV_NOPTS_VALUE;
+ }
}
}
- if (!ifile->duration)
- ifile->time_base = ist->st->time_base;
- /* the total duration of the stream, max_pts - min_pts is
- * the duration of the stream without the last frame */
- if (ist->max_pts > ist->min_pts && ist->max_pts - (uint64_t)ist->min_pts < INT64_MAX - duration)
- duration += ist->max_pts - ist->min_pts;
- ifile->time_base = duration_max(duration, &ifile->duration, ist->st->time_base,
- ifile->time_base);
+ } else if (ist->next_dts == AV_NOPTS_VALUE && !copy_ts &&
+ fmt_is_discont && ifile->last_ts != AV_NOPTS_VALUE) {
+ int64_t delta = pkt_dts - ifile->last_ts;
+ if (FFABS(delta) > 1LL * dts_delta_threshold * AV_TIME_BASE) {
+ ifile->ts_offset_discont -= delta;
+ av_log(NULL, AV_LOG_DEBUG,
+ "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
+ delta, ifile->ts_offset_discont);
+ pkt->dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
+ if (pkt->pts != AV_NOPTS_VALUE)
+ pkt->pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
+ }
}
- if (ifile->loop > 0)
- ifile->loop--;
+ ifile->last_ts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
+}
+
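+/* Apply the accumulated discontinuity offset to the packet's timestamps, then
+ * run discontinuity detection for audio/video packets that carry a DTS. */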
+static void ts_discontinuity_process(InputFile *ifile, InputStream *ist,
+ AVPacket *pkt)
+{
+ int64_t offset = av_rescale_q(ifile->ts_offset_discont, AV_TIME_BASE_Q,
+ ist->st->time_base);
- return ret;
+ // apply previously-detected timestamp-discontinuity offset
+ // (to all streams, not just audio/video)
+ if (pkt->dts != AV_NOPTS_VALUE)
+ pkt->dts += offset;
+ if (pkt->pts != AV_NOPTS_VALUE)
+ pkt->pts += offset;
+
+ // detect timestamp discontinuities for audio/video
+ if ((ist->par->codec_type == AVMEDIA_TYPE_VIDEO ||
+ ist->par->codec_type == AVMEDIA_TYPE_AUDIO) &&
+ pkt->dts != AV_NOPTS_VALUE)
+ ts_discontinuity_detect(ifile, ist, pkt);
}
/*
AVFormatContext *is;
InputStream *ist;
AVPacket *pkt;
- int ret, thread_ret, i, j;
- int64_t duration;
- int64_t pkt_dts;
- int disable_discontinuity_correction = copy_ts;
+ int ret, i, j;
is = ifile->ctx;
- ret = get_input_packet(ifile, &pkt);
+ ret = ifile_get_packet(ifile, &pkt);
if (ret == AVERROR(EAGAIN)) {
ifile->eagain = 1;
return ret;
}
- if (ret < 0 && ifile->loop) {
- AVCodecContext *avctx;
- for (i = 0; i < ifile->nb_streams; i++) {
- ist = input_streams[ifile->ist_index + i];
- avctx = ist->dec_ctx;
- if (ist->processing_needed) {
- ret = process_input_packet(ist, NULL, 1);
- if (ret>0)
- return 0;
- if (ist->decoding_needed)
- avcodec_flush_buffers(avctx);
- }
- }
-#if HAVE_THREADS
- free_input_thread(file_index);
-#endif
- ret = seek_to_start(ifile, is);
-#if HAVE_THREADS
- thread_ret = init_input_thread(file_index);
- if (thread_ret < 0)
- return thread_ret;
-#endif
- if (ret < 0)
- av_log(NULL, AV_LOG_WARNING, "Seek to start failed.\n");
- else
- ret = get_input_packet(ifile, &pkt);
- if (ret == AVERROR(EAGAIN)) {
- ifile->eagain = 1;
- return ret;
- }
+ if (ret == 1) {
+ /* the input file is looped: flush the decoders */
+ decode_flush(ifile);
+ return AVERROR(EAGAIN);
}
if (ret < 0) {
if (ret != AVERROR_EOF) {
OutputStream *ost = output_streams[j];
if (ost->source_index == ifile->ist_index + i &&
- (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
- finish_output_stream(ost);
+ (!ost->enc_ctx || ost->enc->type == AVMEDIA_TYPE_SUBTITLE)) {
+ OutputFile *of = output_files[ost->file_index];
+ output_packet(of, ost->pkt, ost, 1);
+ }
}
}
reset_eagain();
- if (do_pkt_dump) {
- av_pkt_dump_log2(NULL, AV_LOG_INFO, pkt, do_hex_dump,
- is->streams[pkt->stream_index]);
- }
- /* the following test is needed in case new streams appear
- dynamically in stream : we ignore them */
- if (pkt->stream_index >= ifile->nb_streams) {
- report_new_stream(file_index, pkt);
- goto discard_packet;
- }
-
ist = input_streams[ifile->ist_index + pkt->stream_index];
ist->data_size += pkt->size;
if (ist->discard)
goto discard_packet;
- if (pkt->flags & AV_PKT_FLAG_CORRUPT) {
- av_log(NULL, exit_on_error ? AV_LOG_FATAL : AV_LOG_WARNING,
- "%s: corrupt input packet in stream %d\n", is->url, pkt->stream_index);
- if (exit_on_error)
- exit_program(1);
- }
-
- if (debug_ts) {
- av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
- "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s duration:%s duration_time:%s off:%s off_time:%s\n",
- ifile->ist_index + pkt->stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
- av_ts2str(ist->next_dts), av_ts2timestr(ist->next_dts, &AV_TIME_BASE_Q),
- av_ts2str(ist->next_pts), av_ts2timestr(ist->next_pts, &AV_TIME_BASE_Q),
- av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ist->st->time_base),
- av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ist->st->time_base),
- av_ts2str(pkt->duration), av_ts2timestr(pkt->duration, &ist->st->time_base),
- av_ts2str(input_files[ist->file_index]->ts_offset),
- av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
- }
-
- if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
- int64_t stime, stime2;
- // Correcting starttime based on the enabled streams
- // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
- // so we instead do it here as part of discontinuity handling
- if ( ist->next_dts == AV_NOPTS_VALUE
- && ifile->ts_offset == -is->start_time
- && (is->iformat->flags & AVFMT_TS_DISCONT)) {
- int64_t new_start_time = INT64_MAX;
- for (i=0; i<is->nb_streams; i++) {
- AVStream *st = is->streams[i];
- if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
- continue;
- new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
- }
- if (new_start_time > is->start_time) {
- av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
- ifile->ts_offset = -new_start_time;
- }
- }
-
- stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
- stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
- ist->wrap_correction_done = 1;
-
- if(stime2 > stime && pkt->dts != AV_NOPTS_VALUE && pkt->dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
- pkt->dts -= 1ULL<<ist->st->pts_wrap_bits;
- ist->wrap_correction_done = 0;
- }
- if(stime2 > stime && pkt->pts != AV_NOPTS_VALUE && pkt->pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
- pkt->pts -= 1ULL<<ist->st->pts_wrap_bits;
- ist->wrap_correction_done = 0;
- }
- }
-
/* add the stream-global side data to the first packet */
if (ist->nb_packets == 1) {
for (i = 0; i < ist->st->nb_side_data; i++) {
}
}
- if (pkt->dts != AV_NOPTS_VALUE)
- pkt->dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
- if (pkt->pts != AV_NOPTS_VALUE)
- pkt->pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
-
- if (pkt->pts != AV_NOPTS_VALUE)
- pkt->pts *= ist->ts_scale;
- if (pkt->dts != AV_NOPTS_VALUE)
- pkt->dts *= ist->ts_scale;
-
- pkt_dts = av_rescale_q_rnd(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
- if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
- ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
- pkt_dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
- && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
- int64_t delta = pkt_dts - ifile->last_ts;
- if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
- delta > 1LL*dts_delta_threshold*AV_TIME_BASE){
- ifile->ts_offset -= delta;
- av_log(NULL, AV_LOG_DEBUG,
- "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
- delta, ifile->ts_offset);
- pkt->dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
- if (pkt->pts != AV_NOPTS_VALUE)
- pkt->pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
- }
- }
-
- duration = av_rescale_q(ifile->duration, ifile->time_base, ist->st->time_base);
- if (pkt->pts != AV_NOPTS_VALUE) {
- pkt->pts += duration;
- ist->max_pts = FFMAX(pkt->pts, ist->max_pts);
- ist->min_pts = FFMIN(pkt->pts, ist->min_pts);
- }
-
- if (pkt->dts != AV_NOPTS_VALUE)
- pkt->dts += duration;
-
- pkt_dts = av_rescale_q_rnd(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
-
- if (copy_ts && pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
- (is->iformat->flags & AVFMT_TS_DISCONT) && ist->st->pts_wrap_bits < 60) {
- int64_t wrap_dts = av_rescale_q_rnd(pkt->dts + (1LL<<ist->st->pts_wrap_bits),
- ist->st->time_base, AV_TIME_BASE_Q,
- AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
- if (FFABS(wrap_dts - ist->next_dts) < FFABS(pkt_dts - ist->next_dts)/10)
- disable_discontinuity_correction = 0;
- }
-
- if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
- ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
- pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
- !disable_discontinuity_correction) {
- int64_t delta = pkt_dts - ist->next_dts;
- if (is->iformat->flags & AVFMT_TS_DISCONT) {
- if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
- delta > 1LL*dts_delta_threshold*AV_TIME_BASE ||
- pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
- ifile->ts_offset -= delta;
- av_log(NULL, AV_LOG_DEBUG,
- "timestamp discontinuity for stream #%d:%d "
- "(id=%d, type=%s): %"PRId64", new offset= %"PRId64"\n",
- ist->file_index, ist->st->index, ist->st->id,
- av_get_media_type_string(ist->dec_ctx->codec_type),
- delta, ifile->ts_offset);
- pkt->dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
- if (pkt->pts != AV_NOPTS_VALUE)
- pkt->pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
- }
- } else {
- if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
- delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
- av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt->dts, ist->next_dts, pkt->stream_index);
- pkt->dts = AV_NOPTS_VALUE;
- }
- if (pkt->pts != AV_NOPTS_VALUE){
- int64_t pkt_pts = av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
- delta = pkt_pts - ist->next_dts;
- if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
- delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
- av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt->pts, ist->next_dts, pkt->stream_index);
- pkt->pts = AV_NOPTS_VALUE;
- }
- }
- }
- }
-
- if (pkt->dts != AV_NOPTS_VALUE)
- ifile->last_ts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
+ // detect and try to correct for timestamp discontinuities
+ ts_discontinuity_process(ifile, ist, pkt);
if (debug_ts) {
av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s duration:%s duration_time:%s off:%s off_time:%s\n",
- ifile->ist_index + pkt->stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
+ ifile->ist_index + pkt->stream_index,
+ av_get_media_type_string(ist->par->codec_type),
av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ist->st->time_base),
av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ist->st->time_base),
av_ts2str(pkt->duration), av_ts2timestr(pkt->duration, &ist->st->time_base),
process_input_packet(ist, pkt, 0);
discard_packet:
-#if HAVE_THREADS
- if (ifile->thread_queue_size)
- av_packet_free(&pkt);
- else
-#endif
- av_packet_unref(pkt);
+ av_packet_free(&pkt);
return 0;
}
static int transcode(void)
{
int ret, i;
- AVFormatContext *os;
OutputStream *ost;
InputStream *ist;
int64_t timer_start;
timer_start = av_gettime_relative();
-#if HAVE_THREADS
if ((ret = init_input_threads()) < 0)
goto fail;
-#endif
while (!received_sigterm) {
int64_t cur_time= av_gettime_relative();
/* dump report by using the output first video and audio streams */
print_report(0, timer_start, cur_time);
}
-#if HAVE_THREADS
free_input_threads();
-#endif
/* at the end of stream, we must flush the decoder buffers */
for (i = 0; i < nb_input_streams; i++) {
/* dump report by using the first video and audio streams */
print_report(1, timer_start, av_gettime_relative());
- /* close the output files */
- for (i = 0; i < nb_output_files; i++) {
- os = output_files[i]->ctx;
- if (os && os->oformat && !(os->oformat->flags & AVFMT_NOFILE)) {
- if ((ret = avio_closep(&os->pb)) < 0) {
- av_log(NULL, AV_LOG_ERROR, "Error closing file %s: %s\n", os->url, av_err2str(ret));
- if (exit_on_error)
- exit_program(1);
- }
- }
- }
-
/* close each encoder */
for (i = 0; i < nb_output_streams; i++) {
+ uint64_t packets_written;
ost = output_streams[i];
- if (ost->encoding_needed) {
- av_freep(&ost->enc_ctx->stats_in);
- }
- total_packets_written += ost->packets_written;
- if (!ost->packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT_STREAM)) {
+ packets_written = atomic_load(&ost->packets_written);
+ total_packets_written += packets_written;
+ if (!packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT_STREAM)) {
av_log(NULL, AV_LOG_FATAL, "Empty output on stream %d.\n", i);
exit_program(1);
}
ist = input_streams[i];
if (ist->decoding_needed) {
avcodec_close(ist->dec_ctx);
- if (ist->hwaccel_uninit)
- ist->hwaccel_uninit(ist->dec_ctx);
}
}
ret = 0;
fail:
-#if HAVE_THREADS
free_input_threads();
-#endif
if (output_streams) {
for (i = 0; i < nb_output_streams; i++) {
int main(int argc, char **argv)
{
- int i, ret;
+ int ret;
BenchmarkTimeStamps ti;
init_dynload();
exit_program(1);
}
- for (i = 0; i < nb_output_files; i++) {
- if (strcmp(output_files[i]->format->name, "rtp"))
- want_sdp = 0;
- }
-
current_time = ti = get_benchmark_time_stamps();
if (transcode() < 0)
exit_program(1);