#define MAX_QUEUE_SIZE (15 * 1024 * 1024)
#define MIN_FRAMES 5
-/* SDL audio buffer size, in samples. Should be small to have precise
- A/V sync as SDL does not have hardware buffer fullness info. */
-#define SDL_AUDIO_BUFFER_SIZE 1024
+/* Minimum SDL audio buffer size, in samples. */
+#define SDL_AUDIO_MIN_BUFFER_SIZE 512
+/* Calculate the actual buffer size, keeping in mind that it must not cause too frequent audio callbacks */
+#define SDL_AUDIO_MAX_CALLBACKS_PER_SEC 30
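As a rough worked example (assuming a 48 kHz output device and the FFMAX(SDL_AUDIO_MIN_BUFFER_SIZE, 2 << av_log2(freq / SDL_AUDIO_MAX_CALLBACKS_PER_SEC)) expression used further down where the SDL audio device is opened):
/* 48000 / 30 = 1600 samples; av_log2(1600) = 10; 2 << 10 = 2048.
 * FFMAX(512, 2048) = 2048 samples per callback, i.e. roughly 23 callbacks
 * per second, versus the ~47 per second the old fixed 1024-sample buffer
 * produced at the same rate. */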
/* no AV sync correction is done if below the minimum AV sync threshold */
-#define AV_SYNC_THRESHOLD_MIN 0.01
+#define AV_SYNC_THRESHOLD_MIN 0.04
/* AV sync correction is done if above the maximum AV sync threshold */
#define AV_SYNC_THRESHOLD_MAX 0.1
/* If a frame duration is longer than this, it will not be duplicated to compensate for AV sync */
} PacketQueue;
#define VIDEO_PICTURE_QUEUE_SIZE 3
-#define SUBPICTURE_QUEUE_SIZE 4
+#define SUBPICTURE_QUEUE_SIZE 16
typedef struct VideoPicture {
double pts; // presentation timestamp for this picture
int channels;
int64_t channel_layout;
enum AVSampleFormat fmt;
+ int frame_size;
+ int bytes_per_sec;
} AudioParams;
typedef struct Clock {
AVStream *audio_st;
PacketQueue audioq;
int audio_hw_buf_size;
- uint8_t silence_buf[SDL_AUDIO_BUFFER_SIZE];
+ uint8_t silence_buf[SDL_AUDIO_MIN_BUFFER_SIZE];
uint8_t *audio_buf;
uint8_t *audio_buf1;
unsigned int audio_buf_size; /* in bytes */
int64_t video_current_pos; // current displayed file pos
double max_frame_duration; // maximum duration of a frame - above this, we consider the jump a timestamp discontinuity
VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
- int pictq_size, pictq_rindex, pictq_windex;
+ int pictq_size, pictq_rindex, pictq_windex, pictq_rindex_shown;
SDL_mutex *pictq_mutex;
SDL_cond *pictq_cond;
#if !CONFIG_AVFILTER
int step;
#if CONFIG_AVFILTER
+ int vfilter_idx;
AVFilterContext *in_video_filter; // the first filter in the video chain
AVFilterContext *out_video_filter; // the last filter in the video chain
AVFilterContext *in_audio_filter; // the first filter in the audio chain
static int fast = 0;
static int genpts = 0;
static int lowres = 0;
-static int error_concealment = 3;
static int decoder_reorder_pts = -1;
static int autoexit;
static int exit_on_keydown;
static int64_t cursor_last_shown;
static int cursor_hidden = 0;
#if CONFIG_AVFILTER
-static char *vfilters = NULL;
+static const char **vfilters_list = NULL;
+static int nb_vfilters = 0;
static char *afilters = NULL;
#endif
+static int autorotate = 1;
/* current context */
static int is_full_screen;
static SDL_Surface *screen;
+#if CONFIG_AVFILTER
+static int opt_add_vfilter(void *optctx, const char *opt, const char *arg)
+{
+ GROW_ARRAY(vfilters_list, nb_vfilters);
+ vfilters_list[nb_vfilters - 1] = arg;
+ return 0;
+}
+#endif
+
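A sketch of how the list-based option is meant to be used (the filter names are only examples):

    ffplay -vf hflip -vf negate input.mkv

Each -vf occurrence appends one filtergraph string to vfilters_list; the 'w' key handler further down then steps vfilter_idx through that list before falling back to the audio visualizations.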
static inline
int cmp_audio_fmts(enum AVSampleFormat fmt1, int64_t channel_count1,
enum AVSampleFormat fmt2, int64_t channel_count2)
return 0;
}
-static int packet_queue_put(PacketQueue *q, AVPacket *pkt);
-
static int packet_queue_put_private(PacketQueue *q, AVPacket *pkt)
{
MyAVPacketList *pkt1;
MyAVPacketList *pkt, *pkt1;
SDL_LockMutex(q->mutex);
- for (pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
+ for (pkt = q->first_pkt; pkt; pkt = pkt1) {
pkt1 = pkt->next;
av_free_packet(&pkt->pkt);
av_freep(&pkt);
avsubtitle_free(&sp->sub);
}
-static void calculate_display_rect(SDL_Rect *rect, int scr_xleft, int scr_ytop, int scr_width, int scr_height, VideoPicture *vp)
+static void calculate_display_rect(SDL_Rect *rect,
+ int scr_xleft, int scr_ytop, int scr_width, int scr_height,
+ int pic_width, int pic_height, AVRational pic_sar)
{
float aspect_ratio;
int width, height, x, y;
- if (vp->sar.num == 0)
+ if (pic_sar.num == 0)
aspect_ratio = 0;
else
- aspect_ratio = av_q2d(vp->sar);
+ aspect_ratio = av_q2d(pic_sar);
if (aspect_ratio <= 0.0)
aspect_ratio = 1.0;
- aspect_ratio *= (float)vp->width / (float)vp->height;
+ aspect_ratio *= (float)pic_width / (float)pic_height;
/* XXX: we suppose the screen has a 1.0 pixel ratio */
height = scr_height;
SDL_Rect rect;
int i;
- vp = &is->pictq[is->pictq_rindex];
+ vp = &is->pictq[(is->pictq_rindex + is->pictq_rindex_shown) % VIDEO_PICTURE_QUEUE_SIZE];
if (vp->bmp) {
if (is->subtitle_st) {
if (is->subpq_size > 0) {
}
}
- calculate_display_rect(&rect, is->xleft, is->ytop, is->width, is->height, vp);
+ calculate_display_rect(&rect, is->xleft, is->ytop, is->width, is->height, vp->width, vp->height, vp->sar);
SDL_DisplayYUVOverlay(vp->bmp, &rect);
/* to be more precise, we take into account the time spent since
the last buffer computation */
if (audio_callback_time) {
- time_diff = av_gettime() - audio_callback_time;
+ time_diff = av_gettime_relative() - audio_callback_time;
delay -= (time_diff * s->audio_tgt.freq) / 1000000;
}
av_free(s->rdft_data);
s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
s->rdft_bits = rdft_bits;
- s->rdft_data = av_malloc(4 * nb_freq * sizeof(*s->rdft_data));
+ s->rdft_data = av_malloc_array(nb_freq, 4 * sizeof(*s->rdft_data));
}
{
FFTSample *data[2];
av_lockmgr_register(NULL);
uninit_opts();
#if CONFIG_AVFILTER
- av_freep(&vfilters);
+ av_freep(&vfilters_list);
#endif
avformat_network_deinit();
if (show_status)
exit(123);
}
-static void set_default_window_size(VideoPicture *vp)
+static void set_default_window_size(int width, int height, AVRational sar)
{
SDL_Rect rect;
- calculate_display_rect(&rect, 0, 0, INT_MAX, vp->height, vp);
+ calculate_display_rect(&rect, 0, 0, INT_MAX, height, width, height, sar);
default_width = rect.w;
default_height = rect.h;
}
else flags |= SDL_RESIZABLE;
if (vp && vp->width)
- set_default_window_size(vp);
+ set_default_window_size(vp->width, vp->height, vp->sar);
if (is_full_screen && fs_screen_width) {
w = fs_screen_width;
if (c->paused) {
return c->pts;
} else {
- double time = av_gettime() / 1000000.0;
+ double time = av_gettime_relative() / 1000000.0;
return c->pts_drift + time - (time - c->last_updated) * (1.0 - c->speed);
}
}
static void set_clock(Clock *c, double pts, int serial)
{
- double time = av_gettime() / 1000000.0;
+ double time = av_gettime_relative() / 1000000.0;
set_clock_at(c, pts, serial, time);
}
static void stream_toggle_pause(VideoState *is)
{
if (is->paused) {
- is->frame_timer += av_gettime() / 1000000.0 + is->vidclk.pts_drift - is->vidclk.pts;
+ is->frame_timer += av_gettime_relative() / 1000000.0 + is->vidclk.pts_drift - is->vidclk.pts;
if (is->read_pause_return != AVERROR(ENOSYS)) {
is->vidclk.paused = 0;
}
}
}
+/* return the number of undisplayed pictures in the queue */
+static int pictq_nb_remaining(VideoState *is) {
+ return is->pictq_size - is->pictq_rindex_shown;
+}
+
+/* make the last displayed picture available for redisplay by resetting rindex_shown */
+static int pictq_prev_picture(VideoState *is) {
+ int ret = is->pictq_rindex_shown;
+ is->pictq_rindex_shown = 0;
+ return ret;
+}
+
static void pictq_next_picture(VideoState *is) {
+ if (!is->pictq_rindex_shown) {
+ is->pictq_rindex_shown = 1;
+ return;
+ }
/* update queue size and signal for next picture */
if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
is->pictq_rindex = 0;
SDL_UnlockMutex(is->pictq_mutex);
}
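A minimal walk-through of the new rindex_shown bookkeeping, assuming a single queued picture (pictq_size = 1, pictq_rindex = 0, pictq_rindex_shown = 0):
/* pictq_nb_remaining()  -> 1   (the picture has not been displayed yet)
 * pictq_next_picture()  -> rindex_shown = 1; rindex and size are unchanged,
 *                          so the displayed picture stays in the queue
 * pictq_nb_remaining()  -> 0   (nothing new left to display)
 * pictq_prev_picture()  -> rindex_shown = 0; the same picture counts as
 *                          undisplayed again and can be redisplayed */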
-static int pictq_prev_picture(VideoState *is) {
- VideoPicture *prevvp;
- int ret = 0;
- /* update queue size and signal for the previous picture */
- prevvp = &is->pictq[(is->pictq_rindex + VIDEO_PICTURE_QUEUE_SIZE - 1) % VIDEO_PICTURE_QUEUE_SIZE];
- if (prevvp->allocated && prevvp->serial == is->videoq.serial) {
- SDL_LockMutex(is->pictq_mutex);
- if (is->pictq_size < VIDEO_PICTURE_QUEUE_SIZE) {
- if (--is->pictq_rindex == -1)
- is->pictq_rindex = VIDEO_PICTURE_QUEUE_SIZE - 1;
- is->pictq_size++;
- ret = 1;
- }
- SDL_CondSignal(is->pictq_cond);
- SDL_UnlockMutex(is->pictq_mutex);
- }
- return ret;
-}
-
static void update_video_pts(VideoState *is, double pts, int64_t pos, int serial) {
/* update current video pts */
set_clock(&is->vidclk, pts, serial);
check_external_clock_speed(is);
if (!display_disable && is->show_mode != SHOW_MODE_VIDEO && is->audio_st) {
- time = av_gettime() / 1000000.0;
+ time = av_gettime_relative() / 1000000.0;
if (is->force_refresh || is->last_vis_time + rdftspeed < time) {
video_display(is);
is->last_vis_time = time;
if (is->force_refresh)
redisplay = pictq_prev_picture(is);
retry:
- if (is->pictq_size == 0) {
+ if (pictq_nb_remaining(is) == 0) {
// nothing to do, no picture to display in the queue
} else {
double last_duration, duration, delay;
VideoPicture *vp, *lastvp;
/* dequeue the picture */
- vp = &is->pictq[is->pictq_rindex];
- lastvp = &is->pictq[(is->pictq_rindex + VIDEO_PICTURE_QUEUE_SIZE - 1) % VIDEO_PICTURE_QUEUE_SIZE];
+ lastvp = &is->pictq[is->pictq_rindex];
+ vp = &is->pictq[(is->pictq_rindex + is->pictq_rindex_shown) % VIDEO_PICTURE_QUEUE_SIZE];
if (vp->serial != is->videoq.serial) {
pictq_next_picture(is);
}
if (lastvp->serial != vp->serial && !redisplay)
- is->frame_timer = av_gettime() / 1000000.0;
+ is->frame_timer = av_gettime_relative() / 1000000.0;
if (is->paused)
goto display;
else
delay = compute_target_delay(last_duration, is);
- time= av_gettime()/1000000.0;
+ time= av_gettime_relative()/1000000.0;
if (time < is->frame_timer + delay && !redisplay) {
*remaining_time = FFMIN(is->frame_timer + delay - time, *remaining_time);
return;
update_video_pts(is, vp->pts, vp->pos, vp->serial);
SDL_UnlockMutex(is->pictq_mutex);
- if (is->pictq_size > 1) {
- VideoPicture *nextvp = &is->pictq[(is->pictq_rindex + 1) % VIDEO_PICTURE_QUEUE_SIZE];
+ if (pictq_nb_remaining(is) > 1) {
+ VideoPicture *nextvp = &is->pictq[(is->pictq_rindex + is->pictq_rindex_shown + 1) % VIDEO_PICTURE_QUEUE_SIZE];
duration = vp_duration(is, vp, nextvp);
if(!is->step && (redisplay || framedrop>0 || (framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) && time > is->frame_timer + duration){
if (!redisplay)
int aqsize, vqsize, sqsize;
double av_diff;
- cur_time = av_gettime();
+ cur_time = av_gettime_relative();
if (!last_time || (cur_time - last_time) >= 30000) {
aqsize = 0;
vqsize = 0;
/* wait until we have space to put a new picture */
SDL_LockMutex(is->pictq_mutex);
- /* keep the last already displayed picture in the queue */
- while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE - 1 &&
+ while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
!is->videoq.abort_request) {
SDL_CondWait(is->pictq_cond, is->pictq_mutex);
}
}
/* if the queue is aborted, we have to pop the pending ALLOC event or wait for the allocation to complete */
if (is->videoq.abort_request && SDL_PeepEvents(&event, 1, SDL_GETEVENT, SDL_EVENTMASK(FF_ALLOC_EVENT)) != 1) {
- while (!vp->allocated) {
+ while (!vp->allocated && !is->abort_request) {
SDL_CondWait(is->pictq_cond, is->pictq_mutex);
}
}
is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
vp->width, vp->height, src_frame->format, vp->width, vp->height,
AV_PIX_FMT_YUV420P, sws_flags, NULL, NULL, NULL);
- if (is->img_convert_ctx == NULL) {
+ if (!is->img_convert_ctx) {
av_log(NULL, AV_LOG_FATAL, "Cannot initialize the conversion context\n");
exit(1);
}
static int configure_filtergraph(AVFilterGraph *graph, const char *filtergraph,
AVFilterContext *source_ctx, AVFilterContext *sink_ctx)
{
- int ret;
+ int ret, i;
+ int nb_filters = graph->nb_filters;
AVFilterInOut *outputs = NULL, *inputs = NULL;
if (filtergraph) {
goto fail;
}
+ /* Reorder the filters to ensure that inputs of the custom filters are merged first */
+ for (i = 0; i < graph->nb_filters - nb_filters; i++)
+ FFSWAP(AVFilterContext*, graph->filters[i], graph->filters[i + nb_filters]);
+
ret = avfilter_graph_config(graph, NULL);
fail:
avfilter_inout_free(&outputs);
char sws_flags_str[128];
char buffersrc_args[256];
int ret;
- AVFilterContext *filt_src = NULL, *filt_out = NULL, *filt_crop;
+ AVFilterContext *filt_src = NULL, *filt_out = NULL, *last_filter = NULL;
AVCodecContext *codec = is->video_st->codec;
AVRational fr = av_guess_frame_rate(is->ic, is->video_st, NULL);
if ((ret = av_opt_set_int_list(filt_out, "pix_fmts", pix_fmts, AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN)) < 0)
goto fail;
+ last_filter = filt_out;
+
+/* Note: this macro adds a filter before the last added filter, so the
+ * processing order of the filters is in reverse */
+#define INSERT_FILT(name, arg) do { \
+ AVFilterContext *filt_ctx; \
+ \
+ ret = avfilter_graph_create_filter(&filt_ctx, \
+ avfilter_get_by_name(name), \
+ "ffplay_" name, arg, NULL, graph); \
+ if (ret < 0) \
+ goto fail; \
+ \
+ ret = avfilter_link(filt_ctx, 0, last_filter, 0); \
+ if (ret < 0) \
+ goto fail; \
+ \
+ last_filter = filt_ctx; \
+} while (0)
+
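For illustration, two consecutive INSERT_FILT calls (with last_filter starting at the sink) build the chain back to front:
/* INSERT_FILT("crop", ...)      : crop -> filt_out,              last_filter = crop
 * INSERT_FILT("transpose", ...) : transpose -> crop -> filt_out, last_filter = transpose
 * configure_filtergraph() finally attaches the source and any user-supplied
 * filters in front of last_filter, so frames flow
 *   filt_src -> [user filters] -> transpose -> crop -> filt_out */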
/* SDL YUV code does not handle odd width/height for some driver
* combinations, therefore we crop the picture to an even width/height. */
- if ((ret = avfilter_graph_create_filter(&filt_crop,
- avfilter_get_by_name("crop"),
- "ffplay_crop", "floor(in_w/2)*2:floor(in_h/2)*2", NULL, graph)) < 0)
- goto fail;
- if ((ret = avfilter_link(filt_crop, 0, filt_out, 0)) < 0)
- goto fail;
+ INSERT_FILT("crop", "floor(in_w/2)*2:floor(in_h/2)*2");
+
+ if (autorotate) {
+ AVDictionaryEntry *rotate_tag = av_dict_get(is->video_st->metadata, "rotate", NULL, 0);
+ if (rotate_tag && *rotate_tag->value && strcmp(rotate_tag->value, "0")) {
+ if (!strcmp(rotate_tag->value, "90")) {
+ INSERT_FILT("transpose", "clock");
+ } else if (!strcmp(rotate_tag->value, "180")) {
+ INSERT_FILT("hflip", NULL);
+ INSERT_FILT("vflip", NULL);
+ } else if (!strcmp(rotate_tag->value, "270")) {
+ INSERT_FILT("transpose", "cclock");
+ } else {
+ char rotate_buf[64];
+ snprintf(rotate_buf, sizeof(rotate_buf), "%s*PI/180", rotate_tag->value);
+ INSERT_FILT("rotate", rotate_buf);
+ }
+ }
+ }
- if ((ret = configure_filtergraph(graph, vfilters, filt_src, filt_crop)) < 0)
+ if ((ret = configure_filtergraph(graph, vfilters, filt_src, last_filter)) < 0)
goto fail;
is->in_video_filter = filt_src;
int last_h = 0;
enum AVPixelFormat last_format = -2;
int last_serial = -1;
+ int last_vfilter_idx = 0;
#endif
for (;;) {
while (is->paused && !is->videoq.abort_request)
SDL_Delay(10);
- avcodec_get_frame_defaults(frame);
av_free_packet(&pkt);
ret = get_video_frame(is, frame, &pkt, &serial);
if ( last_w != frame->width
|| last_h != frame->height
|| last_format != frame->format
- || last_serial != serial) {
+ || last_serial != serial
+ || last_vfilter_idx != is->vfilter_idx) {
av_log(NULL, AV_LOG_DEBUG,
"Video frame changed from size:%dx%d format:%s serial:%d to size:%dx%d format:%s serial:%d\n",
last_w, last_h,
(const char *)av_x_if_null(av_get_pix_fmt_name(frame->format), "none"), serial);
avfilter_graph_free(&graph);
graph = avfilter_graph_alloc();
- if ((ret = configure_video_filters(graph, is, vfilters, frame)) < 0) {
+ if ((ret = configure_video_filters(graph, is, vfilters_list ? vfilters_list[is->vfilter_idx] : NULL, frame)) < 0) {
SDL_Event event;
event.type = FF_QUIT_EVENT;
event.user.data1 = is;
SDL_PushEvent(&event);
- av_free_packet(&pkt);
goto the_end;
}
filt_in = is->in_video_filter;
last_h = frame->height;
last_format = frame->format;
last_serial = serial;
+ last_vfilter_idx = is->vfilter_idx;
frame_rate = filt_out->inputs[0]->frame_rate;
}
ret = av_buffersrc_add_frame(filt_in, frame);
if (ret < 0)
goto the_end;
- av_frame_unref(frame);
- avcodec_get_frame_defaults(frame);
- av_free_packet(&pkt);
while (ret >= 0) {
- is->frame_last_returned_time = av_gettime() / 1000000.0;
+ is->frame_last_returned_time = av_gettime_relative() / 1000000.0;
ret = av_buffersink_get_frame_flags(filt_out, frame, 0);
if (ret < 0) {
break;
}
- is->frame_last_filter_delay = av_gettime() / 1000000.0 - is->frame_last_returned_time;
+ is->frame_last_filter_delay = av_gettime_relative() / 1000000.0 - is->frame_last_returned_time;
if (fabs(is->frame_last_filter_delay) > AV_NOSYNC_THRESHOLD / 10.0)
is->frame_last_filter_delay = 0;
tb = filt_out->inputs[0]->time_base;
return AVERROR(ENOMEM);
} else {
av_frame_unref(is->frame);
- avcodec_get_frame_defaults(is->frame);
}
if (is->audioq.serial != is->audio_pkt_temp_serial)
if ((ret = av_buffersrc_add_frame(is->in_audio_filter, is->frame)) < 0)
return ret;
- av_frame_unref(is->frame);
#endif
}
#if CONFIG_AVFILTER
{
VideoState *is = opaque;
int audio_size, len1;
- int bytes_per_sec;
- int frame_size = av_samples_get_buffer_size(NULL, is->audio_tgt.channels, 1, is->audio_tgt.fmt, 1);
- audio_callback_time = av_gettime();
+ audio_callback_time = av_gettime_relative();
while (len > 0) {
if (is->audio_buf_index >= is->audio_buf_size) {
if (audio_size < 0) {
/* if error, just output silence */
is->audio_buf = is->silence_buf;
- is->audio_buf_size = sizeof(is->silence_buf) / frame_size * frame_size;
+ is->audio_buf_size = sizeof(is->silence_buf) / is->audio_tgt.frame_size * is->audio_tgt.frame_size;
} else {
if (is->show_mode != SHOW_MODE_VIDEO)
update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
stream += len1;
is->audio_buf_index += len1;
}
- bytes_per_sec = is->audio_tgt.freq * is->audio_tgt.channels * av_get_bytes_per_sample(is->audio_tgt.fmt);
is->audio_write_buf_size = is->audio_buf_size - is->audio_buf_index;
/* Let's assume the audio driver that is used by SDL has two periods. */
if (!isnan(is->audio_clock)) {
- set_clock_at(&is->audclk, is->audio_clock - (double)(2 * is->audio_hw_buf_size + is->audio_write_buf_size) / bytes_per_sec, is->audio_clock_serial, audio_callback_time / 1000000.0);
+ set_clock_at(&is->audclk, is->audio_clock - (double)(2 * is->audio_hw_buf_size + is->audio_write_buf_size) / is->audio_tgt.bytes_per_sec, is->audio_clock_serial, audio_callback_time / 1000000.0);
sync_clock_to_slave(&is->extclk, &is->audclk);
}
}
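A worked example of the latency term, assuming the double-buffering guess above holds and a 48 kHz stereo S16 device with the 2048-sample buffer from the earlier example:
/* audio_hw_buf_size       = 2048 samples * 2 ch * 2 bytes = 8192 bytes
 * audio_tgt.bytes_per_sec = 48000 * 2 * 2                 = 192000 bytes/s
 * with an empty write buffer the clock is set to
 *   audio_clock - 2 * 8192 / 192000 = audio_clock - ~0.085 s,
 * i.e. the estimated pts of the sample currently being played. */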
SDL_AudioSpec wanted_spec, spec;
const char *env;
static const int next_nb_channels[] = {0, 0, 1, 6, 2, 6, 4, 6};
+ static const int next_sample_rates[] = {0, 44100, 48000, 96000, 192000};
+ int next_sample_rate_idx = FF_ARRAY_ELEMS(next_sample_rates) - 1;
env = SDL_getenv("SDL_AUDIO_CHANNELS");
if (env) {
wanted_channel_layout = av_get_default_channel_layout(wanted_nb_channels);
wanted_channel_layout &= ~AV_CH_LAYOUT_STEREO_DOWNMIX;
}
- wanted_spec.channels = av_get_channel_layout_nb_channels(wanted_channel_layout);
+ wanted_nb_channels = av_get_channel_layout_nb_channels(wanted_channel_layout);
+ wanted_spec.channels = wanted_nb_channels;
wanted_spec.freq = wanted_sample_rate;
if (wanted_spec.freq <= 0 || wanted_spec.channels <= 0) {
av_log(NULL, AV_LOG_ERROR, "Invalid sample rate or channel count!\n");
return -1;
}
+ while (next_sample_rate_idx && next_sample_rates[next_sample_rate_idx] >= wanted_spec.freq)
+ next_sample_rate_idx--;
wanted_spec.format = AUDIO_S16SYS;
wanted_spec.silence = 0;
- wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
+ wanted_spec.samples = FFMAX(SDL_AUDIO_MIN_BUFFER_SIZE, 2 << av_log2(wanted_spec.freq / SDL_AUDIO_MAX_CALLBACKS_PER_SEC));
wanted_spec.callback = sdl_audio_callback;
wanted_spec.userdata = opaque;
while (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
- av_log(NULL, AV_LOG_WARNING, "SDL_OpenAudio (%d channels): %s\n", wanted_spec.channels, SDL_GetError());
+ av_log(NULL, AV_LOG_WARNING, "SDL_OpenAudio (%d channels, %d Hz): %s\n",
+ wanted_spec.channels, wanted_spec.freq, SDL_GetError());
wanted_spec.channels = next_nb_channels[FFMIN(7, wanted_spec.channels)];
if (!wanted_spec.channels) {
- av_log(NULL, AV_LOG_ERROR,
- "No more channel combinations to try, audio open failed\n");
- return -1;
+ wanted_spec.freq = next_sample_rates[next_sample_rate_idx--];
+ wanted_spec.channels = wanted_nb_channels;
+ if (!wanted_spec.freq) {
+ av_log(NULL, AV_LOG_ERROR,
+ "No more combinations to try, audio open failed\n");
+ return -1;
+ }
}
wanted_channel_layout = av_get_default_channel_layout(wanted_spec.channels);
}
audio_hw_params->freq = spec.freq;
audio_hw_params->channel_layout = wanted_channel_layout;
audio_hw_params->channels = spec.channels;
+ audio_hw_params->frame_size = av_samples_get_buffer_size(NULL, audio_hw_params->channels, 1, audio_hw_params->fmt, 1);
+ audio_hw_params->bytes_per_sec = av_samples_get_buffer_size(NULL, audio_hw_params->channels, audio_hw_params->freq, audio_hw_params->fmt, 1);
+ if (audio_hw_params->bytes_per_sec <= 0 || audio_hw_params->frame_size <= 0) {
+ av_log(NULL, AV_LOG_ERROR, "av_samples_get_buffer_size failed\n");
+ return -1;
+ }
return spec.size;
}
stream_lowres = av_codec_get_max_lowres(codec);
}
av_codec_set_lowres(avctx, stream_lowres);
- avctx->error_concealment = error_concealment;
if(stream_lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
if (fast) avctx->flags2 |= CODEC_FLAG2_FAST;
if (!av_dict_get(opts, "threads", NULL, 0))
av_dict_set(&opts, "threads", "auto", 0);
if (stream_lowres)
- av_dict_set(&opts, "lowres", av_asprintf("%d", stream_lowres), AV_DICT_DONT_STRDUP_VAL);
+ av_dict_set_int(&opts, "lowres", stream_lowres, 0);
if (avctx->codec_type == AVMEDIA_TYPE_VIDEO || avctx->codec_type == AVMEDIA_TYPE_AUDIO)
av_dict_set(&opts, "refcounted_frames", "1", 0);
if (avcodec_open2(avctx, codec, &opts) < 0)
is->audio_diff_avg_count = 0;
/* since we do not have a precise enough measure of the audio FIFO fullness,
we correct audio sync only if the error is larger than this threshold */
- is->audio_diff_threshold = 2.0 * is->audio_hw_buf_size / av_samples_get_buffer_size(NULL, is->audio_tgt.channels, is->audio_tgt.freq, is->audio_tgt.fmt, 1);
+ is->audio_diff_threshold = (double)(is->audio_hw_buf_size) / is->audio_tgt.bytes_per_sec;
memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
memset(&is->audio_pkt_temp, 0, sizeof(is->audio_pkt_temp));
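Continuing the same example, the new expression works out to the duration of one hardware buffer:
/* audio_diff_threshold = 8192 / 192000 = ~0.043 s
 * (the old expression was equivalent to 2.0 * audio_hw_buf_size / bytes_per_sec,
 *  so the correction threshold is effectively halved) */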
if (genpts)
ic->flags |= AVFMT_FLAG_GENPTS;
+ av_format_inject_global_side_data(ic);
+
opts = setup_find_stream_info_opts(ic, codec_opts);
orig_nb_streams = ic->nb_streams;
av_freep(&opts);
if (ic->pb)
- ic->pb->eof_reached = 0; // FIXME hack, ffplay maybe should not use url_feof() to test for the end
+ ic->pb->eof_reached = 0; // FIXME hack, ffplay maybe should not use avio_feof() to test for the end
if (seek_by_bytes < 0)
seek_by_bytes = !!(ic->iformat->flags & AVFMT_TS_DISCONT) && strcmp("ogg", ic->iformat->name);
}
is->show_mode = show_mode;
+ if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
+ AVStream *st = ic->streams[st_index[AVMEDIA_TYPE_VIDEO]];
+ AVCodecContext *avctx = st->codec;
+ AVRational sar = av_guess_sample_aspect_ratio(ic, st, NULL);
+ if (avctx->width)
+ set_default_window_size(avctx->width, avctx->height, sar);
+ }
/* open the streams */
if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
}
if (!is->paused &&
(!is->audio_st || is->audio_finished == is->audioq.serial) &&
- (!is->video_st || (is->video_finished == is->videoq.serial && is->pictq_size == 0))) {
+ (!is->video_st || (is->video_finished == is->videoq.serial && pictq_nb_remaining(is) == 0))) {
if (loop != 1 && (!loop || --loop)) {
stream_seek(is, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
} else if (autoexit) {
packet_queue_put_nullpacket(&is->videoq, is->video_stream);
if (is->audio_stream >= 0)
packet_queue_put_nullpacket(&is->audioq, is->audio_stream);
+ if (is->subtitle_stream >= 0)
+ packet_queue_put_nullpacket(&is->subtitleq, is->subtitle_stream);
SDL_Delay(10);
eof=0;
continue;
}
ret = av_read_frame(ic, pkt);
if (ret < 0) {
- if (ret == AVERROR_EOF || url_feof(ic->pb))
+ if (ret == AVERROR_EOF || avio_feof(ic->pb))
eof = 1;
if (ic->pb && ic->pb->error)
break;
the_end:
if (p && stream_index != -1)
stream_index = p->stream_index[stream_index];
+ av_log(NULL, AV_LOG_INFO, "Switch %s stream from #%d to #%d\n",
+ av_get_media_type_string(codec_type),
+ old_index,
+ stream_index);
+
stream_component_close(is, old_index);
stream_component_open(is, stream_index);
}
double remaining_time = 0.0;
SDL_PumpEvents();
while (!SDL_PeepEvents(event, 1, SDL_GETEVENT, SDL_ALLEVENTS)) {
- if (!cursor_hidden && av_gettime() - cursor_last_shown > CURSOR_HIDE_DELAY) {
+ if (!cursor_hidden && av_gettime_relative() - cursor_last_shown > CURSOR_HIDE_DELAY) {
SDL_ShowCursor(0);
cursor_hidden = 1;
}
}
}
+static void seek_chapter(VideoState *is, int incr)
+{
+ int64_t pos = get_master_clock(is) * AV_TIME_BASE;
+ int i;
+
+ if (!is->ic->nb_chapters)
+ return;
+
+ /* find the current chapter */
+ for (i = 0; i < is->ic->nb_chapters; i++) {
+ AVChapter *ch = is->ic->chapters[i];
+ if (av_compare_ts(pos, AV_TIME_BASE_Q, ch->start, ch->time_base) < 0) {
+ i--;
+ break;
+ }
+ }
+
+ i += incr;
+ i = FFMAX(i, 0);
+ if (i >= is->ic->nb_chapters)
+ return;
+
+ av_log(NULL, AV_LOG_VERBOSE, "Seeking to chapter %d.\n", i);
+ stream_seek(is, av_rescale_q(is->ic->chapters[i]->start, is->ic->chapters[i]->time_base,
+ AV_TIME_BASE_Q), 0, 0);
+}
+
/* handle an event sent by the GUI */
static void event_loop(VideoState *cur_stream)
{
stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
break;
case SDLK_w:
+#if CONFIG_AVFILTER
+ if (cur_stream->show_mode == SHOW_MODE_VIDEO && cur_stream->vfilter_idx < nb_vfilters - 1) {
+ if (++cur_stream->vfilter_idx >= nb_vfilters)
+ cur_stream->vfilter_idx = 0;
+ } else {
+ cur_stream->vfilter_idx = 0;
+ toggle_audio_display(cur_stream);
+ }
+#else
toggle_audio_display(cur_stream);
+#endif
break;
case SDLK_PAGEUP:
- incr = 600.0;
- goto do_seek;
+ if (cur_stream->ic->nb_chapters <= 1) {
+ incr = 600.0;
+ goto do_seek;
+ }
+ seek_chapter(cur_stream, 1);
+ break;
case SDLK_PAGEDOWN:
- incr = -600.0;
- goto do_seek;
+ if (cur_stream->ic->nb_chapters <= 1) {
+ incr = -600.0;
+ goto do_seek;
+ }
+ seek_chapter(cur_stream, -1);
+ break;
case SDLK_LEFT:
incr = -10.0;
goto do_seek;
SDL_ShowCursor(1);
cursor_hidden = 0;
}
- cursor_last_shown = av_gettime();
+ cursor_last_shown = av_gettime_relative();
if (event.type == SDL_MOUSEBUTTONDOWN) {
x = event.button.x;
} else {
{ "genpts", OPT_BOOL | OPT_EXPERT, { &genpts }, "generate pts", "" },
{ "drp", OPT_INT | HAS_ARG | OPT_EXPERT, { &decoder_reorder_pts }, "let decoder reorder pts 0=off 1=on -1=auto", ""},
{ "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, { &lowres }, "", "" },
- { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, { &error_concealment }, "set error concealment options", "bit_mask" },
{ "sync", HAS_ARG | OPT_EXPERT, { .func_arg = opt_sync }, "set audio-video sync. type (type=audio/video/ext)", "type" },
{ "autoexit", OPT_BOOL | OPT_EXPERT, { &autoexit }, "exit at the end", "" },
{ "exitonkeydown", OPT_BOOL | OPT_EXPERT, { &exit_on_keydown }, "exit on key down", "" },
{ "infbuf", OPT_BOOL | OPT_EXPERT, { &infinite_buffer }, "don't limit the input buffer size (useful with realtime streams)", "" },
{ "window_title", OPT_STRING | HAS_ARG, { &window_title }, "set window title", "window title" },
#if CONFIG_AVFILTER
- { "vf", OPT_STRING | HAS_ARG, { &vfilters }, "set video filters", "filter_graph" },
+ { "vf", OPT_EXPERT | HAS_ARG, { .func_arg = opt_add_vfilter }, "set video filters", "filter_graph" },
{ "af", OPT_STRING | HAS_ARG, { &afilters }, "set audio filters", "filter_graph" },
#endif
{ "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, { &rdftspeed }, "rdft speed", "msecs" },
{ "acodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &audio_codec_name }, "force audio decoder", "decoder_name" },
{ "scodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &subtitle_codec_name }, "force subtitle decoder", "decoder_name" },
{ "vcodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &video_codec_name }, "force video decoder", "decoder_name" },
+ { "autorotate", OPT_BOOL, { &autorotate }, "automatically rotate video", "" },
{ NULL, },
};
"v cycle video channel\n"
"t cycle subtitle channel in the current program\n"
"c cycle program\n"
- "w show audio waves\n"
+ "w cycle video filters or show modes\n"
"s activate frame-step mode\n"
"left/right seek backward/forward 10 seconds\n"
"down/up seek backward/forward 1 minute\n"
flags &= ~SDL_INIT_AUDIO;
if (display_disable)
SDL_putenv(dummy_videodriver); /* For the event queue, we always need a video driver. */
-#if !defined(__MINGW32__) && !defined(__APPLE__)
+#if !defined(_WIN32) && !defined(__APPLE__)
flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
#endif
if (SDL_Init (flags)) {