1 /*
2  * Copyright (c) 2003 Fabrice Bellard
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20
21 /**
22  * @file
23  * simple media player based on the FFmpeg libraries
24  */
25
26 #include "config.h"
27 #include <inttypes.h>
28 #include <math.h>
29 #include <limits.h>
30 #include <signal.h>
31 #include "libavutil/avstring.h"
32 #include "libavutil/colorspace.h"
33 #include "libavutil/mathematics.h"
34 #include "libavutil/pixdesc.h"
35 #include "libavutil/imgutils.h"
36 #include "libavutil/dict.h"
37 #include "libavutil/parseutils.h"
38 #include "libavutil/samplefmt.h"
39 #include "libavutil/avassert.h"
40 #include "libavutil/time.h"
41 #include "libavformat/avformat.h"
42 #include "libavdevice/avdevice.h"
43 #include "libswscale/swscale.h"
44 #include "libavutil/opt.h"
45 #include "libavcodec/avfft.h"
46 #include "libswresample/swresample.h"
47
48 #if CONFIG_AVFILTER
49 # include "libavfilter/avcodec.h"
50 # include "libavfilter/avfilter.h"
51 # include "libavfilter/avfiltergraph.h"
52 # include "libavfilter/buffersink.h"
53 # include "libavfilter/buffersrc.h"
54 #endif
55
56 #include <SDL.h>
57 #include <SDL_thread.h>
58
59 #include "cmdutils.h"
60
61 #include <assert.h>
62
63 const char program_name[] = "ffplay";
64 const int program_birth_year = 2003;
65
66 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
67 #define MIN_FRAMES 5
68
69 /* SDL audio buffer size, in samples. Should be small to have precise
70    A/V sync as SDL does not have hardware buffer fullness info. */
71 #define SDL_AUDIO_BUFFER_SIZE 1024
72
73 /* no AV sync correction is done if below the AV sync threshold */
74 #define AV_SYNC_THRESHOLD 0.01
75 /* no AV correction is done if the error is too large */
76 #define AV_NOSYNC_THRESHOLD 10.0
77
78 /* maximum audio speed change to get correct sync */
79 #define SAMPLE_CORRECTION_PERCENT_MAX 10
80
81 /* external clock speed adjustment constants for realtime sources based on buffer fullness */
82 #define EXTERNAL_CLOCK_SPEED_MIN  0.900
83 #define EXTERNAL_CLOCK_SPEED_MAX  1.010
84 #define EXTERNAL_CLOCK_SPEED_STEP 0.001
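/* i.e. playback may be slowed down by at most 10% or sped up by at most 1%,
   in steps of 0.1% per speed check (see check_external_clock_speed() below) */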
85
86 /* we use about AUDIO_DIFF_AVG_NB A-V differences to compute the average */
87 #define AUDIO_DIFF_AVG_NB   20
88
89 /* poll for a possibly required screen refresh at least this often; should be less than 1/fps */
90 #define REFRESH_RATE 0.01
91
92 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
93 /* TODO: We assume that a decoded and resampled frame fits into this buffer */
94 #define SAMPLE_ARRAY_SIZE (8 * 65536)
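/* SAMPLE_ARRAY_SIZE sizes VideoState.sample_array, a circular buffer of recently
   output audio samples used only by the waveform and spectrum (RDFT) display modes */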
95
96 #define CURSOR_HIDE_DELAY 1000000
97
98 static int64_t sws_flags = SWS_BICUBIC;
99
100 typedef struct MyAVPacketList {
101     AVPacket pkt;
102     struct MyAVPacketList *next;
103     int serial;
104 } MyAVPacketList;
105
106 typedef struct PacketQueue {
107     MyAVPacketList *first_pkt, *last_pkt;
108     int nb_packets;
109     int size;
110     int abort_request;
111     int serial;
112     SDL_mutex *mutex;
113     SDL_cond *cond;
114 } PacketQueue;
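/* Every queued packet carries the serial the queue had when it was added, and
 * queueing the global flush_pkt increments the serial.  Consumers compare a
 * packet's serial with the queue's current one to detect and drop data that
 * was queued before the last flush (e.g. before a seek). */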
115
116 #define VIDEO_PICTURE_QUEUE_SIZE 4
117 #define SUBPICTURE_QUEUE_SIZE 4
118
119 typedef struct VideoPicture {
120     double pts;             // presentation timestamp for this picture
121     int64_t pos;            // byte position in file
122     SDL_Overlay *bmp;
123     int width, height; /* source width & height */
124     int allocated;
125     int reallocate;
126     int serial;
127
128     AVRational sar;
129 } VideoPicture;
130
131 typedef struct SubPicture {
132     double pts; /* presentation time stamp for this picture */
133     AVSubtitle sub;
134 } SubPicture;
135
136 typedef struct AudioParams {
137     int freq;
138     int channels;
139     int64_t channel_layout;
140     enum AVSampleFormat fmt;
141 } AudioParams;
142
143 enum {
144     AV_SYNC_AUDIO_MASTER, /* default choice */
145     AV_SYNC_VIDEO_MASTER,
146     AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
147 };
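/* Note: the sync type actually used can differ from the one requested: video
 * master falls back to audio when there is no video stream, and audio master
 * falls back to the external clock when there is no audio stream
 * (see get_master_sync_type()). */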
148
149 typedef struct VideoState {
150     SDL_Thread *read_tid;
151     SDL_Thread *video_tid;
152     AVInputFormat *iformat;
153     int no_background;
154     int abort_request;
155     int force_refresh;
156     int paused;
157     int last_paused;
158     int queue_attachments_req;
159     int seek_req;
160     int seek_flags;
161     int64_t seek_pos;
162     int64_t seek_rel;
163     int read_pause_return;
164     AVFormatContext *ic;
165     int realtime;
166
167     int audio_stream;
168
169     int av_sync_type;
170     double external_clock;                   ///< external clock base
171     double external_clock_drift;             ///< external clock base - time (av_gettime) at which we updated external_clock
172     int64_t external_clock_time;             ///< last reference time
173     double external_clock_speed;             ///< speed of the external clock
174
175     double audio_clock;
176     int audio_clock_serial;
177     double audio_diff_cum; /* used for AV difference average computation */
178     double audio_diff_avg_coef;
179     double audio_diff_threshold;
180     int audio_diff_avg_count;
181     AVStream *audio_st;
182     PacketQueue audioq;
183     int audio_hw_buf_size;
184     uint8_t silence_buf[SDL_AUDIO_BUFFER_SIZE];
185     uint8_t *audio_buf;
186     uint8_t *audio_buf1;
187     unsigned int audio_buf_size; /* in bytes */
188     unsigned int audio_buf1_size;
189     int audio_buf_index; /* in bytes */
190     int audio_write_buf_size;
191     AVPacket audio_pkt_temp;
192     AVPacket audio_pkt;
193     int audio_pkt_temp_serial;
194     int audio_last_serial;
195     struct AudioParams audio_src;
196 #if CONFIG_AVFILTER
197     struct AudioParams audio_filter_src;
198 #endif
199     struct AudioParams audio_tgt;
200     struct SwrContext *swr_ctx;
201     double audio_current_pts;
202     double audio_current_pts_drift;
203     int frame_drops_early;
204     int frame_drops_late;
205     AVFrame *frame;
206
207     enum ShowMode {
208         SHOW_MODE_NONE = -1, SHOW_MODE_VIDEO = 0, SHOW_MODE_WAVES, SHOW_MODE_RDFT, SHOW_MODE_NB
209     } show_mode;
210     int16_t sample_array[SAMPLE_ARRAY_SIZE];
211     int sample_array_index;
212     int last_i_start;
213     RDFTContext *rdft;
214     int rdft_bits;
215     FFTSample *rdft_data;
216     int xpos;
217     double last_vis_time;
218
219     SDL_Thread *subtitle_tid;
220     int subtitle_stream;
221     int subtitle_stream_changed;
222     AVStream *subtitle_st;
223     PacketQueue subtitleq;
224     SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
225     int subpq_size, subpq_rindex, subpq_windex;
226     SDL_mutex *subpq_mutex;
227     SDL_cond *subpq_cond;
228
229     double frame_timer;
230     double frame_last_pts;
231     double frame_last_duration;
232     double frame_last_dropped_pts;
233     double frame_last_returned_time;
234     double frame_last_filter_delay;
235     int64_t frame_last_dropped_pos;
236     int frame_last_dropped_serial;
237     int video_stream;
238     AVStream *video_st;
239     PacketQueue videoq;
240     double video_current_pts;       // current displayed pts
241     double video_current_pts_drift; // video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
242     int64_t video_current_pos;      // current displayed file pos
243     double max_frame_duration;      // maximum duration of a frame - above this, we consider the jump a timestamp discontinuity
244     int video_clock_serial;
245     VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
246     int pictq_size, pictq_rindex, pictq_windex;
247     SDL_mutex *pictq_mutex;
248     SDL_cond *pictq_cond;
249 #if !CONFIG_AVFILTER
250     struct SwsContext *img_convert_ctx;
251 #endif
252     SDL_Rect last_display_rect;
253
254     char filename[1024];
255     int width, height, xleft, ytop;
256     int step;
257
258 #if CONFIG_AVFILTER
259     AVFilterContext *in_video_filter;   // the first filter in the video chain
260     AVFilterContext *out_video_filter;  // the last filter in the video chain
261     AVFilterContext *in_audio_filter;   // the first filter in the audio chain
262     AVFilterContext *out_audio_filter;  // the last filter in the audio chain
263     AVFilterGraph *agraph;              // audio filter graph
264 #endif
265
266     int last_video_stream, last_audio_stream, last_subtitle_stream;
267
268     SDL_cond *continue_read_thread;
269 } VideoState;
270
271 /* options specified by the user */
272 static AVInputFormat *file_iformat;
273 static const char *input_filename;
274 static const char *window_title;
275 static int fs_screen_width;
276 static int fs_screen_height;
277 static int default_width  = 640;
278 static int default_height = 480;
279 static int screen_width  = 0;
280 static int screen_height = 0;
281 static int audio_disable;
282 static int video_disable;
283 static int subtitle_disable;
284 static int wanted_stream[AVMEDIA_TYPE_NB] = {
285     [AVMEDIA_TYPE_AUDIO]    = -1,
286     [AVMEDIA_TYPE_VIDEO]    = -1,
287     [AVMEDIA_TYPE_SUBTITLE] = -1,
288 };
289 static int seek_by_bytes = -1;
290 static int display_disable;
291 static int show_status = 1;
292 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
293 static int64_t start_time = AV_NOPTS_VALUE;
294 static int64_t duration = AV_NOPTS_VALUE;
295 static int workaround_bugs = 1;
296 static int fast = 0;
297 static int genpts = 0;
298 static int lowres = 0;
299 static int idct = FF_IDCT_AUTO;
300 static int error_concealment = 3;
301 static int decoder_reorder_pts = -1;
302 static int autoexit;
303 static int exit_on_keydown;
304 static int exit_on_mousedown;
305 static int loop = 1;
306 static int framedrop = -1;
307 static int infinite_buffer = -1;
308 static enum ShowMode show_mode = SHOW_MODE_NONE;
309 static const char *audio_codec_name;
310 static const char *subtitle_codec_name;
311 static const char *video_codec_name;
312 double rdftspeed = 0.02;
313 static int64_t cursor_last_shown;
314 static int cursor_hidden = 0;
315 #if CONFIG_AVFILTER
316 static char *vfilters = NULL;
317 static char *afilters = NULL;
318 #endif
319
320 /* current context */
321 static int is_full_screen;
322 static int64_t audio_callback_time;
323
324 static AVPacket flush_pkt;
325
326 #define FF_ALLOC_EVENT   (SDL_USEREVENT)
327 #define FF_QUIT_EVENT    (SDL_USEREVENT + 2)
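/* custom SDL user events: FF_ALLOC_EVENT asks the event loop (main thread) to
   (re)allocate a video overlay, FF_QUIT_EVENT asks it to shut the player down */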
328
329 static SDL_Surface *screen;
330
331 static inline
332 int cmp_audio_fmts(enum AVSampleFormat fmt1, int64_t channel_count1,
333                    enum AVSampleFormat fmt2, int64_t channel_count2)
334 {
335     /* If channel count == 1, planar and non-planar formats are the same */
336     if (channel_count1 == 1 && channel_count2 == 1)
337         return av_get_packed_sample_fmt(fmt1) != av_get_packed_sample_fmt(fmt2);
338     else
339         return channel_count1 != channel_count2 || fmt1 != fmt2;
340 }
341
342 static inline
343 int64_t get_valid_channel_layout(int64_t channel_layout, int channels)
344 {
345     if (channel_layout && av_get_channel_layout_nb_channels(channel_layout) == channels)
346         return channel_layout;
347     else
348         return 0;
349 }
350
351 static int packet_queue_put(PacketQueue *q, AVPacket *pkt);
352
353 static int packet_queue_put_private(PacketQueue *q, AVPacket *pkt)
354 {
355     MyAVPacketList *pkt1;
356
357     if (q->abort_request)
358        return -1;
359
360     pkt1 = av_malloc(sizeof(MyAVPacketList));
361     if (!pkt1)
362         return -1;
363     pkt1->pkt = *pkt;
364     pkt1->next = NULL;
365     if (pkt == &flush_pkt)
366         q->serial++;
367     pkt1->serial = q->serial;
368
369     if (!q->last_pkt)
370         q->first_pkt = pkt1;
371     else
372         q->last_pkt->next = pkt1;
373     q->last_pkt = pkt1;
374     q->nb_packets++;
375     q->size += pkt1->pkt.size + sizeof(*pkt1);
376     /* XXX: should duplicate packet data in DV case */
377     SDL_CondSignal(q->cond);
378     return 0;
379 }
380
381 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
382 {
383     int ret;
384
385     /* duplicate the packet */
386     if (pkt != &flush_pkt && av_dup_packet(pkt) < 0)
387         return -1;
388
389     SDL_LockMutex(q->mutex);
390     ret = packet_queue_put_private(q, pkt);
391     SDL_UnlockMutex(q->mutex);
392
393     if (pkt != &flush_pkt && ret < 0)
394         av_free_packet(pkt);
395
396     return ret;
397 }
398
399 /* packet queue handling */
400 static void packet_queue_init(PacketQueue *q)
401 {
402     memset(q, 0, sizeof(PacketQueue));
403     q->mutex = SDL_CreateMutex();
404     q->cond = SDL_CreateCond();
405     q->abort_request = 1;
406 }
407
408 static void packet_queue_flush(PacketQueue *q)
409 {
410     MyAVPacketList *pkt, *pkt1;
411
412     SDL_LockMutex(q->mutex);
413     for (pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
414         pkt1 = pkt->next;
415         av_free_packet(&pkt->pkt);
416         av_freep(&pkt);
417     }
418     q->last_pkt = NULL;
419     q->first_pkt = NULL;
420     q->nb_packets = 0;
421     q->size = 0;
422     SDL_UnlockMutex(q->mutex);
423 }
424
425 static void packet_queue_destroy(PacketQueue *q)
426 {
427     packet_queue_flush(q);
428     SDL_DestroyMutex(q->mutex);
429     SDL_DestroyCond(q->cond);
430 }
431
432 static void packet_queue_abort(PacketQueue *q)
433 {
434     SDL_LockMutex(q->mutex);
435
436     q->abort_request = 1;
437
438     SDL_CondSignal(q->cond);
439
440     SDL_UnlockMutex(q->mutex);
441 }
442
443 static void packet_queue_start(PacketQueue *q)
444 {
445     SDL_LockMutex(q->mutex);
446     q->abort_request = 0;
447     packet_queue_put_private(q, &flush_pkt);
448     SDL_UnlockMutex(q->mutex);
449 }
450
451 /* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
452 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block, int *serial)
453 {
454     MyAVPacketList *pkt1;
455     int ret;
456
457     SDL_LockMutex(q->mutex);
458
459     for (;;) {
460         if (q->abort_request) {
461             ret = -1;
462             break;
463         }
464
465         pkt1 = q->first_pkt;
466         if (pkt1) {
467             q->first_pkt = pkt1->next;
468             if (!q->first_pkt)
469                 q->last_pkt = NULL;
470             q->nb_packets--;
471             q->size -= pkt1->pkt.size + sizeof(*pkt1);
472             *pkt = pkt1->pkt;
473             if (serial)
474                 *serial = pkt1->serial;
475             av_free(pkt1);
476             ret = 1;
477             break;
478         } else if (!block) {
479             ret = 0;
480             break;
481         } else {
482             SDL_CondWait(q->cond, q->mutex);
483         }
484     }
485     SDL_UnlockMutex(q->mutex);
486     return ret;
487 }
488
489 static inline void fill_rectangle(SDL_Surface *screen,
490                                   int x, int y, int w, int h, int color, int update)
491 {
492     SDL_Rect rect;
493     rect.x = x;
494     rect.y = y;
495     rect.w = w;
496     rect.h = h;
497     SDL_FillRect(screen, &rect, color);
498     if (update && w > 0 && h > 0)
499         SDL_UpdateRect(screen, x, y, w, h);
500 }
501
502 /* draw only the border of a rectangle */
503 static void fill_border(int xleft, int ytop, int width, int height, int x, int y, int w, int h, int color, int update)
504 {
505     int w1, w2, h1, h2;
506
507     /* fill the background */
508     w1 = x;
509     if (w1 < 0)
510         w1 = 0;
511     w2 = width - (x + w);
512     if (w2 < 0)
513         w2 = 0;
514     h1 = y;
515     if (h1 < 0)
516         h1 = 0;
517     h2 = height - (y + h);
518     if (h2 < 0)
519         h2 = 0;
520     fill_rectangle(screen,
521                    xleft, ytop,
522                    w1, height,
523                    color, update);
524     fill_rectangle(screen,
525                    xleft + width - w2, ytop,
526                    w2, height,
527                    color, update);
528     fill_rectangle(screen,
529                    xleft + w1, ytop,
530                    width - w1 - w2, h1,
531                    color, update);
532     fill_rectangle(screen,
533                    xleft + w1, ytop + height - h2,
534                    width - w1 - w2, h2,
535                    color, update);
536 }
537
538 #define ALPHA_BLEND(a, oldp, newp, s)\
539 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
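/* blend newp over oldp with alpha a; with s > 0, newp may be the sum of 2^s
   subsampled chroma values and oldp is pre-shifted to match, so the blend is
   effectively done on their average without an extra division */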
540
541 #define RGBA_IN(r, g, b, a, s)\
542 {\
543     unsigned int v = ((const uint32_t *)(s))[0];\
544     a = (v >> 24) & 0xff;\
545     r = (v >> 16) & 0xff;\
546     g = (v >> 8) & 0xff;\
547     b = v & 0xff;\
548 }
549
550 #define YUVA_IN(y, u, v, a, s, pal)\
551 {\
552     unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
553     a = (val >> 24) & 0xff;\
554     y = (val >> 16) & 0xff;\
555     u = (val >> 8) & 0xff;\
556     v = val & 0xff;\
557 }
558
559 #define YUVA_OUT(d, y, u, v, a)\
560 {\
561     ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
562 }
563
564
565 #define BPP 1
566
567 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
568 {
569     int wrap, wrap3, width2, skip2;
570     int y, u, v, a, u1, v1, a1, w, h;
571     uint8_t *lum, *cb, *cr;
572     const uint8_t *p;
573     const uint32_t *pal;
574     int dstx, dsty, dstw, dsth;
575
576     dstw = av_clip(rect->w, 0, imgw);
577     dsth = av_clip(rect->h, 0, imgh);
578     dstx = av_clip(rect->x, 0, imgw - dstw);
579     dsty = av_clip(rect->y, 0, imgh - dsth);
580     lum = dst->data[0] + dsty * dst->linesize[0];
581     cb  = dst->data[1] + (dsty >> 1) * dst->linesize[1];
582     cr  = dst->data[2] + (dsty >> 1) * dst->linesize[2];
583
584     width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
585     skip2 = dstx >> 1;
586     wrap = dst->linesize[0];
587     wrap3 = rect->pict.linesize[0];
588     p = rect->pict.data[0];
589     pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */
590
591     if (dsty & 1) {
592         lum += dstx;
593         cb += skip2;
594         cr += skip2;
595
596         if (dstx & 1) {
597             YUVA_IN(y, u, v, a, p, pal);
598             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
599             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
600             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
601             cb++;
602             cr++;
603             lum++;
604             p += BPP;
605         }
606         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
607             YUVA_IN(y, u, v, a, p, pal);
608             u1 = u;
609             v1 = v;
610             a1 = a;
611             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
612
613             YUVA_IN(y, u, v, a, p + BPP, pal);
614             u1 += u;
615             v1 += v;
616             a1 += a;
617             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
618             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
619             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
620             cb++;
621             cr++;
622             p += 2 * BPP;
623             lum += 2;
624         }
625         if (w) {
626             YUVA_IN(y, u, v, a, p, pal);
627             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
628             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
629             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
630             p++;
631             lum++;
632         }
633         p += wrap3 - dstw * BPP;
634         lum += wrap - dstw - dstx;
635         cb += dst->linesize[1] - width2 - skip2;
636         cr += dst->linesize[2] - width2 - skip2;
637     }
638     for (h = dsth - (dsty & 1); h >= 2; h -= 2) {
639         lum += dstx;
640         cb += skip2;
641         cr += skip2;
642
643         if (dstx & 1) {
644             YUVA_IN(y, u, v, a, p, pal);
645             u1 = u;
646             v1 = v;
647             a1 = a;
648             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
649             p += wrap3;
650             lum += wrap;
651             YUVA_IN(y, u, v, a, p, pal);
652             u1 += u;
653             v1 += v;
654             a1 += a;
655             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
656             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
657             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
658             cb++;
659             cr++;
660             p += -wrap3 + BPP;
661             lum += -wrap + 1;
662         }
663         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
664             YUVA_IN(y, u, v, a, p, pal);
665             u1 = u;
666             v1 = v;
667             a1 = a;
668             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
669
670             YUVA_IN(y, u, v, a, p + BPP, pal);
671             u1 += u;
672             v1 += v;
673             a1 += a;
674             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
675             p += wrap3;
676             lum += wrap;
677
678             YUVA_IN(y, u, v, a, p, pal);
679             u1 += u;
680             v1 += v;
681             a1 += a;
682             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
683
684             YUVA_IN(y, u, v, a, p + BPP, pal);
685             u1 += u;
686             v1 += v;
687             a1 += a;
688             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
689
690             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
691             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
692
693             cb++;
694             cr++;
695             p += -wrap3 + 2 * BPP;
696             lum += -wrap + 2;
697         }
698         if (w) {
699             YUVA_IN(y, u, v, a, p, pal);
700             u1 = u;
701             v1 = v;
702             a1 = a;
703             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
704             p += wrap3;
705             lum += wrap;
706             YUVA_IN(y, u, v, a, p, pal);
707             u1 += u;
708             v1 += v;
709             a1 += a;
710             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
711             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
712             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
713             cb++;
714             cr++;
715             p += -wrap3 + BPP;
716             lum += -wrap + 1;
717         }
718         p += wrap3 + (wrap3 - dstw * BPP);
719         lum += wrap + (wrap - dstw - dstx);
720         cb += dst->linesize[1] - width2 - skip2;
721         cr += dst->linesize[2] - width2 - skip2;
722     }
723     /* handle odd height */
724     if (h) {
725         lum += dstx;
726         cb += skip2;
727         cr += skip2;
728
729         if (dstx & 1) {
730             YUVA_IN(y, u, v, a, p, pal);
731             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
732             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
733             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
734             cb++;
735             cr++;
736             lum++;
737             p += BPP;
738         }
739         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
740             YUVA_IN(y, u, v, a, p, pal);
741             u1 = u;
742             v1 = v;
743             a1 = a;
744             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
745
746             YUVA_IN(y, u, v, a, p + BPP, pal);
747             u1 += u;
748             v1 += v;
749             a1 += a;
750             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
751             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
752             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
753             cb++;
754             cr++;
755             p += 2 * BPP;
756             lum += 2;
757         }
758         if (w) {
759             YUVA_IN(y, u, v, a, p, pal);
760             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
761             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
762             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
763         }
764     }
765 }
766
767 static void free_subpicture(SubPicture *sp)
768 {
769     avsubtitle_free(&sp->sub);
770 }
771
772 static void calculate_display_rect(SDL_Rect *rect, int scr_xleft, int scr_ytop, int scr_width, int scr_height, VideoPicture *vp)
773 {
774     float aspect_ratio;
775     int width, height, x, y;
776
777     if (vp->sar.num == 0)
778         aspect_ratio = 0;
779     else
780         aspect_ratio = av_q2d(vp->sar);
781
782     if (aspect_ratio <= 0.0)
783         aspect_ratio = 1.0;
784     aspect_ratio *= (float)vp->width / (float)vp->height;
785
786     /* XXX: we assume the screen has a 1:1 pixel aspect ratio */
787     height = scr_height;
788     width = ((int)rint(height * aspect_ratio)) & ~1;
789     if (width > scr_width) {
790         width = scr_width;
791         height = ((int)rint(width / aspect_ratio)) & ~1;
792     }
793     x = (scr_width - width) / 2;
794     y = (scr_height - height) / 2;
795     rect->x = scr_xleft + x;
796     rect->y = scr_ytop  + y;
797     rect->w = FFMAX(width,  1);
798     rect->h = FFMAX(height, 1);
799 }
800
801 static void video_image_display(VideoState *is)
802 {
803     VideoPicture *vp;
804     SubPicture *sp;
805     AVPicture pict;
806     SDL_Rect rect;
807     int i;
808
809     vp = &is->pictq[is->pictq_rindex];
810     if (vp->bmp) {
811         if (is->subtitle_st) {
812             if (is->subpq_size > 0) {
813                 sp = &is->subpq[is->subpq_rindex];
814
815                 if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) {
816                     SDL_LockYUVOverlay (vp->bmp);
817
818                     pict.data[0] = vp->bmp->pixels[0];
819                     pict.data[1] = vp->bmp->pixels[2];
820                     pict.data[2] = vp->bmp->pixels[1];
821
822                     pict.linesize[0] = vp->bmp->pitches[0];
823                     pict.linesize[1] = vp->bmp->pitches[2];
824                     pict.linesize[2] = vp->bmp->pitches[1];
825
826                     for (i = 0; i < sp->sub.num_rects; i++)
827                         blend_subrect(&pict, sp->sub.rects[i],
828                                       vp->bmp->w, vp->bmp->h);
829
830                     SDL_UnlockYUVOverlay (vp->bmp);
831                 }
832             }
833         }
834
835         calculate_display_rect(&rect, is->xleft, is->ytop, is->width, is->height, vp);
836
837         SDL_DisplayYUVOverlay(vp->bmp, &rect);
838
839         if (rect.x != is->last_display_rect.x || rect.y != is->last_display_rect.y || rect.w != is->last_display_rect.w || rect.h != is->last_display_rect.h || is->force_refresh) {
840             int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
841             fill_border(is->xleft, is->ytop, is->width, is->height, rect.x, rect.y, rect.w, rect.h, bgcolor, 1);
842             is->last_display_rect = rect;
843         }
844     }
845 }
846
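/* a modulo b, adjusted to be non-negative (in C the sign of % follows the dividend) */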
847 static inline int compute_mod(int a, int b)
848 {
849     return a < 0 ? a%b + b : a%b;
850 }
851
852 static void video_audio_display(VideoState *s)
853 {
854     int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
855     int ch, channels, h, h2, bgcolor, fgcolor;
856     int64_t time_diff;
857     int rdft_bits, nb_freq;
858
859     for (rdft_bits = 1; (1 << rdft_bits) < 2 * s->height; rdft_bits++)
860         ;
861     nb_freq = 1 << (rdft_bits - 1);
862
863     /* compute the display index: center on the currently output samples */
864     channels = s->audio_tgt.channels;
865     nb_display_channels = channels;
866     if (!s->paused) {
867         int data_used= s->show_mode == SHOW_MODE_WAVES ? s->width : (2*nb_freq);
868         n = 2 * channels;
869         delay = s->audio_write_buf_size;
870         delay /= n;
871
872         /* to be more precise, we take into account the time spent since
873            the last buffer computation */
874         if (audio_callback_time) {
875             time_diff = av_gettime() - audio_callback_time;
876             delay -= (time_diff * s->audio_tgt.freq) / 1000000;
877         }
878
879         delay += 2 * data_used;
880         if (delay < data_used)
881             delay = data_used;
882
883         i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
884         if (s->show_mode == SHOW_MODE_WAVES) {
885             h = INT_MIN;
886             for (i = 0; i < 1000; i += channels) {
887                 int idx = (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
888                 int a = s->sample_array[idx];
889                 int b = s->sample_array[(idx + 4 * channels) % SAMPLE_ARRAY_SIZE];
890                 int c = s->sample_array[(idx + 5 * channels) % SAMPLE_ARRAY_SIZE];
891                 int d = s->sample_array[(idx + 9 * channels) % SAMPLE_ARRAY_SIZE];
892                 int score = a - d;
893                 if (h < score && (b ^ c) < 0) {
894                     h = score;
895                     i_start = idx;
896                 }
897             }
898         }
899
900         s->last_i_start = i_start;
901     } else {
902         i_start = s->last_i_start;
903     }
904
905     bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
906     if (s->show_mode == SHOW_MODE_WAVES) {
907         fill_rectangle(screen,
908                        s->xleft, s->ytop, s->width, s->height,
909                        bgcolor, 0);
910
911         fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
912
913         /* total height for one channel */
914         h = s->height / nb_display_channels;
915         /* graph height / 2 */
916         h2 = (h * 9) / 20;
917         for (ch = 0; ch < nb_display_channels; ch++) {
918             i = i_start + ch;
919             y1 = s->ytop + ch * h + (h / 2); /* position of center line */
920             for (x = 0; x < s->width; x++) {
921                 y = (s->sample_array[i] * h2) >> 15;
922                 if (y < 0) {
923                     y = -y;
924                     ys = y1 - y;
925                 } else {
926                     ys = y1;
927                 }
928                 fill_rectangle(screen,
929                                s->xleft + x, ys, 1, y,
930                                fgcolor, 0);
931                 i += channels;
932                 if (i >= SAMPLE_ARRAY_SIZE)
933                     i -= SAMPLE_ARRAY_SIZE;
934             }
935         }
936
937         fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
938
939         for (ch = 1; ch < nb_display_channels; ch++) {
940             y = s->ytop + ch * h;
941             fill_rectangle(screen,
942                            s->xleft, y, s->width, 1,
943                            fgcolor, 0);
944         }
945         SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
946     } else {
947         nb_display_channels= FFMIN(nb_display_channels, 2);
948         if (rdft_bits != s->rdft_bits) {
949             av_rdft_end(s->rdft);
950             av_free(s->rdft_data);
951             s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
952             s->rdft_bits = rdft_bits;
953             s->rdft_data = av_malloc(4 * nb_freq * sizeof(*s->rdft_data));
954         }
955         {
956             FFTSample *data[2];
957             for (ch = 0; ch < nb_display_channels; ch++) {
958                 data[ch] = s->rdft_data + 2 * nb_freq * ch;
959                 i = i_start + ch;
960                 for (x = 0; x < 2 * nb_freq; x++) {
961                     double w = (x-nb_freq) * (1.0 / nb_freq);
962                     data[ch][x] = s->sample_array[i] * (1.0 - w * w);
963                     i += channels;
964                     if (i >= SAMPLE_ARRAY_SIZE)
965                         i -= SAMPLE_ARRAY_SIZE;
966                 }
967                 av_rdft_calc(s->rdft, data[ch]);
968             }
969             // Least efficient way to do this; we could of course access the data directly, but this is more than fast enough
970             for (y = 0; y < s->height; y++) {
971                 double w = 1 / sqrt(nb_freq);
972                 int a = sqrt(w * sqrt(data[0][2 * y + 0] * data[0][2 * y + 0] + data[0][2 * y + 1] * data[0][2 * y + 1]));
973                 int b = (nb_display_channels == 2 ) ? sqrt(w * sqrt(data[1][2 * y + 0] * data[1][2 * y + 0]
974                        + data[1][2 * y + 1] * data[1][2 * y + 1])) : a;
975                 a = FFMIN(a, 255);
976                 b = FFMIN(b, 255);
977                 fgcolor = SDL_MapRGB(screen->format, a, b, (a + b) / 2);
978
979                 fill_rectangle(screen,
980                             s->xpos, s->height-y, 1, 1,
981                             fgcolor, 0);
982             }
983         }
984         SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
985         if (!s->paused)
986             s->xpos++;
987         if (s->xpos >= s->width)
988             s->xpos= s->xleft;
989     }
990 }
991
992 static void stream_close(VideoState *is)
993 {
994     VideoPicture *vp;
995     int i;
996     /* XXX: use a special url_shutdown call to abort parse cleanly */
997     is->abort_request = 1;
998     SDL_WaitThread(is->read_tid, NULL);
999     packet_queue_destroy(&is->videoq);
1000     packet_queue_destroy(&is->audioq);
1001     packet_queue_destroy(&is->subtitleq);
1002
1003     /* free all pictures */
1004     for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
1005         vp = &is->pictq[i];
1006         if (vp->bmp) {
1007             SDL_FreeYUVOverlay(vp->bmp);
1008             vp->bmp = NULL;
1009         }
1010     }
1011     SDL_DestroyMutex(is->pictq_mutex);
1012     SDL_DestroyCond(is->pictq_cond);
1013     SDL_DestroyMutex(is->subpq_mutex);
1014     SDL_DestroyCond(is->subpq_cond);
1015     SDL_DestroyCond(is->continue_read_thread);
1016 #if !CONFIG_AVFILTER
1017     sws_freeContext(is->img_convert_ctx);
1018 #endif
1019     av_free(is);
1020 }
1021
1022 static void do_exit(VideoState *is)
1023 {
1024     if (is) {
1025         stream_close(is);
1026     }
1027     av_lockmgr_register(NULL);
1028     uninit_opts();
1029 #if CONFIG_AVFILTER
1030     avfilter_uninit();
1031     av_freep(&vfilters);
1032 #endif
1033     avformat_network_deinit();
1034     if (show_status)
1035         printf("\n");
1036     SDL_Quit();
1037     av_log(NULL, AV_LOG_QUIET, "%s", "");
1038     exit(0);
1039 }
1040
1041 static void sigterm_handler(int sig)
1042 {
1043     exit(123);
1044 }
1045
1046 static int video_open(VideoState *is, int force_set_video_mode, VideoPicture *vp)
1047 {
1048     int flags = SDL_HWSURFACE | SDL_ASYNCBLIT | SDL_HWACCEL;
1049     int w,h;
1050     SDL_Rect rect;
1051
1052     if (is_full_screen) flags |= SDL_FULLSCREEN;
1053     else                flags |= SDL_RESIZABLE;
1054
1055     if (vp && vp->width) {
1056         calculate_display_rect(&rect, 0, 0, INT_MAX, vp->height, vp);
1057         default_width  = rect.w;
1058         default_height = rect.h;
1059     }
1060
1061     if (is_full_screen && fs_screen_width) {
1062         w = fs_screen_width;
1063         h = fs_screen_height;
1064     } else if (!is_full_screen && screen_width) {
1065         w = screen_width;
1066         h = screen_height;
1067     } else {
1068         w = default_width;
1069         h = default_height;
1070     }
1071     if (screen && is->width == screen->w && screen->w == w
1072        && is->height== screen->h && screen->h == h && !force_set_video_mode)
1073         return 0;
1074     screen = SDL_SetVideoMode(w, h, 0, flags);
1075     if (!screen) {
1076         fprintf(stderr, "SDL: could not set video mode - exiting\n");
1077         do_exit(is);
1078     }
1079     if (!window_title)
1080         window_title = input_filename;
1081     SDL_WM_SetCaption(window_title, window_title);
1082
1083     is->width  = screen->w;
1084     is->height = screen->h;
1085
1086     return 0;
1087 }
1088
1089 /* display the current picture, if any */
1090 static void video_display(VideoState *is)
1091 {
1092     if (!screen)
1093         video_open(is, 0, NULL);
1094     if (is->audio_st && is->show_mode != SHOW_MODE_VIDEO)
1095         video_audio_display(is);
1096     else if (is->video_st)
1097         video_image_display(is);
1098 }
1099
1100 /* get the current audio clock value */
1101 static double get_audio_clock(VideoState *is)
1102 {
1103     if (is->audio_clock_serial != is->audioq.serial)
1104         return NAN;
1105     if (is->paused) {
1106         return is->audio_current_pts;
1107     } else {
1108         return is->audio_current_pts_drift + av_gettime() / 1000000.0;
1109     }
1110 }
1111
1112 /* get the current video clock value */
1113 static double get_video_clock(VideoState *is)
1114 {
1115     if (is->video_clock_serial != is->videoq.serial)
1116         return NAN;
1117     if (is->paused) {
1118         return is->video_current_pts;
1119     } else {
1120         return is->video_current_pts_drift + av_gettime() / 1000000.0;
1121     }
1122 }
1123
1124 /* get the current external clock value */
1125 static double get_external_clock(VideoState *is)
1126 {
1127     if (is->paused) {
1128         return is->external_clock;
1129     } else {
1130         double time = av_gettime() / 1000000.0;
1131         return is->external_clock_drift + time - (time - is->external_clock_time / 1000000.0) * (1.0 - is->external_clock_speed);
1132     }
1133 }
1134
1135 static int get_master_sync_type(VideoState *is) {
1136     if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1137         if (is->video_st)
1138             return AV_SYNC_VIDEO_MASTER;
1139         else
1140             return AV_SYNC_AUDIO_MASTER;
1141     } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1142         if (is->audio_st)
1143             return AV_SYNC_AUDIO_MASTER;
1144         else
1145             return AV_SYNC_EXTERNAL_CLOCK;
1146     } else {
1147         return AV_SYNC_EXTERNAL_CLOCK;
1148     }
1149 }
1150
1151 /* get the current master clock value */
1152 static double get_master_clock(VideoState *is)
1153 {
1154     double val;
1155
1156     switch (get_master_sync_type(is)) {
1157         case AV_SYNC_VIDEO_MASTER:
1158             val = get_video_clock(is);
1159             break;
1160         case AV_SYNC_AUDIO_MASTER:
1161             val = get_audio_clock(is);
1162             break;
1163         default:
1164             val = get_external_clock(is);
1165             break;
1166     }
1167     return val;
1168 }
1169
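/* update_external_clock_pts() records the clock value together with the wall-clock
 * time of the update: external_clock_drift = pts - update_time, so while playing
 * get_external_clock() above evaluates to pts + elapsed_time * external_clock_speed. */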
1170 static void update_external_clock_pts(VideoState *is, double pts)
1171 {
1172    is->external_clock_time = av_gettime();
1173    is->external_clock = pts;
1174    is->external_clock_drift = pts - is->external_clock_time / 1000000.0;
1175 }
1176
1177 static void check_external_clock_sync(VideoState *is, double pts) {
1178     double ext_clock = get_external_clock(is);
1179     if (isnan(ext_clock) || fabs(ext_clock - pts) > AV_NOSYNC_THRESHOLD) {
1180         update_external_clock_pts(is, pts);
1181     }
1182 }
1183
1184 static void update_external_clock_speed(VideoState *is, double speed) {
1185     update_external_clock_pts(is, get_external_clock(is));
1186     is->external_clock_speed = speed;
1187 }
1188
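/* For realtime sources slaved to the external clock: slow the clock down by one
 * step when a packet queue is close to underrun, speed it up by one step when
 * the queues are comfortably full, and otherwise drift it back towards 1.0. */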
1189 static void check_external_clock_speed(VideoState *is) {
1190    if (is->video_stream >= 0 && is->videoq.nb_packets <= MIN_FRAMES / 2 ||
1191        is->audio_stream >= 0 && is->audioq.nb_packets <= MIN_FRAMES / 2) {
1192        update_external_clock_speed(is, FFMAX(EXTERNAL_CLOCK_SPEED_MIN, is->external_clock_speed - EXTERNAL_CLOCK_SPEED_STEP));
1193    } else if ((is->video_stream < 0 || is->videoq.nb_packets > MIN_FRAMES * 2) &&
1194               (is->audio_stream < 0 || is->audioq.nb_packets > MIN_FRAMES * 2)) {
1195        update_external_clock_speed(is, FFMIN(EXTERNAL_CLOCK_SPEED_MAX, is->external_clock_speed + EXTERNAL_CLOCK_SPEED_STEP));
1196    } else {
1197        double speed = is->external_clock_speed;
1198        if (speed != 1.0)
1199            update_external_clock_speed(is, speed + EXTERNAL_CLOCK_SPEED_STEP * (1.0 - speed) / fabs(1.0 - speed));
1200    }
1201 }
1202
1203 /* seek in the stream */
1204 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1205 {
1206     if (!is->seek_req) {
1207         is->seek_pos = pos;
1208         is->seek_rel = rel;
1209         is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1210         if (seek_by_bytes)
1211             is->seek_flags |= AVSEEK_FLAG_BYTE;
1212         is->seek_req = 1;
1213         SDL_CondSignal(is->continue_read_thread);
1214     }
1215 }
1216
1217 /* pause or resume the video */
1218 static void stream_toggle_pause(VideoState *is)
1219 {
1220     if (is->paused) {
1221         is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
1222         if (is->read_pause_return != AVERROR(ENOSYS)) {
1223             is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
1224         }
1225         is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
1226     }
1227     update_external_clock_pts(is, get_external_clock(is));
1228     is->paused = !is->paused;
1229 }
1230
1231 static void toggle_pause(VideoState *is)
1232 {
1233     stream_toggle_pause(is);
1234     is->step = 0;
1235 }
1236
1237 static void step_to_next_frame(VideoState *is)
1238 {
1239     /* if the stream is paused, unpause it, then step */
1240     if (is->paused)
1241         stream_toggle_pause(is);
1242     is->step = 1;
1243 }
1244
1245 static double compute_target_delay(double delay, VideoState *is)
1246 {
1247     double sync_threshold, diff;
1248
1249     /* update delay to follow master synchronisation source */
1250     if (get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER) {
1251         /* if video is slave, we try to correct big delays by
1252            duplicating or deleting a frame */
1253         diff = get_video_clock(is) - get_master_clock(is);
1254
1255             /* skip or repeat the frame. We take the delay into account
1256                when computing the threshold; it is not clear whether this
1257                is the best heuristic */
1258         sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1259         if (!isnan(diff) && fabs(diff) < AV_NOSYNC_THRESHOLD) {
1260             if (diff <= -sync_threshold)
1261                 delay = 0;
1262             else if (diff >= sync_threshold)
1263                 delay = 2 * delay;
1264         }
1265     }
1266
1267     av_dlog(NULL, "video: delay=%0.3f A-V=%f\n",
1268             delay, -diff);
1269
1270     return delay;
1271 }
1272
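/* pictq is a small ring buffer shared by the video decoder (writer) and the
 * display refresh (reader).  pictq_next_picture() releases the current entry
 * and wakes a decoder waiting for space; pictq_prev_picture() steps the read
 * index back one entry so a forced refresh can redisplay the last frame. */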
1273 static void pictq_next_picture(VideoState *is) {
1274     /* update queue size and signal for next picture */
1275     if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1276         is->pictq_rindex = 0;
1277
1278     SDL_LockMutex(is->pictq_mutex);
1279     is->pictq_size--;
1280     SDL_CondSignal(is->pictq_cond);
1281     SDL_UnlockMutex(is->pictq_mutex);
1282 }
1283
1284 static int pictq_prev_picture(VideoState *is) {
1285     VideoPicture *prevvp;
1286     int ret = 0;
1287     /* update queue size and signal for the previous picture */
1288     prevvp = &is->pictq[(is->pictq_rindex + VIDEO_PICTURE_QUEUE_SIZE - 1) % VIDEO_PICTURE_QUEUE_SIZE];
1289     if (prevvp->allocated && prevvp->serial == is->videoq.serial) {
1290         SDL_LockMutex(is->pictq_mutex);
1291         if (is->pictq_size < VIDEO_PICTURE_QUEUE_SIZE - 1) {
1292             if (--is->pictq_rindex == -1)
1293                 is->pictq_rindex = VIDEO_PICTURE_QUEUE_SIZE - 1;
1294             is->pictq_size++;
1295             ret = 1;
1296         }
1297         SDL_CondSignal(is->pictq_cond);
1298         SDL_UnlockMutex(is->pictq_mutex);
1299     }
1300     return ret;
1301 }
1302
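/* Remember the pts of the picture being displayed together with the current
 * wall-clock time; get_video_clock() extrapolates from this pair while the
 * stream keeps playing. */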
1303 static void update_video_pts(VideoState *is, double pts, int64_t pos, int serial) {
1304     double time = av_gettime() / 1000000.0;
1305     /* update current video pts */
1306     is->video_current_pts = pts;
1307     is->video_current_pts_drift = is->video_current_pts - time;
1308     is->video_current_pos = pos;
1309     is->frame_last_pts = pts;
1310     is->video_clock_serial = serial;
1311     if (is->videoq.serial == serial)
1312         check_external_clock_sync(is, is->video_current_pts);
1313 }
1314
1315 /* called to display each frame */
1316 static void video_refresh(void *opaque, double *remaining_time)
1317 {
1318     VideoState *is = opaque;
1319     VideoPicture *vp;
1320     double time;
1321
1322     SubPicture *sp, *sp2;
1323
1324     if (!is->paused && get_master_sync_type(is) == AV_SYNC_EXTERNAL_CLOCK && is->realtime)
1325         check_external_clock_speed(is);
1326
1327     if (!display_disable && is->show_mode != SHOW_MODE_VIDEO && is->audio_st) {
1328         time = av_gettime() / 1000000.0;
1329         if (is->force_refresh || is->last_vis_time + rdftspeed < time) {
1330             video_display(is);
1331             is->last_vis_time = time;
1332         }
1333         *remaining_time = FFMIN(*remaining_time, is->last_vis_time + rdftspeed - time);
1334     }
1335
1336     if (is->video_st) {
1337         int redisplay = 0;
1338         if (is->force_refresh)
1339             redisplay = pictq_prev_picture(is);
1340 retry:
1341         if (is->pictq_size == 0) {
1342             SDL_LockMutex(is->pictq_mutex);
1343             if (is->frame_last_dropped_pts != AV_NOPTS_VALUE && is->frame_last_dropped_pts > is->frame_last_pts) {
1344                 update_video_pts(is, is->frame_last_dropped_pts, is->frame_last_dropped_pos, is->frame_last_dropped_serial);
1345                 is->frame_last_dropped_pts = AV_NOPTS_VALUE;
1346             }
1347             SDL_UnlockMutex(is->pictq_mutex);
1348             // nothing to do, no picture to display in the queue
1349         } else {
1350             double last_duration, duration, delay;
1351             /* dequeue the picture */
1352             vp = &is->pictq[is->pictq_rindex];
1353
1354             if (vp->serial != is->videoq.serial) {
1355                 pictq_next_picture(is);
1356                 redisplay = 0;
1357                 goto retry;
1358             }
1359
1360             if (is->paused)
1361                 goto display;
1362
1363             /* compute nominal last_duration */
1364             last_duration = vp->pts - is->frame_last_pts;
1365             if (last_duration > 0 && last_duration < is->max_frame_duration) {
1366                 /* if duration of the last frame was sane, update last_duration in video state */
1367                 is->frame_last_duration = last_duration;
1368             }
1369             delay = compute_target_delay(is->frame_last_duration, is);
1370
1371             time= av_gettime()/1000000.0;
1372             if (time < is->frame_timer + delay) {
1373                 *remaining_time = FFMIN(is->frame_timer + delay - time, *remaining_time);
1374                 return;
1375             }
1376
1377             if (delay > 0)
1378                 is->frame_timer += delay * FFMAX(1, floor((time-is->frame_timer) / delay));
1379
1380             SDL_LockMutex(is->pictq_mutex);
1381             update_video_pts(is, vp->pts, vp->pos, vp->serial);
1382             SDL_UnlockMutex(is->pictq_mutex);
1383
1384             if (is->pictq_size > 1) {
1385                 VideoPicture *nextvp = &is->pictq[(is->pictq_rindex + 1) % VIDEO_PICTURE_QUEUE_SIZE];
1386                 duration = nextvp->pts - vp->pts;
1387                 if(!is->step && (redisplay || framedrop>0 || (framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) && time > is->frame_timer + duration){
1388                     if (!redisplay)
1389                         is->frame_drops_late++;
1390                     pictq_next_picture(is);
1391                     redisplay = 0;
1392                     goto retry;
1393                 }
1394             }
1395
1396             if (is->subtitle_st) {
1397                 if (is->subtitle_stream_changed) {
1398                     SDL_LockMutex(is->subpq_mutex);
1399
1400                     while (is->subpq_size) {
1401                         free_subpicture(&is->subpq[is->subpq_rindex]);
1402
1403                         /* update queue size and signal for next picture */
1404                         if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1405                             is->subpq_rindex = 0;
1406
1407                         is->subpq_size--;
1408                     }
1409                     is->subtitle_stream_changed = 0;
1410
1411                     SDL_CondSignal(is->subpq_cond);
1412                     SDL_UnlockMutex(is->subpq_mutex);
1413                 } else {
1414                     if (is->subpq_size > 0) {
1415                         sp = &is->subpq[is->subpq_rindex];
1416
1417                         if (is->subpq_size > 1)
1418                             sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1419                         else
1420                             sp2 = NULL;
1421
1422                         if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1423                                 || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1424                         {
1425                             free_subpicture(sp);
1426
1427                             /* update queue size and signal for next picture */
1428                             if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1429                                 is->subpq_rindex = 0;
1430
1431                             SDL_LockMutex(is->subpq_mutex);
1432                             is->subpq_size--;
1433                             SDL_CondSignal(is->subpq_cond);
1434                             SDL_UnlockMutex(is->subpq_mutex);
1435                         }
1436                     }
1437                 }
1438             }
1439
1440 display:
1441             /* display picture */
1442             if (!display_disable && is->show_mode == SHOW_MODE_VIDEO)
1443                 video_display(is);
1444
1445             pictq_next_picture(is);
1446
1447             if (is->step && !is->paused)
1448                 stream_toggle_pause(is);
1449         }
1450     }
1451     is->force_refresh = 0;
1452     if (show_status) {
1453         static int64_t last_time;
1454         int64_t cur_time;
1455         int aqsize, vqsize, sqsize;
1456         double av_diff;
1457
1458         cur_time = av_gettime();
1459         if (!last_time || (cur_time - last_time) >= 30000) {
1460             aqsize = 0;
1461             vqsize = 0;
1462             sqsize = 0;
1463             if (is->audio_st)
1464                 aqsize = is->audioq.size;
1465             if (is->video_st)
1466                 vqsize = is->videoq.size;
1467             if (is->subtitle_st)
1468                 sqsize = is->subtitleq.size;
1469             av_diff = 0;
1470             if (is->audio_st && is->video_st)
1471                 av_diff = get_audio_clock(is) - get_video_clock(is);
1472             printf("%7.2f A-V:%7.3f fd=%4d aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
1473                    get_master_clock(is),
1474                    av_diff,
1475                    is->frame_drops_early + is->frame_drops_late,
1476                    aqsize / 1024,
1477                    vqsize / 1024,
1478                    sqsize,
1479                    is->video_st ? is->video_st->codec->pts_correction_num_faulty_dts : 0,
1480                    is->video_st ? is->video_st->codec->pts_correction_num_faulty_pts : 0);
1481             fflush(stdout);
1482             last_time = cur_time;
1483         }
1484     }
1485 }
1486
1487 /* allocate a picture (this needs to be done in the main thread to avoid
1488    potential locking problems) */
1489 static void alloc_picture(VideoState *is)
1490 {
1491     VideoPicture *vp;
1492
1493     vp = &is->pictq[is->pictq_windex];
1494
1495     if (vp->bmp)
1496         SDL_FreeYUVOverlay(vp->bmp);
1497
1498     video_open(is, 0, vp);
1499
1500     vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
1501                                    SDL_YV12_OVERLAY,
1502                                    screen);
1503     if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
1504         /* SDL allocates a buffer smaller than requested if the video
1505          * overlay hardware is unable to support the requested size. */
1506         fprintf(stderr, "Error: the video system does not support an image\n"
1507                         "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n"
1508                         "to reduce the image size.\n", vp->width, vp->height );
1509         do_exit(is);
1510     }
1511
1512     SDL_LockMutex(is->pictq_mutex);
1513     vp->allocated = 1;
1514     SDL_CondSignal(is->pictq_cond);
1515     SDL_UnlockMutex(is->pictq_mutex);
1516 }
1517
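/* If the overlay pitch is larger than the visible width, copy the last visible
 * pixel of each line into the first padding byte of every plane (see the
 * PITCH_WORKAROUND note at the call site in queue_picture()). */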
1518 static void duplicate_right_border_pixels(SDL_Overlay *bmp) {
1519     int i, width, height;
1520     Uint8 *p, *maxp;
1521     for (i = 0; i < 3; i++) {
1522         width  = bmp->w;
1523         height = bmp->h;
1524         if (i > 0) {
1525             width  >>= 1;
1526             height >>= 1;
1527         }
1528         if (bmp->pitches[i] > width) {
1529             maxp = bmp->pixels[i] + bmp->pitches[i] * height - 1;
1530             for (p = bmp->pixels[i] + width - 1; p < maxp; p += bmp->pitches[i])
1531                 *(p+1) = *p;
1532         }
1533     }
1534 }
1535
1536 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, int64_t pos, int serial)
1537 {
1538     VideoPicture *vp;
1539
1540 #if defined(DEBUG_SYNC) && 0
1541     printf("frame_type=%c pts=%0.3f\n",
1542            av_get_picture_type_char(src_frame->pict_type), pts);
1543 #endif
1544
1545     /* wait until we have space to put a new picture */
1546     SDL_LockMutex(is->pictq_mutex);
1547
1548     /* keep the last already displayed picture in the queue */
1549     while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE - 2 &&
1550            !is->videoq.abort_request) {
1551         SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1552     }
1553     SDL_UnlockMutex(is->pictq_mutex);
1554
1555     if (is->videoq.abort_request)
1556         return -1;
1557
1558     vp = &is->pictq[is->pictq_windex];
1559
1560 #if CONFIG_AVFILTER
1561     vp->sar = src_frame->sample_aspect_ratio;
1562 #else
1563     vp->sar = av_guess_sample_aspect_ratio(is->ic, is->video_st, src_frame);
1564 #endif
1565
1566     /* alloc or resize hardware picture buffer */
1567     if (!vp->bmp || vp->reallocate || !vp->allocated ||
1568         vp->width  != src_frame->width ||
1569         vp->height != src_frame->height) {
1570         SDL_Event event;
1571
1572         vp->allocated  = 0;
1573         vp->reallocate = 0;
1574         vp->width = src_frame->width;
1575         vp->height = src_frame->height;
1576
1577         /* the allocation must be done in the main thread to avoid
1578            locking problems. */
1579         event.type = FF_ALLOC_EVENT;
1580         event.user.data1 = is;
1581         SDL_PushEvent(&event);
1582
1583         /* wait until the picture is allocated */
1584         SDL_LockMutex(is->pictq_mutex);
1585         while (!vp->allocated && !is->videoq.abort_request) {
1586             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1587         }
1588         /* if the queue is aborted, we have to pop the pending ALLOC event or wait for the allocation to complete */
1589         if (is->videoq.abort_request && SDL_PeepEvents(&event, 1, SDL_GETEVENT, SDL_EVENTMASK(FF_ALLOC_EVENT)) != 1) {
1590             while (!vp->allocated) {
1591                 SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1592             }
1593         }
1594         SDL_UnlockMutex(is->pictq_mutex);
1595
1596         if (is->videoq.abort_request)
1597             return -1;
1598     }
1599
1600     /* if the frame is not skipped, then display it */
1601     if (vp->bmp) {
1602         AVPicture pict = { { 0 } };
1603
1604         /* get a pointer on the bitmap */
1605         SDL_LockYUVOverlay (vp->bmp);
1606
1607         pict.data[0] = vp->bmp->pixels[0];
1608         pict.data[1] = vp->bmp->pixels[2];
1609         pict.data[2] = vp->bmp->pixels[1];
1610
1611         pict.linesize[0] = vp->bmp->pitches[0];
1612         pict.linesize[1] = vp->bmp->pitches[2];
1613         pict.linesize[2] = vp->bmp->pitches[1];
1614
1615 #if CONFIG_AVFILTER
1616         // FIXME use direct rendering
1617         av_picture_copy(&pict, (AVPicture *)src_frame,
1618                         src_frame->format, vp->width, vp->height);
1619 #else
1620         av_opt_get_int(sws_opts, "sws_flags", 0, &sws_flags);
1621         is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
1622             vp->width, vp->height, src_frame->format, vp->width, vp->height,
1623             AV_PIX_FMT_YUV420P, sws_flags, NULL, NULL, NULL);
1624         if (is->img_convert_ctx == NULL) {
1625             fprintf(stderr, "Cannot initialize the conversion context\n");
1626             exit(1);
1627         }
1628         sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1629                   0, vp->height, pict.data, pict.linesize);
1630 #endif
1631         /* work around the SDL overlay pitch padding (PITCH_WORKAROUND) */
1632         duplicate_right_border_pixels(vp->bmp);
1633         /* update the bitmap content */
1634         SDL_UnlockYUVOverlay(vp->bmp);
1635
1636         vp->pts = pts;
1637         vp->pos = pos;
1638         vp->serial = serial;
1639
1640         /* now we can update the picture count */
1641         if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1642             is->pictq_windex = 0;
1643         SDL_LockMutex(is->pictq_mutex);
1644         is->pictq_size++;
1645         SDL_UnlockMutex(is->pictq_mutex);
1646     }
1647     return 0;
1648 }
1649
1650 static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt, int *serial)
1651 {
1652     int got_picture;
1653
1654     if (packet_queue_get(&is->videoq, pkt, 1, serial) < 0)
1655         return -1;
1656
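    /* a flush packet marks a seek: drop the codec's buffered frames and reset
       the video timing state before decoding continues */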
1657     if (pkt->data == flush_pkt.data) {
1658         avcodec_flush_buffers(is->video_st->codec);
1659
1660         SDL_LockMutex(is->pictq_mutex);
1661         // Make sure there are no long delay timers (ideally we should just flush the queue but that's harder)
1662         while (is->pictq_size && !is->videoq.abort_request) {
1663             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1664         }
1665         is->video_current_pos = -1;
1666         is->frame_last_pts = AV_NOPTS_VALUE;
1667         is->frame_last_duration = 0;
1668         is->frame_timer = (double)av_gettime() / 1000000.0;
1669         is->frame_last_dropped_pts = AV_NOPTS_VALUE;
1670         SDL_UnlockMutex(is->pictq_mutex);
1671         return 0;
1672     }
1673
1674     if (avcodec_decode_video2(is->video_st->codec, frame, &got_picture, pkt) < 0)
1675         return 0;
1676
1677     if (got_picture) {
1678         int ret = 1;
1679
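        /* decoder_reorder_pts: -1 = use the decoder's best effort timestamp,
           1 = trust the reordered pkt_pts, 0 = use the decode-order pkt_dts */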
1680         if (decoder_reorder_pts == -1) {
1681             *pts = av_frame_get_best_effort_timestamp(frame);
1682         } else if (decoder_reorder_pts) {
1683             *pts = frame->pkt_pts;
1684         } else {
1685             *pts = frame->pkt_dts;
1686         }
1687
1688         if (*pts == AV_NOPTS_VALUE) {
1689             *pts = 0;
1690         }
1691
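        /* early frame drop: if this frame would already be late relative to the
           master clock (accounting for the last filter delay), discard it before
           it goes through filtering, conversion and queueing */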
1692         if (framedrop>0 || (framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) {
1693             SDL_LockMutex(is->pictq_mutex);
1694             if (is->frame_last_pts != AV_NOPTS_VALUE && *pts) {
1695                 double clockdiff = get_video_clock(is) - get_master_clock(is);
1696                 double dpts = av_q2d(is->video_st->time_base) * *pts;
1697                 double ptsdiff = dpts - is->frame_last_pts;
1698                 if (!isnan(clockdiff) && fabs(clockdiff) < AV_NOSYNC_THRESHOLD &&
1699                      ptsdiff > 0 && ptsdiff < AV_NOSYNC_THRESHOLD &&
1700                      clockdiff + ptsdiff - is->frame_last_filter_delay < 0) {
1701                     is->frame_last_dropped_pos = pkt->pos;
1702                     is->frame_last_dropped_pts = dpts;
1703                     is->frame_last_dropped_serial = *serial;
1704                     is->frame_drops_early++;
1705                     av_frame_unref(frame);
1706                     ret = 0;
1707                 }
1708             }
1709             SDL_UnlockMutex(is->pictq_mutex);
1710         }
1711
1712         return ret;
1713     }
1714     return 0;
1715 }
1716
1717 #if CONFIG_AVFILTER
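/* Build the graph between source_ctx and sink_ctx: when a filter description is
   given it is parsed between the "in" and "out" labels, otherwise the source is
   linked straight to the sink. */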
1718 static int configure_filtergraph(AVFilterGraph *graph, const char *filtergraph,
1719                                  AVFilterContext *source_ctx, AVFilterContext *sink_ctx)
1720 {
1721     int ret;
1722     AVFilterInOut *outputs = NULL, *inputs = NULL;
1723
1724     if (filtergraph) {
1725         outputs = avfilter_inout_alloc();
1726         inputs  = avfilter_inout_alloc();
1727         if (!outputs || !inputs) {
1728             ret = AVERROR(ENOMEM);
1729             goto fail;
1730         }
1731
1732         outputs->name       = av_strdup("in");
1733         outputs->filter_ctx = source_ctx;
1734         outputs->pad_idx    = 0;
1735         outputs->next       = NULL;
1736
1737         inputs->name        = av_strdup("out");
1738         inputs->filter_ctx  = sink_ctx;
1739         inputs->pad_idx     = 0;
1740         inputs->next        = NULL;
1741
1742         if ((ret = avfilter_graph_parse(graph, filtergraph, &inputs, &outputs, NULL)) < 0)
1743             goto fail;
1744     } else {
1745         if ((ret = avfilter_link(source_ctx, 0, sink_ctx, 0)) < 0)
1746             goto fail;
1747     }
1748
1749     ret = avfilter_graph_config(graph, NULL);
1750 fail:
1751     avfilter_inout_free(&outputs);
1752     avfilter_inout_free(&inputs);
1753     return ret;
1754 }
1755
1756 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters, AVFrame *frame)
1757 {
1758     static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE };
1759     char sws_flags_str[128];
1760     char buffersrc_args[256];
1761     int ret;
1762     AVBufferSinkParams *buffersink_params = av_buffersink_params_alloc();
1763     AVFilterContext *filt_src = NULL, *filt_out = NULL, *filt_crop;
1764     AVCodecContext *codec = is->video_st->codec;
1765     AVRational fr = av_guess_frame_rate(is->ic, is->video_st, NULL);
1766
1767     if (!buffersink_params)
1768         return AVERROR(ENOMEM);
1769
1770     av_opt_get_int(sws_opts, "sws_flags", 0, &sws_flags);
1771     snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%"PRId64, sws_flags);
1772     graph->scale_sws_opts = av_strdup(sws_flags_str);
1773
1774     snprintf(buffersrc_args, sizeof(buffersrc_args),
1775              "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
1776              frame->width, frame->height, frame->format,
1777              is->video_st->time_base.num, is->video_st->time_base.den,
1778              codec->sample_aspect_ratio.num, FFMAX(codec->sample_aspect_ratio.den, 1));
1779     if (fr.num && fr.den)
1780         av_strlcatf(buffersrc_args, sizeof(buffersrc_args), ":frame_rate=%d/%d", fr.num, fr.den);
1781
1782     if ((ret = avfilter_graph_create_filter(&filt_src,
1783                                             avfilter_get_by_name("buffer"),
1784                                             "ffplay_buffer", buffersrc_args, NULL,
1785                                             graph)) < 0)
1786         goto fail;
1787
1788     buffersink_params->pixel_fmts = pix_fmts;
1789     ret = avfilter_graph_create_filter(&filt_out,
1790                                        avfilter_get_by_name("buffersink"),
1791                                        "ffplay_buffersink", NULL, buffersink_params, graph);
1792     if (ret < 0)
1793         goto fail;
1794
1795     /* The SDL YUV code does not handle odd width/height with some driver
1796      * combinations, therefore we crop the picture to an even width/height. */
1797     if ((ret = avfilter_graph_create_filter(&filt_crop,
1798                                             avfilter_get_by_name("crop"),
1799                                             "ffplay_crop", "floor(in_w/2)*2:floor(in_h/2)*2", NULL, graph)) < 0)
1800         goto fail;
1801     if ((ret = avfilter_link(filt_crop, 0, filt_out, 0)) < 0)
1802         goto fail;
1803
1804     if ((ret = configure_filtergraph(graph, vfilters, filt_src, filt_crop)) < 0)
1805         goto fail;
1806
1807     is->in_video_filter  = filt_src;
1808     is->out_video_filter = filt_out;
1809
1810 fail:
1811     av_freep(&buffersink_params);
1812     return ret;
1813 }
1814
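/* Set up the abuffer -> [afilters] -> abuffersink audio graph. With
   force_output_format the sink is pinned to the device channel layout, channel
   count and sample rate from audio_tgt; otherwise only signed 16-bit output is
   enforced and the rest is negotiated freely. */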
1815 static int configure_audio_filters(VideoState *is, const char *afilters, int force_output_format)
1816 {
1817     static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE };
1818     int sample_rates[2] = { 0, -1 };
1819     int64_t channel_layouts[2] = { 0, -1 };
1820     int channels[2] = { 0, -1 };
1821     AVFilterContext *filt_asrc = NULL, *filt_asink = NULL;
1822     char asrc_args[256];
1823     AVABufferSinkParams *asink_params = NULL;
1824     int ret;
1825
1826     avfilter_graph_free(&is->agraph);
1827     if (!(is->agraph = avfilter_graph_alloc()))
1828         return AVERROR(ENOMEM);
1829
1830     ret = snprintf(asrc_args, sizeof(asrc_args),
1831                    "sample_rate=%d:sample_fmt=%s:channels=%d",
1832                    is->audio_filter_src.freq, av_get_sample_fmt_name(is->audio_filter_src.fmt),
1833                    is->audio_filter_src.channels);
1834     if (is->audio_filter_src.channel_layout)
1835         snprintf(asrc_args + ret, sizeof(asrc_args) - ret,
1836                  ":channel_layout=0x%"PRIx64,  is->audio_filter_src.channel_layout);
1837
1838     ret = avfilter_graph_create_filter(&filt_asrc,
1839                                        avfilter_get_by_name("abuffer"), "ffplay_abuffer",
1840                                        asrc_args, NULL, is->agraph);
1841     if (ret < 0)
1842         goto end;
1843
1844     if (!(asink_params = av_abuffersink_params_alloc())) {
1845         ret = AVERROR(ENOMEM);
1846         goto end;
1847     }
1848     asink_params->sample_fmts = sample_fmts;
1849
1850     asink_params->all_channel_counts = 1;
1851     if (force_output_format) {
1852         channel_layouts[0] = is->audio_tgt.channel_layout;
1853         asink_params->channel_layouts = channel_layouts;
1854         asink_params->all_channel_counts = 0;
1855         channels[0] = is->audio_tgt.channels;
1856         asink_params->channel_counts = channels;
1857         asink_params->all_channel_counts = 0;
1858         sample_rates[0] = is->audio_tgt.freq;
1859         asink_params->sample_rates = sample_rates;
1860     }
1861
1862     ret = avfilter_graph_create_filter(&filt_asink,
1863                                        avfilter_get_by_name("abuffersink"), "ffplay_abuffersink",
1864                                        NULL, asink_params, is->agraph);
1865     if (ret < 0)
1866         goto end;
1867
1868     if ((ret = configure_filtergraph(is->agraph, afilters, filt_asrc, filt_asink)) < 0)
1869         goto end;
1870
1871     is->in_audio_filter  = filt_asrc;
1872     is->out_audio_filter = filt_asink;
1873
1874 end:
1875     av_freep(&asink_params);
1876     if (ret < 0)
1877         avfilter_graph_free(&is->agraph);
1878     return ret;
1879 }
1880 #endif  /* CONFIG_AVFILTER */
1881
1882 static int video_thread(void *arg)
1883 {
1884     AVPacket pkt = { 0 };
1885     VideoState *is = arg;
1886     AVFrame *frame = av_frame_alloc();
1887     int64_t pts_int = AV_NOPTS_VALUE, pos = -1;
1888     double pts;
1889     int ret;
1890     int serial = 0;
1891
1892 #if CONFIG_AVFILTER
1893     AVFilterGraph *graph = avfilter_graph_alloc();
1894     AVFilterContext *filt_out = NULL, *filt_in = NULL;
1895     int last_w = 0;
1896     int last_h = 0;
1897     enum AVPixelFormat last_format = -2;
1898     int last_serial = -1;
1899 #endif
1900
1901     for (;;) {
1902 #if CONFIG_AVFILTER
1903         AVRational tb;
1904 #endif
1905         while (is->paused && !is->videoq.abort_request)
1906             SDL_Delay(10);
1907
1908         avcodec_get_frame_defaults(frame);
1909         av_free_packet(&pkt);
1910
1911         ret = get_video_frame(is, frame, &pts_int, &pkt, &serial);
1912         if (ret < 0)
1913             goto the_end;
1914
1915         if (!ret)
1916             continue;
1917
1918 #if CONFIG_AVFILTER
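        /* rebuild the filtergraph whenever the frame geometry, pixel format or
           packet serial changes (e.g. after a seek or stream switch) */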
1919         if (   last_w != frame->width
1920             || last_h != frame->height
1921             || last_format != frame->format
1922             || last_serial != serial) {
1923             av_log(NULL, AV_LOG_DEBUG,
1924                    "Video frame changed from size:%dx%d format:%s serial:%d to size:%dx%d format:%s serial:%d\n",
1925                    last_w, last_h,
1926                    (const char *)av_x_if_null(av_get_pix_fmt_name(last_format), "none"), last_serial,
1927                    frame->width, frame->height,
1928                    (const char *)av_x_if_null(av_get_pix_fmt_name(frame->format), "none"), serial);
1929             avfilter_graph_free(&graph);
1930             graph = avfilter_graph_alloc();
1931             if ((ret = configure_video_filters(graph, is, vfilters, frame)) < 0) {
1932                 SDL_Event event;
1933                 event.type = FF_QUIT_EVENT;
1934                 event.user.data1 = is;
1935                 SDL_PushEvent(&event);
1936                 av_free_packet(&pkt);
1937                 goto the_end;
1938             }
1939             filt_in  = is->in_video_filter;
1940             filt_out = is->out_video_filter;
1941             last_w = frame->width;
1942             last_h = frame->height;
1943             last_format = frame->format;
1944             last_serial = serial;
1945         }
1946
1947         frame->pts = pts_int;
1948         frame->sample_aspect_ratio = av_guess_sample_aspect_ratio(is->ic, is->video_st, frame);
1949         ret = av_buffersrc_add_frame(filt_in, frame);
1950         if (ret < 0)
1951             goto the_end;
1952         av_frame_unref(frame);
1953         avcodec_get_frame_defaults(frame);
1954         av_free_packet(&pkt);
1955
1956         while (ret >= 0) {
1957             is->frame_last_returned_time = av_gettime() / 1000000.0;
1958
1959             ret = av_buffersink_get_frame_flags(filt_out, frame, 0);
1960             if (ret < 0) {
1961                 ret = 0;
1962                 break;
1963             }
1964
1965             is->frame_last_filter_delay = av_gettime() / 1000000.0 - is->frame_last_returned_time;
1966             if (fabs(is->frame_last_filter_delay) > AV_NOSYNC_THRESHOLD / 10.0)
1967                 is->frame_last_filter_delay = 0;
1968
1969             pts_int = frame->pts;
1970             tb      = filt_out->inputs[0]->time_base;
1971             pos     = av_frame_get_pkt_pos(frame);
1972             if (av_cmp_q(tb, is->video_st->time_base)) {
1973                 av_unused int64_t pts1 = pts_int;
1974                 pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
1975                 av_dlog(NULL, "video_thread(): "
1976                         "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
1977                         tb.num, tb.den, pts1,
1978                         is->video_st->time_base.num, is->video_st->time_base.den, pts_int);
1979             }
1980             pts = pts_int * av_q2d(is->video_st->time_base);
1981             ret = queue_picture(is, frame, pts, pos, serial);
1982             av_frame_unref(frame);
1983         }
1984 #else
1985         pts = pts_int * av_q2d(is->video_st->time_base);
1986         ret = queue_picture(is, frame, pts, pkt.pos, serial);
1987         av_frame_unref(frame);
1988 #endif
1989
1990         if (ret < 0)
1991             goto the_end;
1992     }
1993  the_end:
1994     avcodec_flush_buffers(is->video_st->codec);
1995 #if CONFIG_AVFILTER
1996     avfilter_graph_free(&graph);
1997 #endif
1998     av_free_packet(&pkt);
1999     av_frame_free(&frame);
2000     return 0;
2001 }
2002
2003 static int subtitle_thread(void *arg)
2004 {
2005     VideoState *is = arg;
2006     SubPicture *sp;
2007     AVPacket pkt1, *pkt = &pkt1;
2008     int got_subtitle;
2009     double pts;
2010     int i, j;
2011     int r, g, b, y, u, v, a;
2012
2013     for (;;) {
2014         while (is->paused && !is->subtitleq.abort_request) {
2015             SDL_Delay(10);
2016         }
2017         if (packet_queue_get(&is->subtitleq, pkt, 1, NULL) < 0)
2018             break;
2019
2020         if (pkt->data == flush_pkt.data) {
2021             avcodec_flush_buffers(is->subtitle_st->codec);
2022             continue;
2023         }
2024         SDL_LockMutex(is->subpq_mutex);
2025         while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
2026                !is->subtitleq.abort_request) {
2027             SDL_CondWait(is->subpq_cond, is->subpq_mutex);
2028         }
2029         SDL_UnlockMutex(is->subpq_mutex);
2030
2031         if (is->subtitleq.abort_request)
2032             return 0;
2033
2034         sp = &is->subpq[is->subpq_windex];
2035
2036         /* NOTE: pts is the PTS of the _first_ subtitle picture beginning
2037            in this packet, if any */
2038         pts = 0;
2039         if (pkt->pts != AV_NOPTS_VALUE)
2040             pts = av_q2d(is->subtitle_st->time_base) * pkt->pts;
2041
2042         avcodec_decode_subtitle2(is->subtitle_st->codec, &sp->sub,
2043                                  &got_subtitle, pkt);
2044         if (got_subtitle && sp->sub.format == 0) {
2045             if (sp->sub.pts != AV_NOPTS_VALUE)
2046                 pts = sp->sub.pts / (double)AV_TIME_BASE;
2047             sp->pts = pts;
2048
2049             for (i = 0; i < sp->sub.num_rects; i++)
2050             {
2051                 for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
2052                 {
2053                     RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
2054                     y = RGB_TO_Y_CCIR(r, g, b);
2055                     u = RGB_TO_U_CCIR(r, g, b, 0);
2056                     v = RGB_TO_V_CCIR(r, g, b, 0);
2057                     YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
2058                 }
2059             }
2060
2061             /* now we can update the picture count */
2062             if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
2063                 is->subpq_windex = 0;
2064             SDL_LockMutex(is->subpq_mutex);
2065             is->subpq_size++;
2066             SDL_UnlockMutex(is->subpq_mutex);
2067         }
2068         av_free_packet(pkt);
2069     }
2070     return 0;
2071 }
2072
2073 /* copy samples for viewing in the audio display (waveform/spectrum) window */
2074 static void update_sample_display(VideoState *is, short *samples, int samples_size)
2075 {
2076     int size, len;
2077
2078     size = samples_size / sizeof(short);
2079     while (size > 0) {
2080         len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
2081         if (len > size)
2082             len = size;
2083         memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
2084         samples += len;
2085         is->sample_array_index += len;
2086         if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
2087             is->sample_array_index = 0;
2088         size -= len;
2089     }
2090 }
2091
2092 /* return the wanted number of samples to get better sync if sync_type is video
2093  * or external master clock */
2094 static int synchronize_audio(VideoState *is, int nb_samples)
2095 {
2096     int wanted_nb_samples = nb_samples;
2097
2098     /* if not master, then we try to remove or add samples to correct the clock */
2099     if (get_master_sync_type(is) != AV_SYNC_AUDIO_MASTER) {
2100         double diff, avg_diff;
2101         int min_nb_samples, max_nb_samples;
2102
2103         diff = get_audio_clock(is) - get_master_clock(is);
2104
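        /* audio_diff_cum is an exponentially weighted sum of recent A-V
           differences: cum = diff + coef * cum, and avg_diff = cum * (1 - coef)
           recovers the weighted average over ~AUDIO_DIFF_AVG_NB samples */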
2105         if (!isnan(diff) && fabs(diff) < AV_NOSYNC_THRESHOLD) {
2106             is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
2107             if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
2108                 /* not enough measurements to have a correct estimate */
2109                 is->audio_diff_avg_count++;
2110             } else {
2111                 /* estimate the A-V difference */
2112                 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
2113
2114                 if (fabs(avg_diff) >= is->audio_diff_threshold) {
2115                     wanted_nb_samples = nb_samples + (int)(diff * is->audio_src.freq);
2116                     min_nb_samples = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX) / 100));
2117                     max_nb_samples = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX) / 100));
2118                     wanted_nb_samples = FFMIN(FFMAX(wanted_nb_samples, min_nb_samples), max_nb_samples);
2119                 }
2120                 av_dlog(NULL, "diff=%f adiff=%f sample_diff=%d apts=%0.3f %f\n",
2121                         diff, avg_diff, wanted_nb_samples - nb_samples,
2122                         is->audio_clock, is->audio_diff_threshold);
2123             }
2124         } else {
2125             /* too big a difference: may be initial PTS errors, so
2126                reset A-V filter */
2127             is->audio_diff_avg_count = 0;
2128             is->audio_diff_cum       = 0;
2129         }
2130     }
2131
2132     return wanted_nb_samples;
2133 }
2134
2135 /**
2136  * Decode one audio frame and return its uncompressed size.
2137  *
2138  * The processed audio frame is decoded, converted if required, and
2139  * stored in is->audio_buf, with size in bytes given by the return
2140  * value.
2141  */
2142 static int audio_decode_frame(VideoState *is)
2143 {
2144     AVPacket *pkt_temp = &is->audio_pkt_temp;
2145     AVPacket *pkt = &is->audio_pkt;
2146     AVCodecContext *dec = is->audio_st->codec;
2147     int len1, data_size, resampled_data_size;
2148     int64_t dec_channel_layout;
2149     int got_frame;
2150     av_unused double audio_clock0;
2151     int new_packet = 0;
2152     int flush_complete = 0;
2153     int wanted_nb_samples;
2154     AVRational tb;
2155
2156     for (;;) {
2157         /* NOTE: the audio packet can contain several frames */
2158         while (pkt_temp->size > 0 || (!pkt_temp->data && new_packet)) {
2159             if (!is->frame) {
2160                 if (!(is->frame = avcodec_alloc_frame()))
2161                     return AVERROR(ENOMEM);
2162             } else {
2163                 av_frame_unref(is->frame);
2164                 avcodec_get_frame_defaults(is->frame);
2165             }
2166
2167             if (is->audioq.serial != is->audio_pkt_temp_serial)
2168                 break;
2169
2170             if (is->paused)
2171                 return -1;
2172
2173             if (flush_complete)
2174                 break;
2175             new_packet = 0;
2176             len1 = avcodec_decode_audio4(dec, is->frame, &got_frame, pkt_temp);
2177             if (len1 < 0) {
2178                 /* if error, we skip the frame */
2179                 pkt_temp->size = 0;
2180                 break;
2181             }
2182
2183             pkt_temp->data += len1;
2184             pkt_temp->size -= len1;
2185
2186             if (!got_frame) {
2187                 /* stop sending empty packets if the decoder is finished */
2188                 if (!pkt_temp->data && dec->codec->capabilities & CODEC_CAP_DELAY)
2189                     flush_complete = 1;
2190                 continue;
2191             }
2192
2193             if (is->frame->pts == AV_NOPTS_VALUE && pkt_temp->pts != AV_NOPTS_VALUE)
2194                 is->frame->pts = av_rescale_q(pkt_temp->pts, is->audio_st->time_base, dec->time_base);
2195             if (pkt_temp->pts != AV_NOPTS_VALUE)
2196                 pkt_temp->pts += (double) is->frame->nb_samples / is->frame->sample_rate / av_q2d(is->audio_st->time_base);
2197             tb = dec->time_base;
2198
2199 #if CONFIG_AVFILTER
2200             {
2201                 int ret;
2202                 int reconfigure;
2203
2204                 dec_channel_layout = get_valid_channel_layout(is->frame->channel_layout, av_frame_get_channels(is->frame));
2205
2206                 reconfigure =
2207                     cmp_audio_fmts(is->audio_filter_src.fmt, is->audio_filter_src.channels,
2208                                    is->frame->format, av_frame_get_channels(is->frame))    ||
2209                     is->audio_filter_src.channel_layout != dec_channel_layout ||
2210                     is->audio_filter_src.freq           != is->frame->sample_rate ||
2211                     is->audio_pkt_temp_serial           != is->audio_last_serial;
2212
2213                 if (reconfigure) {
2214                     char buf1[1024], buf2[1024];
2215                     av_get_channel_layout_string(buf1, sizeof(buf1), -1, is->audio_filter_src.channel_layout);
2216                     av_get_channel_layout_string(buf2, sizeof(buf2), -1, dec_channel_layout);
2217                     av_log(NULL, AV_LOG_DEBUG,
2218                            "Audio frame changed from rate:%d ch:%d fmt:%s layout:%s serial:%d to rate:%d ch:%d fmt:%s layout:%s serial:%d\n",
2219                            is->audio_filter_src.freq, is->audio_filter_src.channels, av_get_sample_fmt_name(is->audio_filter_src.fmt), buf1, is->audio_last_serial,
2220                            is->frame->sample_rate, av_frame_get_channels(is->frame), av_get_sample_fmt_name(is->frame->format), buf2, is->audio_pkt_temp_serial);
2221
2222                     is->audio_filter_src.fmt            = is->frame->format;
2223                     is->audio_filter_src.channels       = av_frame_get_channels(is->frame);
2224                     is->audio_filter_src.channel_layout = dec_channel_layout;
2225                     is->audio_filter_src.freq           = is->frame->sample_rate;
2226                     is->audio_last_serial               = is->audio_pkt_temp_serial;
2227
2228                     if ((ret = configure_audio_filters(is, afilters, 1)) < 0)
2229                         return ret;
2230                 }
2231
2232                 if ((ret = av_buffersrc_add_frame(is->in_audio_filter, is->frame)) < 0)
2233                     return ret;
2234                 av_frame_unref(is->frame);
2235                 if ((ret = av_buffersink_get_frame_flags(is->out_audio_filter, is->frame, 0)) < 0)
2236                     return ret;
2237                 tb = is->out_audio_filter->inputs[0]->time_base;
2238             }
2239 #endif
2240
2241             data_size = av_samples_get_buffer_size(NULL, av_frame_get_channels(is->frame),
2242                                                    is->frame->nb_samples,
2243                                                    is->frame->format, 1);
2244
2245             dec_channel_layout =
2246                 (is->frame->channel_layout && av_frame_get_channels(is->frame) == av_get_channel_layout_nb_channels(is->frame->channel_layout)) ?
2247                 is->frame->channel_layout : av_get_default_channel_layout(av_frame_get_channels(is->frame));
2248             wanted_nb_samples = synchronize_audio(is, is->frame->nb_samples);
2249
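            /* (re)create the resampler whenever the decoded format, layout or
               rate no longer matches the negotiated source format, or when
               sample-count compensation is needed; output goes to audio_tgt */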
2250             if (is->frame->format        != is->audio_src.fmt            ||
2251                 dec_channel_layout       != is->audio_src.channel_layout ||
2252                 is->frame->sample_rate   != is->audio_src.freq           ||
2253                 (wanted_nb_samples       != is->frame->nb_samples && !is->swr_ctx)) {
2254                 swr_free(&is->swr_ctx);
2255                 is->swr_ctx = swr_alloc_set_opts(NULL,
2256                                                  is->audio_tgt.channel_layout, is->audio_tgt.fmt, is->audio_tgt.freq,
2257                                                  dec_channel_layout,           is->frame->format, is->frame->sample_rate,
2258                                                  0, NULL);
2259                 if (!is->swr_ctx || swr_init(is->swr_ctx) < 0) {
2260                     fprintf(stderr, "Cannot create sample rate converter for conversion of %d Hz %s %d channels to %d Hz %s %d channels!\n",
2261                             is->frame->sample_rate, av_get_sample_fmt_name(is->frame->format), av_frame_get_channels(is->frame),
2262                             is->audio_tgt.freq, av_get_sample_fmt_name(is->audio_tgt.fmt), is->audio_tgt.channels);
2263                     break;
2264                 }
2265                 is->audio_src.channel_layout = dec_channel_layout;
2266                 is->audio_src.channels       = av_frame_get_channels(is->frame);
2267                 is->audio_src.freq = is->frame->sample_rate;
2268                 is->audio_src.fmt = is->frame->format;
2269             }
2270
2271             if (is->swr_ctx) {
2272                 const uint8_t **in = (const uint8_t **)is->frame->extended_data;
2273                 uint8_t **out = &is->audio_buf1;
2274                 int out_count = (int64_t)wanted_nb_samples * is->audio_tgt.freq / is->frame->sample_rate + 256;
2275                 int out_size  = av_samples_get_buffer_size(NULL, is->audio_tgt.channels, out_count, is->audio_tgt.fmt, 0);
2276                 int len2;
2277                 if (wanted_nb_samples != is->frame->nb_samples) {
2278                     if (swr_set_compensation(is->swr_ctx, (wanted_nb_samples - is->frame->nb_samples) * is->audio_tgt.freq / is->frame->sample_rate,
2279                                                 wanted_nb_samples * is->audio_tgt.freq / is->frame->sample_rate) < 0) {
2280                         fprintf(stderr, "swr_set_compensation() failed\n");
2281                         break;
2282                     }
2283                 }
2284                 av_fast_malloc(&is->audio_buf1, &is->audio_buf1_size, out_size);
2285                 if (!is->audio_buf1)
2286                     return AVERROR(ENOMEM);
2287                 len2 = swr_convert(is->swr_ctx, out, out_count, in, is->frame->nb_samples);
2288                 if (len2 < 0) {
2289                     fprintf(stderr, "swr_convert() failed\n");
2290                     break;
2291                 }
2292                 if (len2 == out_count) {
2293                     fprintf(stderr, "warning: audio buffer is probably too small\n");
2294                     swr_init(is->swr_ctx);
2295                 }
2296                 is->audio_buf = is->audio_buf1;
2297                 resampled_data_size = len2 * is->audio_tgt.channels * av_get_bytes_per_sample(is->audio_tgt.fmt);
2298             } else {
2299                 is->audio_buf = is->frame->data[0];
2300                 resampled_data_size = data_size;
2301             }
2302
2303             audio_clock0 = is->audio_clock;
2304             /* update the audio clock with the pts */
2305             if (is->frame->pts != AV_NOPTS_VALUE) {
2306                 is->audio_clock = is->frame->pts * av_q2d(tb) + (double) is->frame->nb_samples / is->frame->sample_rate;
2307                 is->audio_clock_serial = is->audio_pkt_temp_serial;
2308             }
2309 #ifdef DEBUG
2310             {
2311                 static double last_clock;
2312                 printf("audio: delay=%0.3f clock=%0.3f clock0=%0.3f\n",
2313                        is->audio_clock - last_clock,
2314                        is->audio_clock, audio_clock0);
2315                 last_clock = is->audio_clock;
2316             }
2317 #endif
2318             return resampled_data_size;
2319         }
2320
2321         /* free the current packet */
2322         if (pkt->data)
2323             av_free_packet(pkt);
2324         memset(pkt_temp, 0, sizeof(*pkt_temp));
2325
2326         if (is->audioq.abort_request) {
2327             return -1;
2328         }
2329
2330         if (is->audioq.nb_packets == 0)
2331             SDL_CondSignal(is->continue_read_thread);
2332
2333         /* read next packet */
2334         if ((new_packet = packet_queue_get(&is->audioq, pkt, 1, &is->audio_pkt_temp_serial)) < 0)
2335             return -1;
2336
2337         if (pkt->data == flush_pkt.data) {
2338             avcodec_flush_buffers(dec);
2339             flush_complete = 0;
2340         }
2341
2342         *pkt_temp = *pkt;
2343     }
2344 }
2345
2346 /* prepare a new audio buffer */
2347 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2348 {
2349     VideoState *is = opaque;
2350     int audio_size, len1;
2351     int bytes_per_sec;
2352     int frame_size = av_samples_get_buffer_size(NULL, is->audio_tgt.channels, 1, is->audio_tgt.fmt, 1);
2353
2354     audio_callback_time = av_gettime();
2355
2356     while (len > 0) {
2357         if (is->audio_buf_index >= is->audio_buf_size) {
2358            audio_size = audio_decode_frame(is);
2359            if (audio_size < 0) {
2360                 /* if error, just output silence */
2361                is->audio_buf      = is->silence_buf;
2362                is->audio_buf_size = sizeof(is->silence_buf) / frame_size * frame_size;
2363            } else {
2364                if (is->show_mode != SHOW_MODE_VIDEO)
2365                    update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2366                is->audio_buf_size = audio_size;
2367            }
2368            is->audio_buf_index = 0;
2369         }
2370         len1 = is->audio_buf_size - is->audio_buf_index;
2371         if (len1 > len)
2372             len1 = len;
2373         memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2374         len -= len1;
2375         stream += len1;
2376         is->audio_buf_index += len1;
2377     }
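    /* derive the audio clock at callback time: the pts of the last decoded
       sample minus what is still queued in the SDL/hardware buffers and in
       audio_buf, converted to seconds via bytes_per_sec */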
2378     bytes_per_sec = is->audio_tgt.freq * is->audio_tgt.channels * av_get_bytes_per_sample(is->audio_tgt.fmt);
2379     is->audio_write_buf_size = is->audio_buf_size - is->audio_buf_index;
2380     /* Let's assume the audio driver that is used by SDL has two periods. */
2381     is->audio_current_pts = is->audio_clock - (double)(2 * is->audio_hw_buf_size + is->audio_write_buf_size) / bytes_per_sec;
2382     is->audio_current_pts_drift = is->audio_current_pts - audio_callback_time / 1000000.0;
2383     if (is->audioq.serial == is->audio_clock_serial)
2384         check_external_clock_sync(is, is->audio_current_pts);
2385 }
2386
2387 static int audio_open(void *opaque, int64_t wanted_channel_layout, int wanted_nb_channels, int wanted_sample_rate, struct AudioParams *audio_hw_params)
2388 {
2389     SDL_AudioSpec wanted_spec, spec;
2390     const char *env;
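    /* fallback channel counts to try when SDL_OpenAudio() refuses a layout,
       indexed by the channel count that just failed (clamped to 7) */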
2391     const int next_nb_channels[] = {0, 0, 1, 6, 2, 6, 4, 6};
2392
2393     env = SDL_getenv("SDL_AUDIO_CHANNELS");
2394     if (env) {
2395         wanted_nb_channels = atoi(env);
2396         wanted_channel_layout = av_get_default_channel_layout(wanted_nb_channels);
2397     }
2398     if (!wanted_channel_layout || wanted_nb_channels != av_get_channel_layout_nb_channels(wanted_channel_layout)) {
2399         wanted_channel_layout = av_get_default_channel_layout(wanted_nb_channels);
2400         wanted_channel_layout &= ~AV_CH_LAYOUT_STEREO_DOWNMIX;
2401     }
2402     wanted_spec.channels = av_get_channel_layout_nb_channels(wanted_channel_layout);
2403     wanted_spec.freq = wanted_sample_rate;
2404     if (wanted_spec.freq <= 0 || wanted_spec.channels <= 0) {
2405         fprintf(stderr, "Invalid sample rate or channel count!\n");
2406         return -1;
2407     }
2408     wanted_spec.format = AUDIO_S16SYS;
2409     wanted_spec.silence = 0;
2410     wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
2411     wanted_spec.callback = sdl_audio_callback;
2412     wanted_spec.userdata = opaque;
2413     while (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2414         fprintf(stderr, "SDL_OpenAudio (%d channels): %s\n", wanted_spec.channels, SDL_GetError());
2415         wanted_spec.channels = next_nb_channels[FFMIN(7, wanted_spec.channels)];
2416         if (!wanted_spec.channels) {
2417             fprintf(stderr, "No more channel combinations to try, audio open failed\n");
2418             return -1;
2419         }
2420         wanted_channel_layout = av_get_default_channel_layout(wanted_spec.channels);
2421     }
2422     if (spec.format != AUDIO_S16SYS) {
2423         fprintf(stderr, "SDL advised audio format %d is not supported!\n", spec.format);
2424         return -1;
2425     }
2426     if (spec.channels != wanted_spec.channels) {
2427         wanted_channel_layout = av_get_default_channel_layout(spec.channels);
2428         if (!wanted_channel_layout) {
2429             fprintf(stderr, "SDL advised channel count %d is not supported!\n", spec.channels);
2430             return -1;
2431         }
2432     }
2433
2434     audio_hw_params->fmt = AV_SAMPLE_FMT_S16;
2435     audio_hw_params->freq = spec.freq;
2436     audio_hw_params->channel_layout = wanted_channel_layout;
2437     audio_hw_params->channels =  spec.channels;
2438     return spec.size;
2439 }
2440
2441 /* open a given stream. Return 0 if OK */
2442 static int stream_component_open(VideoState *is, int stream_index)
2443 {
2444     AVFormatContext *ic = is->ic;
2445     AVCodecContext *avctx;
2446     AVCodec *codec;
2447     const char *forced_codec_name = NULL;
2448     AVDictionary *opts;
2449     AVDictionaryEntry *t = NULL;
2450     int sample_rate, nb_channels;
2451     int64_t channel_layout;
2452     int ret;
2453
2454     if (stream_index < 0 || stream_index >= ic->nb_streams)
2455         return -1;
2456     avctx = ic->streams[stream_index]->codec;
2457
2458     codec = avcodec_find_decoder(avctx->codec_id);
2459
2460     switch(avctx->codec_type){
2461         case AVMEDIA_TYPE_AUDIO   : is->last_audio_stream    = stream_index; forced_codec_name =    audio_codec_name; break;
2462         case AVMEDIA_TYPE_SUBTITLE: is->last_subtitle_stream = stream_index; forced_codec_name = subtitle_codec_name; break;
2463         case AVMEDIA_TYPE_VIDEO   : is->last_video_stream    = stream_index; forced_codec_name =    video_codec_name; break;
2464     }
2465     if (forced_codec_name)
2466         codec = avcodec_find_decoder_by_name(forced_codec_name);
2467     if (!codec) {
2468         if (forced_codec_name) fprintf(stderr, "No codec could be found with name '%s'\n", forced_codec_name);
2469         else                   fprintf(stderr, "No codec could be found with id %d\n", avctx->codec_id);
2470         return -1;
2471     }
2472
2473     avctx->codec_id = codec->id;
2474     avctx->workaround_bugs   = workaround_bugs;
2475     avctx->lowres            = lowres;
2476     if(avctx->lowres > codec->max_lowres){
2477         av_log(avctx, AV_LOG_WARNING, "The maximum value for lowres supported by the decoder is %d\n",
2478                 codec->max_lowres);
2479         avctx->lowres= codec->max_lowres;
2480     }
2481     avctx->idct_algo         = idct;
2482     avctx->error_concealment = error_concealment;
2483
2484     if(avctx->lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
2485     if (fast)   avctx->flags2 |= CODEC_FLAG2_FAST;
2486     if(codec->capabilities & CODEC_CAP_DR1)
2487         avctx->flags |= CODEC_FLAG_EMU_EDGE;
2488
2489     opts = filter_codec_opts(codec_opts, avctx->codec_id, ic, ic->streams[stream_index], codec);
2490     if (!av_dict_get(opts, "threads", NULL, 0))
2491         av_dict_set(&opts, "threads", "auto", 0);
2492     if (avctx->codec_type == AVMEDIA_TYPE_VIDEO || avctx->codec_type == AVMEDIA_TYPE_AUDIO)
2493         av_dict_set(&opts, "refcounted_frames", "1", 0);
2494     if (avcodec_open2(avctx, codec, &opts) < 0)
2495         return -1;
2496     if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2497         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2498         return AVERROR_OPTION_NOT_FOUND;
2499     }
2500
2501     ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2502     switch (avctx->codec_type) {
2503     case AVMEDIA_TYPE_AUDIO:
2504 #if CONFIG_AVFILTER
2505         {
2506             AVFilterLink *link;
2507
2508             is->audio_filter_src.freq           = avctx->sample_rate;
2509             is->audio_filter_src.channels       = avctx->channels;
2510             is->audio_filter_src.channel_layout = get_valid_channel_layout(avctx->channel_layout, avctx->channels);
2511             is->audio_filter_src.fmt            = avctx->sample_fmt;
2512             if ((ret = configure_audio_filters(is, afilters, 0)) < 0)
2513                 return ret;
2514             link = is->out_audio_filter->inputs[0];
2515             sample_rate    = link->sample_rate;
2516             nb_channels    = link->channels;
2517             channel_layout = link->channel_layout;
2518         }
2519 #else
2520         sample_rate    = avctx->sample_rate;
2521         nb_channels    = avctx->channels;
2522         channel_layout = avctx->channel_layout;
2523 #endif
2524
2525         /* prepare audio output */
2526         if ((ret = audio_open(is, channel_layout, nb_channels, sample_rate, &is->audio_tgt)) < 0)
2527             return ret;
2528         is->audio_hw_buf_size = ret;
2529         is->audio_src = is->audio_tgt;
2530         is->audio_buf_size  = 0;
2531         is->audio_buf_index = 0;
2532
2533         /* init averaging filter */
2534         is->audio_diff_avg_coef  = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2535         is->audio_diff_avg_count = 0;
2536         /* since we do not have a precise enough audio FIFO fullness,
2537            we correct audio sync only if larger than this threshold */
2538         is->audio_diff_threshold = 2.0 * is->audio_hw_buf_size / av_samples_get_buffer_size(NULL, is->audio_tgt.channels, is->audio_tgt.freq, is->audio_tgt.fmt, 1);
2539
2540         memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
2541         memset(&is->audio_pkt_temp, 0, sizeof(is->audio_pkt_temp));
2542
2543         is->audio_stream = stream_index;
2544         is->audio_st = ic->streams[stream_index];
2545
2546         packet_queue_start(&is->audioq);
2547         SDL_PauseAudio(0);
2548         break;
2549     case AVMEDIA_TYPE_VIDEO:
2550         is->video_stream = stream_index;
2551         is->video_st = ic->streams[stream_index];
2552
2553         packet_queue_start(&is->videoq);
2554         is->video_tid = SDL_CreateThread(video_thread, is);
2555         break;
2556     case AVMEDIA_TYPE_SUBTITLE:
2557         is->subtitle_stream = stream_index;
2558         is->subtitle_st = ic->streams[stream_index];
2559         packet_queue_start(&is->subtitleq);
2560
2561         is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
2562         break;
2563     default:
2564         break;
2565     }
2566     return 0;
2567 }
2568
2569 static void stream_component_close(VideoState *is, int stream_index)
2570 {
2571     AVFormatContext *ic = is->ic;
2572     AVCodecContext *avctx;
2573
2574     if (stream_index < 0 || stream_index >= ic->nb_streams)
2575         return;
2576     avctx = ic->streams[stream_index]->codec;
2577
2578     switch (avctx->codec_type) {
2579     case AVMEDIA_TYPE_AUDIO:
2580         packet_queue_abort(&is->audioq);
2581
2582         SDL_CloseAudio();
2583
2584         packet_queue_flush(&is->audioq);
2585         av_free_packet(&is->audio_pkt);
2586         swr_free(&is->swr_ctx);
2587         av_freep(&is->audio_buf1);
2588         is->audio_buf1_size = 0;
2589         is->audio_buf = NULL;
2590         av_frame_free(&is->frame);
2591
2592         if (is->rdft) {
2593             av_rdft_end(is->rdft);
2594             av_freep(&is->rdft_data);
2595             is->rdft = NULL;
2596             is->rdft_bits = 0;
2597         }
2598 #if CONFIG_AVFILTER
2599         avfilter_graph_free(&is->agraph);
2600 #endif
2601         break;
2602     case AVMEDIA_TYPE_VIDEO:
2603         packet_queue_abort(&is->videoq);
2604
2605         /* note: we also signal this mutex to make sure we deblock the
2606            video thread in all cases */
2607         SDL_LockMutex(is->pictq_mutex);
2608         SDL_CondSignal(is->pictq_cond);
2609         SDL_UnlockMutex(is->pictq_mutex);
2610
2611         SDL_WaitThread(is->video_tid, NULL);
2612
2613         packet_queue_flush(&is->videoq);
2614         break;
2615     case AVMEDIA_TYPE_SUBTITLE:
2616         packet_queue_abort(&is->subtitleq);
2617
2618         /* note: we also signal this mutex to make sure we deblock the
2619            subtitle thread in all cases */
2620         SDL_LockMutex(is->subpq_mutex);
2621         is->subtitle_stream_changed = 1;
2622
2623         SDL_CondSignal(is->subpq_cond);
2624         SDL_UnlockMutex(is->subpq_mutex);
2625
2626         SDL_WaitThread(is->subtitle_tid, NULL);
2627
2628         packet_queue_flush(&is->subtitleq);
2629         break;
2630     default:
2631         break;
2632     }
2633
2634     ic->streams[stream_index]->discard = AVDISCARD_ALL;
2635     avcodec_close(avctx);
2636     switch (avctx->codec_type) {
2637     case AVMEDIA_TYPE_AUDIO:
2638         is->audio_st = NULL;
2639         is->audio_stream = -1;
2640         break;
2641     case AVMEDIA_TYPE_VIDEO:
2642         is->video_st = NULL;
2643         is->video_stream = -1;
2644         break;
2645     case AVMEDIA_TYPE_SUBTITLE:
2646         is->subtitle_st = NULL;
2647         is->subtitle_stream = -1;
2648         break;
2649     default:
2650         break;
2651     }
2652 }
2653
2654 static int decode_interrupt_cb(void *ctx)
2655 {
2656     VideoState *is = ctx;
2657     return is->abort_request;
2658 }
2659
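/* realtime sources (RTP/RTSP/SDP inputs and rtp:/udp: URLs) get an unbounded
   packet buffer; see the infinite_buffer handling in read_thread() below */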
2660 static int is_realtime(AVFormatContext *s)
2661 {
2662     if(   !strcmp(s->iformat->name, "rtp")
2663        || !strcmp(s->iformat->name, "rtsp")
2664        || !strcmp(s->iformat->name, "sdp")
2665     )
2666         return 1;
2667
2668     if(s->pb && (   !strncmp(s->filename, "rtp:", 4)
2669                  || !strncmp(s->filename, "udp:", 4)
2670                 )
2671     )
2672         return 1;
2673     return 0;
2674 }
2675
2676 /* this thread gets the stream from the disk or the network */
2677 static int read_thread(void *arg)
2678 {
2679     VideoState *is = arg;
2680     AVFormatContext *ic = NULL;
2681     int err, i, ret;
2682     int st_index[AVMEDIA_TYPE_NB];
2683     AVPacket pkt1, *pkt = &pkt1;
2684     int eof = 0;
2685     int pkt_in_play_range = 0;
2686     AVDictionaryEntry *t;
2687     AVDictionary **opts;
2688     int orig_nb_streams;
2689     SDL_mutex *wait_mutex = SDL_CreateMutex();
2690
2691     memset(st_index, -1, sizeof(st_index));
2692     is->last_video_stream = is->video_stream = -1;
2693     is->last_audio_stream = is->audio_stream = -1;
2694     is->last_subtitle_stream = is->subtitle_stream = -1;
2695
2696     ic = avformat_alloc_context();
2697     ic->interrupt_callback.callback = decode_interrupt_cb;
2698     ic->interrupt_callback.opaque = is;
2699     err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
2700     if (err < 0) {
2701         print_error(is->filename, err);
2702         ret = -1;
2703         goto fail;
2704     }
2705     if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2706         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2707         ret = AVERROR_OPTION_NOT_FOUND;
2708         goto fail;
2709     }
2710     is->ic = ic;
2711
2712     if (genpts)
2713         ic->flags |= AVFMT_FLAG_GENPTS;
2714
2715     opts = setup_find_stream_info_opts(ic, codec_opts);
2716     orig_nb_streams = ic->nb_streams;
2717
2718     err = avformat_find_stream_info(ic, opts);
2719     if (err < 0) {
2720         fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
2721         ret = -1;
2722         goto fail;
2723     }
2724     for (i = 0; i < orig_nb_streams; i++)
2725         av_dict_free(&opts[i]);
2726     av_freep(&opts);
2727
2728     if (ic->pb)
2729         ic->pb->eof_reached = 0; // FIXME hack, ffplay maybe should not use url_feof() to test for the end
2730
2731     if (seek_by_bytes < 0)
2732         seek_by_bytes = !!(ic->iformat->flags & AVFMT_TS_DISCONT);
2733
2734     is->max_frame_duration = (ic->iformat->flags & AVFMT_TS_DISCONT) ? 10.0 : 3600.0;
2735
2736     /* if seeking requested, we execute it */
2737     if (start_time != AV_NOPTS_VALUE) {
2738         int64_t timestamp;
2739
2740         timestamp = start_time;
2741         /* add the stream start time */
2742         if (ic->start_time != AV_NOPTS_VALUE)
2743             timestamp += ic->start_time;
2744         ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2745         if (ret < 0) {
2746             fprintf(stderr, "%s: could not seek to position %0.3f\n",
2747                     is->filename, (double)timestamp / AV_TIME_BASE);
2748         }
2749     }
2750
2751     is->realtime = is_realtime(ic);
2752
2753     for (i = 0; i < ic->nb_streams; i++)
2754         ic->streams[i]->discard = AVDISCARD_ALL;
2755     if (!video_disable)
2756         st_index[AVMEDIA_TYPE_VIDEO] =
2757             av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2758                                 wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2759     if (!audio_disable)
2760         st_index[AVMEDIA_TYPE_AUDIO] =
2761             av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2762                                 wanted_stream[AVMEDIA_TYPE_AUDIO],
2763                                 st_index[AVMEDIA_TYPE_VIDEO],
2764                                 NULL, 0);
2765     if (!video_disable && !subtitle_disable)
2766         st_index[AVMEDIA_TYPE_SUBTITLE] =
2767             av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2768                                 wanted_stream[AVMEDIA_TYPE_SUBTITLE],
2769                                 (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2770                                  st_index[AVMEDIA_TYPE_AUDIO] :
2771                                  st_index[AVMEDIA_TYPE_VIDEO]),
2772                                 NULL, 0);
2773     if (show_status) {
2774         av_dump_format(ic, 0, is->filename, 0);
2775     }
2776
2777     is->show_mode = show_mode;
2778
2779     /* open the streams */
2780     if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2781         stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2782     }
2783
2784     ret = -1;
2785     if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2786         ret = stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2787     }
2788     if (is->show_mode == SHOW_MODE_NONE)
2789         is->show_mode = ret >= 0 ? SHOW_MODE_VIDEO : SHOW_MODE_RDFT;
2790
2791     if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2792         stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2793     }
2794
2795     if (is->video_stream < 0 && is->audio_stream < 0) {
2796         fprintf(stderr, "%s: could not open codecs\n", is->filename);
2797         ret = -1;
2798         goto fail;
2799     }
2800
2801     if (infinite_buffer < 0 && is->realtime)
2802         infinite_buffer = 1;
2803
2804     for (;;) {
2805         if (is->abort_request)
2806             break;
2807         if (is->paused != is->last_paused) {
2808             is->last_paused = is->paused;
2809             if (is->paused)
2810                 is->read_pause_return = av_read_pause(ic);
2811             else
2812                 av_read_play(ic);
2813         }
2814 #if CONFIG_RTSP_DEMUXER || CONFIG_MMSH_PROTOCOL
2815         if (is->paused &&
2816                 (!strcmp(ic->iformat->name, "rtsp") ||
2817                  (ic->pb && !strncmp(input_filename, "mmsh:", 5)))) {
2818             /* wait 10 ms to avoid trying to get another packet */
2819             /* XXX: horrible */
2820             SDL_Delay(10);
2821             continue;
2822         }
2823 #endif
2824         if (is->seek_req) {
2825             int64_t seek_target = is->seek_pos;
2826             int64_t seek_min    = is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2827             int64_t seek_max    = is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2828 // FIXME the +-2 is due to rounding not being done in the correct direction in the generation
2829 //      of the seek_pos/seek_rel variables
2830
2831             ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2832             if (ret < 0) {
2833                 fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
2834             } else {
2835                 if (is->audio_stream >= 0) {
2836                     packet_queue_flush(&is->audioq);
2837                     packet_queue_put(&is->audioq, &flush_pkt);
2838                 }
2839                 if (is->subtitle_stream >= 0) {
2840                     packet_queue_flush(&is->subtitleq);
2841                     packet_queue_put(&is->subtitleq, &flush_pkt);
2842                 }
2843                 if (is->video_stream >= 0) {
2844                     packet_queue_flush(&is->videoq);
2845                     packet_queue_put(&is->videoq, &flush_pkt);
2846                 }
2847                 if (is->seek_flags & AVSEEK_FLAG_BYTE) {
2848                    update_external_clock_pts(is, NAN);
2849                 } else {
2850                    update_external_clock_pts(is, seek_target / (double)AV_TIME_BASE);
2851                 }
2852             }
2853             is->seek_req = 0;
2854             eof = 0;
2855             if (is->paused)
2856                 step_to_next_frame(is);
2857         }
2858         if (is->queue_attachments_req) {
2859             avformat_queue_attached_pictures(ic);
2860             is->queue_attachments_req = 0;
2861         }
2862
2863         /* if the queues are full, no need to read more */
2864         if (infinite_buffer<1 &&
2865               (is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2866             || (   (is->audioq   .nb_packets > MIN_FRAMES || is->audio_stream < 0 || is->audioq.abort_request)
2867                 && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream < 0 || is->videoq.abort_request)
2868                 && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream < 0 || is->subtitleq.abort_request)))) {
2869             /* wait 10 ms */
2870             SDL_LockMutex(wait_mutex);
2871             SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
2872             SDL_UnlockMutex(wait_mutex);
2873             continue;
2874         }
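        /* at end of file, queue null packets so decoders with CODEC_CAP_DELAY
           flush their remaining frames, then restart or exit once every packet
           queue has drained (per the loop / autoexit settings) */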
2875         if (eof) {
2876             if (is->video_stream >= 0) {
2877                 av_init_packet(pkt);
2878                 pkt->data = NULL;
2879                 pkt->size = 0;
2880                 pkt->stream_index = is->video_stream;
2881                 packet_queue_put(&is->videoq, pkt);
2882             }
2883             if (is->audio_stream >= 0 &&
2884                 is->audio_st->codec->codec->capabilities & CODEC_CAP_DELAY) {
2885                 av_init_packet(pkt);
2886                 pkt->data = NULL;
2887                 pkt->size = 0;
2888                 pkt->stream_index = is->audio_stream;
2889                 packet_queue_put(&is->audioq, pkt);
2890             }
2891             SDL_Delay(10);
2892             if (is->audioq.size + is->videoq.size + is->subtitleq.size == 0) {
2893                 if (loop != 1 && (!loop || --loop)) {
2894                     stream_seek(is, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
2895                 } else if (autoexit) {
2896                     ret = AVERROR_EOF;
2897                     goto fail;
2898                 }
2899             }
2900             eof = 0;
2901             continue;
2902         }
2903         ret = av_read_frame(ic, pkt);
2904         if (ret < 0) {
2905             if (ret == AVERROR_EOF || url_feof(ic->pb))
2906                 eof = 1;
2907             if (ic->pb && ic->pb->error)
2908                 break;
2909             SDL_LockMutex(wait_mutex);
2910             SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
2911             SDL_UnlockMutex(wait_mutex);
2912             continue;
2913         }
2914         /* check if packet is in play range specified by user, then queue, otherwise discard */
2915         pkt_in_play_range = duration == AV_NOPTS_VALUE ||
2916                 (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
2917                 av_q2d(ic->streams[pkt->stream_index]->time_base) -
2918                 (double)(start_time != AV_NOPTS_VALUE ? start_time : 0) / 1000000
2919                 <= ((double)duration / 1000000);
2920         if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
2921             packet_queue_put(&is->audioq, pkt);
2922         } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
2923             packet_queue_put(&is->videoq, pkt);
2924         } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
2925             packet_queue_put(&is->subtitleq, pkt);
2926         } else {
2927             av_free_packet(pkt);
2928         }
2929     }
2930     /* wait until the end */
2931     while (!is->abort_request) {
2932         SDL_Delay(100);
2933     }
2934
2935     ret = 0;
2936  fail:
2937     /* close each stream */
2938     if (is->audio_stream >= 0)
2939         stream_component_close(is, is->audio_stream);
2940     if (is->video_stream >= 0)
2941         stream_component_close(is, is->video_stream);
2942     if (is->subtitle_stream >= 0)
2943         stream_component_close(is, is->subtitle_stream);
2944     if (is->ic) {
2945         avformat_close_input(&is->ic);
2946     }
2947
2948     if (ret != 0) {
2949         SDL_Event event;
2950
2951         event.type = FF_QUIT_EVENT;
2952         event.user.data1 = is;
2953         SDL_PushEvent(&event);
2954     }
2955     SDL_DestroyMutex(wait_mutex);
2956     return 0;
2957 }
2958
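/* Allocate and initialize a VideoState for the given input: create the
 * picture/subpicture mutexes and conditions, the packet queues and the clocks,
 * then start the read_thread that performs the actual demuxing.
 * Returns NULL on allocation or thread-creation failure. */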
2959 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2960 {
2961     VideoState *is;
2962
2963     is = av_mallocz(sizeof(VideoState));
2964     if (!is)
2965         return NULL;
2966     av_strlcpy(is->filename, filename, sizeof(is->filename));
2967     is->iformat = iformat;
2968     is->ytop    = 0;
2969     is->xleft   = 0;
2970
2971     /* start video display */
2972     is->pictq_mutex = SDL_CreateMutex();
2973     is->pictq_cond  = SDL_CreateCond();
2974
2975     is->subpq_mutex = SDL_CreateMutex();
2976     is->subpq_cond  = SDL_CreateCond();
2977
2978     packet_queue_init(&is->videoq);
2979     packet_queue_init(&is->audioq);
2980     packet_queue_init(&is->subtitleq);
2981
2982     is->continue_read_thread = SDL_CreateCond();
2983
2984     update_external_clock_pts(is, NAN);
2985     update_external_clock_speed(is, 1.0);
2986     is->audio_current_pts_drift = -av_gettime() / 1000000.0;
2987     is->video_current_pts_drift = is->audio_current_pts_drift;
2988     is->audio_clock_serial = -1;
2989     is->video_clock_serial = -1;
2990     is->audio_last_serial = -1;
2991     is->av_sync_type = av_sync_type;
2992     is->read_tid     = SDL_CreateThread(read_thread, is);
2993     if (!is->read_tid) {
2994         av_free(is);
2995         return NULL;
2996     }
2997     return is;
2998 }
2999
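/* Switch to the next stream of the given media type, scanning the container's
 * streams round-robin. For subtitles the cycle includes the "no subtitle"
 * state (stream_index = -1); audio streams with invalid parameters (no sample
 * rate or channels) are skipped. */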
3000 static void stream_cycle_channel(VideoState *is, int codec_type)
3001 {
3002     AVFormatContext *ic = is->ic;
3003     int start_index, stream_index;
3004     int old_index;
3005     AVStream *st;
3006
3007     if (codec_type == AVMEDIA_TYPE_VIDEO) {
3008         start_index = is->last_video_stream;
3009         old_index = is->video_stream;
3010     } else if (codec_type == AVMEDIA_TYPE_AUDIO) {
3011         start_index = is->last_audio_stream;
3012         old_index = is->audio_stream;
3013     } else {
3014         start_index = is->last_subtitle_stream;
3015         old_index = is->subtitle_stream;
3016     }
3017     stream_index = start_index;
3018     for (;;) {
3019         if (++stream_index >= is->ic->nb_streams)
3020         {
3021             if (codec_type == AVMEDIA_TYPE_SUBTITLE)
3022             {
3023                 stream_index = -1;
3024                 is->last_subtitle_stream = -1;
3025                 goto the_end;
3026             }
3027             if (start_index == -1)
3028                 return;
3029             stream_index = 0;
3030         }
3031         if (stream_index == start_index)
3032             return;
3033         st = ic->streams[stream_index];
3034         if (st->codec->codec_type == codec_type) {
3035             /* check that parameters are OK */
3036             switch (codec_type) {
3037             case AVMEDIA_TYPE_AUDIO:
3038                 if (st->codec->sample_rate != 0 &&
3039                     st->codec->channels != 0)
3040                     goto the_end;
3041                 break;
3042             case AVMEDIA_TYPE_VIDEO:
3043             case AVMEDIA_TYPE_SUBTITLE:
3044                 goto the_end;
3045             default:
3046                 break;
3047             }
3048         }
3049     }
3050  the_end:
3051     stream_component_close(is, old_index);
3052     stream_component_open(is, stream_index);
3053     if (codec_type == AVMEDIA_TYPE_VIDEO)
3054         is->queue_attachments_req = 1;
3055 }
3056
3057
3058 static void toggle_full_screen(VideoState *is)
3059 {
3060 #if defined(__APPLE__) && SDL_VERSION_ATLEAST(1, 2, 14)
3061     /* OS X needs to reallocate the SDL overlays */
3062     int i;
3063     for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++)
3064         is->pictq[i].reallocate = 1;
3065 #endif
3066     is_full_screen = !is_full_screen;
3067     video_open(is, 1, NULL);
3068 }
3069
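/* Cycle the display mode between video, waveform and spectrum views, skipping
 * modes that have no usable stream (video mode needs a video stream, the audio
 * visualizations need an audio stream). */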
3070 static void toggle_audio_display(VideoState *is)
3071 {
3072     int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
3073     int next = is->show_mode;
3074     do {
3075         next = (next + 1) % SHOW_MODE_NB;
3076     } while (next != is->show_mode && ((next == SHOW_MODE_VIDEO && !is->video_st) || (next != SHOW_MODE_VIDEO && !is->audio_st)));
3077     if (is->show_mode != next) {
3078         fill_rectangle(screen,
3079                     is->xleft, is->ytop, is->width, is->height,
3080                     bgcolor, 1);
3081         is->force_refresh = 1;
3082         is->show_mode = next;
3083     }
3084 }
3085
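/* Pump SDL events and keep refreshing the display (at most every REFRESH_RATE
 * seconds) until an event becomes available; the mouse cursor is hidden after
 * CURSOR_HIDE_DELAY microseconds of inactivity. */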
3086 static void refresh_loop_wait_event(VideoState *is, SDL_Event *event) {
3087     double remaining_time = 0.0;
3088     SDL_PumpEvents();
3089     while (!SDL_PeepEvents(event, 1, SDL_GETEVENT, SDL_ALLEVENTS)) {
3090         if (!cursor_hidden && av_gettime() - cursor_last_shown > CURSOR_HIDE_DELAY) {
3091             SDL_ShowCursor(0);
3092             cursor_hidden = 1;
3093         }
3094         if (remaining_time > 0.0)
3095             av_usleep((int64_t)(remaining_time * 1000000.0));
3096         remaining_time = REFRESH_RATE;
3097         if (is->show_mode != SHOW_MODE_NONE && (!is->paused || is->force_refresh))
3098             video_refresh(is, &remaining_time);
3099         SDL_PumpEvents();
3100     }
3101 }
3102
3103 /* handle an event sent by the GUI */
3104 static void event_loop(VideoState *cur_stream)
3105 {
3106     SDL_Event event;
3107     double incr, pos, frac;
3108
3109     for (;;) {
3110         double x;
3111         refresh_loop_wait_event(cur_stream, &event);
3112         switch (event.type) {
3113         case SDL_KEYDOWN:
3114             if (exit_on_keydown) {
3115                 do_exit(cur_stream);
3116                 break;
3117             }
3118             switch (event.key.keysym.sym) {
3119             case SDLK_ESCAPE:
3120             case SDLK_q:
3121                 do_exit(cur_stream);
3122                 break;
3123             case SDLK_f:
3124                 toggle_full_screen(cur_stream);
3125                 cur_stream->force_refresh = 1;
3126                 break;
3127             case SDLK_p:
3128             case SDLK_SPACE:
3129                 toggle_pause(cur_stream);
3130                 break;
3131             case SDLK_s: // S: Step to next frame
3132                 step_to_next_frame(cur_stream);
3133                 break;
3134             case SDLK_a:
3135                 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
3136                 break;
3137             case SDLK_v:
3138                 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
3139                 break;
3140             case SDLK_t:
3141                 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
3142                 break;
3143             case SDLK_w:
3144                 toggle_audio_display(cur_stream);
3145                 break;
3146             case SDLK_PAGEUP:
3147                 incr = 600.0;
3148                 goto do_seek;
3149             case SDLK_PAGEDOWN:
3150                 incr = -600.0;
3151                 goto do_seek;
3152             case SDLK_LEFT:
3153                 incr = -10.0;
3154                 goto do_seek;
3155             case SDLK_RIGHT:
3156                 incr = 10.0;
3157                 goto do_seek;
3158             case SDLK_UP:
3159                 incr = 60.0;
3160                 goto do_seek;
3161             case SDLK_DOWN:
3162                 incr = -60.0;
3163             do_seek:
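                    /* Relative seek: either by byte offset, estimated from the
                     * stream bitrate when it is known, or by timestamp relative
                     * to the current master clock position. */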
3164                     if (seek_by_bytes) {
3165                         if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos >= 0) {
3166                             pos = cur_stream->video_current_pos;
3167                         } else if (cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos >= 0) {
3168                             pos = cur_stream->audio_pkt.pos;
3169                         } else
3170                             pos = avio_tell(cur_stream->ic->pb);
3171                         if (cur_stream->ic->bit_rate)
3172                             incr *= cur_stream->ic->bit_rate / 8.0;
3173                         else
3174                             incr *= 180000.0;
3175                         pos += incr;
3176                         stream_seek(cur_stream, pos, incr, 1);
3177                     } else {
3178                         pos = get_master_clock(cur_stream);
3179                         if (isnan(pos))
3180                             pos = (double)cur_stream->seek_pos / AV_TIME_BASE;
3181                         pos += incr;
3182                         if (cur_stream->ic->start_time != AV_NOPTS_VALUE && pos < cur_stream->ic->start_time / (double)AV_TIME_BASE)
3183                             pos = cur_stream->ic->start_time / (double)AV_TIME_BASE;
3184                         stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
3185                     }
3186                 break;
3187             default:
3188                 break;
3189             }
3190             break;
3191         case SDL_VIDEOEXPOSE:
3192             cur_stream->force_refresh = 1;
3193             break;
3194         case SDL_MOUSEBUTTONDOWN:
3195             if (exit_on_mousedown) {
3196                 do_exit(cur_stream);
3197                 break;
3198             }
3199         case SDL_MOUSEMOTION:
3200             if (cursor_hidden) {
3201                 SDL_ShowCursor(1);
3202                 cursor_hidden = 0;
3203             }
3204             cursor_last_shown = av_gettime();
3205             if (event.type == SDL_MOUSEBUTTONDOWN) {
3206                 x = event.button.x;
3207             } else {
3208                 if (event.motion.state != SDL_PRESSED)
3209                     break;
3210                 x = event.motion.x;
3211             }
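                /* A click (or drag) at horizontal position x maps to the same
                 * fraction of the file: by byte offset when seeking by bytes or
                 * when the duration is unknown, otherwise by timestamp. */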
3212                 if (seek_by_bytes || cur_stream->ic->duration <= 0) {
3213                     uint64_t size =  avio_size(cur_stream->ic->pb);
3214                     stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
3215                 } else {
3216                     int64_t ts;
3217                     int ns, hh, mm, ss;
3218                     int tns, thh, tmm, tss;
3219                     tns  = cur_stream->ic->duration / 1000000LL;
3220                     thh  = tns / 3600;
3221                     tmm  = (tns % 3600) / 60;
3222                     tss  = (tns % 60);
3223                     frac = x / cur_stream->width;
3224                     ns   = frac * tns;
3225                     hh   = ns / 3600;
3226                     mm   = (ns % 3600) / 60;
3227                     ss   = (ns % 60);
3228                     fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
3229                             hh, mm, ss, thh, tmm, tss);
3230                     ts = frac * cur_stream->ic->duration;
3231                     if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
3232                         ts += cur_stream->ic->start_time;
3233                     stream_seek(cur_stream, ts, 0, 0);
3234                 }
3235             break;
3236         case SDL_VIDEORESIZE:
3237                 screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
3238                                           SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
3239                 screen_width  = cur_stream->width  = event.resize.w;
3240                 screen_height = cur_stream->height = event.resize.h;
3241                 cur_stream->force_refresh = 1;
3242             break;
3243         case SDL_QUIT:
3244         case FF_QUIT_EVENT:
3245             do_exit(cur_stream);
3246             break;
3247         case FF_ALLOC_EVENT:
3248             alloc_picture(event.user.data1);
3249             break;
3250         default:
3251             break;
3252         }
3253     }
3254 }
3255
3256 static int opt_frame_size(void *optctx, const char *opt, const char *arg)
3257 {
3258     av_log(NULL, AV_LOG_WARNING, "Option -s is deprecated, use -video_size.\n");
3259     return opt_default(NULL, "video_size", arg);
3260 }
3261
3262 static int opt_width(void *optctx, const char *opt, const char *arg)
3263 {
3264     screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
3265     return 0;
3266 }
3267
3268 static int opt_height(void *optctx, const char *opt, const char *arg)
3269 {
3270     screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
3271     return 0;
3272 }
3273
3274 static int opt_format(void *optctx, const char *opt, const char *arg)
3275 {
3276     file_iformat = av_find_input_format(arg);
3277     if (!file_iformat) {
3278         fprintf(stderr, "Unknown input format: %s\n", arg);
3279         return AVERROR(EINVAL);
3280     }
3281     return 0;
3282 }
3283
3284 static int opt_frame_pix_fmt(void *optctx, const char *opt, const char *arg)
3285 {
3286     av_log(NULL, AV_LOG_WARNING, "Option -pix_fmt is deprecated, use -pixel_format.\n");
3287     return opt_default(NULL, "pixel_format", arg);
3288 }
3289
3290 static int opt_sync(void *optctx, const char *opt, const char *arg)
3291 {
3292     if (!strcmp(arg, "audio"))
3293         av_sync_type = AV_SYNC_AUDIO_MASTER;
3294     else if (!strcmp(arg, "video"))
3295         av_sync_type = AV_SYNC_VIDEO_MASTER;
3296     else if (!strcmp(arg, "ext"))
3297         av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
3298     else {
3299         fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
3300         exit(1);
3301     }
3302     return 0;
3303 }
3304
3305 static int opt_seek(void *optctx, const char *opt, const char *arg)
3306 {
3307     start_time = parse_time_or_die(opt, arg, 1);
3308     return 0;
3309 }
3310
3311 static int opt_duration(void *optctx, const char *opt, const char *arg)
3312 {
3313     duration = parse_time_or_die(opt, arg, 1);
3314     return 0;
3315 }
3316
3317 static int opt_show_mode(void *optctx, const char *opt, const char *arg)
3318 {
3319     show_mode = !strcmp(arg, "video") ? SHOW_MODE_VIDEO :
3320                 !strcmp(arg, "waves") ? SHOW_MODE_WAVES :
3321                 !strcmp(arg, "rdft" ) ? SHOW_MODE_RDFT  :
3322                 parse_number_or_die(opt, arg, OPT_INT, 0, SHOW_MODE_NB-1);
3323     return 0;
3324 }
3325
3326 static void opt_input_file(void *optctx, const char *filename)
3327 {
3328     if (input_filename) {
3329         fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
3330                 filename, input_filename);
3331         exit(1);
3332     }
3333     if (!strcmp(filename, "-"))
3334         filename = "pipe:";
3335     input_filename = filename;
3336 }
3337
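/* Handle "-codec:<specifier> <name>" (e.g. -codec:v <decoder_name>) by storing
 * the forced decoder name for the selected media type. */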
3338 static int opt_codec(void *optctx, const char *opt, const char *arg)
3339 {
3340    const char *spec = strchr(opt, ':');
3341    if (!spec) {
3342        fprintf(stderr, "No media specifier was given in '%s' for option '%s'\n",
3343                arg, opt);
3344        return AVERROR(EINVAL);
3345    }
3346    spec++;
3347    switch (spec[0]) {
3348    case 'a' :    audio_codec_name = arg; break;
3349    case 's' : subtitle_codec_name = arg; break;
3350    case 'v' :    video_codec_name = arg; break;
3351    default:
3352        fprintf(stderr, "Invalid media specifier '%s' in option '%s'\n", spec, opt);
3353        return AVERROR(EINVAL);
3354    }
3355    return 0;
3356 }
3357
3358 static int dummy;
3359
3360 static const OptionDef options[] = {
3361 #include "cmdutils_common_opts.h"
3362     { "x", HAS_ARG, { .func_arg = opt_width }, "force displayed width", "width" },
3363     { "y", HAS_ARG, { .func_arg = opt_height }, "force displayed height", "height" },
3364     { "s", HAS_ARG | OPT_VIDEO, { .func_arg = opt_frame_size }, "set frame size (WxH or abbreviation)", "size" },
3365     { "fs", OPT_BOOL, { &is_full_screen }, "force full screen" },
3366     { "an", OPT_BOOL, { &audio_disable }, "disable audio" },
3367     { "vn", OPT_BOOL, { &video_disable }, "disable video" },
3368     { "sn", OPT_BOOL, { &subtitle_disable }, "disable subtitling" },
3369     { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_AUDIO] }, "select desired audio stream", "stream_number" },
3370     { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_VIDEO] }, "select desired video stream", "stream_number" },
3371     { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_SUBTITLE] }, "select desired subtitle stream", "stream_number" },
3372     { "ss", HAS_ARG, { .func_arg = opt_seek }, "seek to a given position in seconds", "pos" },
3373     { "t", HAS_ARG, { .func_arg = opt_duration }, "play  \"duration\" seconds of audio/video", "duration" },
3374     { "bytes", OPT_INT | HAS_ARG, { &seek_by_bytes }, "seek by bytes 0=off 1=on -1=auto", "val" },
3375     { "nodisp", OPT_BOOL, { &display_disable }, "disable graphical display" },
3376     { "f", HAS_ARG, { .func_arg = opt_format }, "force format", "fmt" },
3377     { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, { .func_arg = opt_frame_pix_fmt }, "set pixel format", "format" },
3378     { "stats", OPT_BOOL | OPT_EXPERT, { &show_status }, "show status", "" },
3379     { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, { &workaround_bugs }, "workaround bugs", "" },
3380     { "fast", OPT_BOOL | OPT_EXPERT, { &fast }, "non spec compliant optimizations", "" },
3381     { "genpts", OPT_BOOL | OPT_EXPERT, { &genpts }, "generate pts", "" },
3382     { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, { &decoder_reorder_pts }, "let decoder reorder pts 0=off 1=on -1=auto", ""},
3383     { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, { &lowres }, "", "" },
3384     { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, { &idct }, "set idct algo",  "algo" },
3385     { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, { &error_concealment }, "set error concealment options",  "bit_mask" },
3386     { "sync", HAS_ARG | OPT_EXPERT, { .func_arg = opt_sync }, "set audio-video sync. type (type=audio/video/ext)", "type" },
3387     { "autoexit", OPT_BOOL | OPT_EXPERT, { &autoexit }, "exit at the end", "" },
3388     { "exitonkeydown", OPT_BOOL | OPT_EXPERT, { &exit_on_keydown }, "exit on key down", "" },
3389     { "exitonmousedown", OPT_BOOL | OPT_EXPERT, { &exit_on_mousedown }, "exit on mouse down", "" },
3390     { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, { &loop }, "set number of times the playback shall be looped", "loop count" },
3391     { "framedrop", OPT_BOOL | OPT_EXPERT, { &framedrop }, "drop frames when cpu is too slow", "" },
3392     { "infbuf", OPT_BOOL | OPT_EXPERT, { &infinite_buffer }, "don't limit the input buffer size (useful with realtime streams)", "" },
3393     { "window_title", OPT_STRING | HAS_ARG, { &window_title }, "set window title", "window title" },
3394 #if CONFIG_AVFILTER
3395     { "vf", OPT_STRING | HAS_ARG, { &vfilters }, "set video filters", "filter_graph" },
3396     { "af", OPT_STRING | HAS_ARG, { &afilters }, "set audio filters", "filter_graph" },
3397 #endif
3398     { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, { &rdftspeed }, "rdft speed", "msecs" },
3399     { "showmode", HAS_ARG, { .func_arg = opt_show_mode}, "select show mode (0 = video, 1 = waves, 2 = RDFT)", "mode" },
3400     { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, { .func_arg = opt_default }, "generic catch all option", "" },
3401     { "i", OPT_BOOL, { &dummy}, "read specified file", "input_file"},
3402     { "codec", HAS_ARG, { .func_arg = opt_codec}, "force decoder", "decoder_name" },
3403     { "acodec", HAS_ARG | OPT_STRING | OPT_EXPERT, {    &audio_codec_name }, "force audio decoder",    "decoder_name" },
3404     { "scodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &subtitle_codec_name }, "force subtitle decoder", "decoder_name" },
3405     { "vcodec", HAS_ARG | OPT_STRING | OPT_EXPERT, {    &video_codec_name }, "force video decoder",    "decoder_name" },
3406     { NULL, },
3407 };
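/* A few illustrative invocations built only from the options above (the input
 * file names are placeholders):
 *   ffplay -ss 30 -t 60 input.mkv        play one minute starting at 0:30
 *   ffplay -vn -showmode rdft input.mp3  decode the audio only and show its RDFT spectrum
 *   ffplay -autoexit -nodisp input.wav   play the audio without a window, then exit
 */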
3408
3409 static void show_usage(void)
3410 {
3411     av_log(NULL, AV_LOG_INFO, "Simple media player\n");
3412     av_log(NULL, AV_LOG_INFO, "usage: %s [options] input_file\n", program_name);
3413     av_log(NULL, AV_LOG_INFO, "\n");
3414 }
3415
3416 void show_help_default(const char *opt, const char *arg)
3417 {
3418     av_log_set_callback(log_callback_help);
3419     show_usage();
3420     show_help_options(options, "Main options:", 0, OPT_EXPERT, 0);
3421     show_help_options(options, "Advanced options:", OPT_EXPERT, 0, 0);
3422     printf("\n");
3423     show_help_children(avcodec_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3424     show_help_children(avformat_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3425 #if !CONFIG_AVFILTER
3426     show_help_children(sws_get_class(), AV_OPT_FLAG_ENCODING_PARAM);
3427 #else
3428     show_help_children(avfilter_get_class(), AV_OPT_FLAG_FILTERING_PARAM);
3429 #endif
3430     printf("\nWhile playing:\n"
3431            "q, ESC              quit\n"
3432            "f                   toggle full screen\n"
3433            "p, SPC              pause\n"
3434            "a                   cycle audio channel\n"
3435            "v                   cycle video channel\n"
3436            "t                   cycle subtitle channel\n"
3437            "w                   show audio waves\n"
3438            "s                   activate frame-step mode\n"
3439            "left/right          seek backward/forward 10 seconds\n"
3440            "down/up             seek backward/forward 1 minute\n"
3441            "page down/page up   seek backward/forward 10 minutes\n"
3442            "mouse click         seek to percentage in file corresponding to fraction of width\n"
3443            );
3444 }
3445
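/* libavcodec lock manager callback backed by SDL mutexes; as required by
 * av_lockmgr_register(), it returns 0 on success and non-zero on failure. */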
3446 static int lockmgr(void **mtx, enum AVLockOp op)
3447 {
3448    switch(op) {
3449       case AV_LOCK_CREATE:
3450           *mtx = SDL_CreateMutex();
3451           if(!*mtx)
3452               return 1;
3453           return 0;
3454       case AV_LOCK_OBTAIN:
3455           return !!SDL_LockMutex(*mtx);
3456       case AV_LOCK_RELEASE:
3457           return !!SDL_UnlockMutex(*mtx);
3458       case AV_LOCK_DESTROY:
3459           SDL_DestroyMutex(*mtx);
3460           return 0;
3461    }
3462    return 1;
3463 }
3464
3465 /* program entry point, runs on the main thread */
3466 int main(int argc, char **argv)
3467 {
3468     int flags;
3469     VideoState *is;
3470     char dummy_videodriver[] = "SDL_VIDEODRIVER=dummy";
3471
3472     av_log_set_flags(AV_LOG_SKIP_REPEATED);
3473     parse_loglevel(argc, argv, options);
3474
3475     /* register all codecs, demuxers and protocols */
3476     avcodec_register_all();
3477 #if CONFIG_AVDEVICE
3478     avdevice_register_all();
3479 #endif
3480 #if CONFIG_AVFILTER
3481     avfilter_register_all();
3482 #endif
3483     av_register_all();
3484     avformat_network_init();
3485
3486     init_opts();
3487
3488     signal(SIGINT , sigterm_handler); /* Interrupt (ANSI).    */
3489     signal(SIGTERM, sigterm_handler); /* Termination (ANSI).  */
3490
3491     show_banner(argc, argv, options);
3492
3493     parse_options(NULL, argc, argv, options, opt_input_file);
3494
3495     if (!input_filename) {
3496         show_usage();
3497         fprintf(stderr, "An input file must be specified\n");
3498         fprintf(stderr, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
3499         exit(1);
3500     }
3501
3502     if (display_disable) {
3503         video_disable = 1;
3504     }
3505     flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3506     if (audio_disable)
3507         flags &= ~SDL_INIT_AUDIO;
3508     if (display_disable)
3509         SDL_putenv(dummy_videodriver); /* For the event queue, we always need a video driver. */
3510 #if !defined(__MINGW32__) && !defined(__APPLE__)
3511     flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
3512 #endif
3513     if (SDL_Init (flags)) {
3514         fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
3515         fprintf(stderr, "(Did you set the DISPLAY variable?)\n");
3516         exit(1);
3517     }
3518
3519     if (!display_disable) {
3520         const SDL_VideoInfo *vi = SDL_GetVideoInfo();
3521         fs_screen_width = vi->current_w;
3522         fs_screen_height = vi->current_h;
3523     }
3524
3525     SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
3526     SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3527     SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3528
3529     if (av_lockmgr_register(lockmgr)) {
3530         fprintf(stderr, "Could not initialize lock manager!\n");
3531         do_exit(NULL);
3532     }
3533
3534     av_init_packet(&flush_pkt);
3535     flush_pkt.data = (char *)(intptr_t)"FLUSH";
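    /* flush_pkt is a sentinel: it is pushed into the packet queues on seeks so
     * the decoder threads know to flush their codec buffers and start a new
     * packet serial, discarding frames that belong to the old position. */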
3536
3537     is = stream_open(input_filename, file_iformat);
3538     if (!is) {
3539         fprintf(stderr, "Failed to initialize VideoState!\n");
3540         do_exit(NULL);
3541     }
3542
3543     event_loop(is);
3544
3545     /* never returns */
3546
3547     return 0;
3548 }