ffplay: use 0 frame delay if redisplaying an already displayed frame
1 /*
2  * Copyright (c) 2003 Fabrice Bellard
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20
21 /**
22  * @file
23  * simple media player based on the FFmpeg libraries
24  */
25
26 #include "config.h"
27 #include <inttypes.h>
28 #include <math.h>
29 #include <limits.h>
30 #include <signal.h>
31 #include "libavutil/avstring.h"
32 #include "libavutil/colorspace.h"
33 #include "libavutil/mathematics.h"
34 #include "libavutil/pixdesc.h"
35 #include "libavutil/imgutils.h"
36 #include "libavutil/dict.h"
37 #include "libavutil/parseutils.h"
38 #include "libavutil/samplefmt.h"
39 #include "libavutil/avassert.h"
40 #include "libavutil/time.h"
41 #include "libavformat/avformat.h"
42 #include "libavdevice/avdevice.h"
43 #include "libswscale/swscale.h"
44 #include "libavutil/opt.h"
45 #include "libavcodec/avfft.h"
46 #include "libswresample/swresample.h"
47
48 #if CONFIG_AVFILTER
49 # include "libavfilter/avcodec.h"
50 # include "libavfilter/avfilter.h"
51 # include "libavfilter/buffersink.h"
52 # include "libavfilter/buffersrc.h"
53 #endif
54
55 #include <SDL.h>
56 #include <SDL_thread.h>
57
58 #include "cmdutils.h"
59
60 #include <assert.h>
61
62 const char program_name[] = "ffplay";
63 const int program_birth_year = 2003;
64
65 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
66 #define MIN_FRAMES 5
67
68 /* SDL audio buffer size, in samples. Should be small to have precise
69    A/V sync as SDL does not have hardware buffer fullness info. */
70 #define SDL_AUDIO_BUFFER_SIZE 1024
71
72 /* no AV sync correction is done if below the minimum AV sync threshold */
73 #define AV_SYNC_THRESHOLD_MIN 0.01
74 /* AV sync correction is done if above the maximum AV sync threshold */
75 #define AV_SYNC_THRESHOLD_MAX 0.1
76 /* If a frame duration is longer than this, it will not be duplicated to compensate for AV sync */
77 #define AV_SYNC_FRAMEDUP_THRESHOLD 0.1
78 /* no AV correction is done if the error is too big */
79 #define AV_NOSYNC_THRESHOLD 10.0
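/* Note: these thresholds are consumed further below: compute_target_delay() uses the
   MIN/MAX/FRAMEDUP values to decide whether the next frame's delay must be shortened,
   kept or extended, sync_clock_to_slave() uses AV_NOSYNC_THRESHOLD to detect hopeless
   drift, and video_refresh() uses AV_SYNC_THRESHOLD_MAX to resync frame_timer after a
   long stall. */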
80
81 /* maximum audio speed change to get correct sync */
82 #define SAMPLE_CORRECTION_PERCENT_MAX 10
83
84 /* external clock speed adjustment constants for realtime sources based on buffer fullness */
85 #define EXTERNAL_CLOCK_SPEED_MIN  0.900
86 #define EXTERNAL_CLOCK_SPEED_MAX  1.010
87 #define EXTERNAL_CLOCK_SPEED_STEP 0.001
88
89 /* we use about AUDIO_DIFF_AVG_NB A-V differences to compute the average */
90 #define AUDIO_DIFF_AVG_NB   20
91
92 /* poll for a possibly required screen refresh at least this often; should be less than 1/fps */
93 #define REFRESH_RATE 0.01
94
95 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
96 /* TODO: We assume that a decoded and resampled frame fits into this buffer */
97 #define SAMPLE_ARRAY_SIZE (8 * 65536)
98
99 #define CURSOR_HIDE_DELAY 1000000
100
101 static int64_t sws_flags = SWS_BICUBIC;
102
103 typedef struct MyAVPacketList {
104     AVPacket pkt;
105     struct MyAVPacketList *next;
106     int serial;
107 } MyAVPacketList;
108
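/* Note: 'serial' implements stale-data detection across seeks and flushes:
   packet_queue_put_private() bumps q->serial whenever the flush packet is queued, and
   every queued packet, decoded picture and clock carries the serial it was produced
   under, so consumers can discard or ignore anything from an older generation. */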
109 typedef struct PacketQueue {
110     MyAVPacketList *first_pkt, *last_pkt;
111     int nb_packets;
112     int size;
113     int abort_request;
114     int serial;
115     SDL_mutex *mutex;
116     SDL_cond *cond;
117 } PacketQueue;
118
119 #define VIDEO_PICTURE_QUEUE_SIZE 3
120 #define SUBPICTURE_QUEUE_SIZE 4
121
122 typedef struct VideoPicture {
123     double pts;             // presentation timestamp for this picture
124     int64_t pos;            // byte position in file
125     SDL_Overlay *bmp;
126     int width, height; /* source height & width */
127     int allocated;
128     int reallocate;
129     int serial;
130
131     AVRational sar;
132 } VideoPicture;
133
134 typedef struct SubPicture {
135     double pts; /* presentation time stamp for this picture */
136     AVSubtitle sub;
137 } SubPicture;
138
139 typedef struct AudioParams {
140     int freq;
141     int channels;
142     int64_t channel_layout;
143     enum AVSampleFormat fmt;
144 } AudioParams;
145
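/* A clock is stored as pts_drift = pts - last_updated, so that get_clock() for an
   unpaused clock at speed 1.0 can simply return pts_drift + now (i.e. pts plus the time
   elapsed since the last update) without being updated on every refresh; speeds other
   than 1.0 scale the elapsed time, and a serial mismatch marks the value as obsolete (NAN). */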
146 typedef struct Clock {
147     double pts;           /* clock base */
148     double pts_drift;     /* clock base minus time at which we updated the clock */
149     double last_updated;
150     double speed;
151     int serial;           /* clock is based on a packet with this serial */
152     int paused;
153     int *queue_serial;    /* pointer to the current packet queue serial, used for obsolete clock detection */
154 } Clock;
155
156 enum {
157     AV_SYNC_AUDIO_MASTER, /* default choice */
158     AV_SYNC_VIDEO_MASTER,
159     AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
160 };
161
162 typedef struct VideoState {
163     SDL_Thread *read_tid;
164     SDL_Thread *video_tid;
165     AVInputFormat *iformat;
166     int no_background;
167     int abort_request;
168     int force_refresh;
169     int paused;
170     int last_paused;
171     int queue_attachments_req;
172     int seek_req;
173     int seek_flags;
174     int64_t seek_pos;
175     int64_t seek_rel;
176     int read_pause_return;
177     AVFormatContext *ic;
178     int realtime;
179
180     Clock audclk;
181     Clock vidclk;
182     Clock extclk;
183
184     int audio_stream;
185
186     int av_sync_type;
187
188     double audio_clock;
189     int audio_clock_serial;
190     double audio_diff_cum; /* used for AV difference average computation */
191     double audio_diff_avg_coef;
192     double audio_diff_threshold;
193     int audio_diff_avg_count;
194     AVStream *audio_st;
195     PacketQueue audioq;
196     int audio_hw_buf_size;
197     uint8_t silence_buf[SDL_AUDIO_BUFFER_SIZE];
198     uint8_t *audio_buf;
199     uint8_t *audio_buf1;
200     unsigned int audio_buf_size; /* in bytes */
201     unsigned int audio_buf1_size;
202     int audio_buf_index; /* in bytes */
203     int audio_write_buf_size;
204     int audio_buf_frames_pending;
205     AVPacket audio_pkt_temp;
206     AVPacket audio_pkt;
207     int audio_pkt_temp_serial;
208     int audio_last_serial;
209     struct AudioParams audio_src;
210 #if CONFIG_AVFILTER
211     struct AudioParams audio_filter_src;
212 #endif
213     struct AudioParams audio_tgt;
214     struct SwrContext *swr_ctx;
215     double audio_current_pts;
216     double audio_current_pts_drift;
217     int frame_drops_early;
218     int frame_drops_late;
219     AVFrame *frame;
220
221     enum ShowMode {
222         SHOW_MODE_NONE = -1, SHOW_MODE_VIDEO = 0, SHOW_MODE_WAVES, SHOW_MODE_RDFT, SHOW_MODE_NB
223     } show_mode;
224     int16_t sample_array[SAMPLE_ARRAY_SIZE];
225     int sample_array_index;
226     int last_i_start;
227     RDFTContext *rdft;
228     int rdft_bits;
229     FFTSample *rdft_data;
230     int xpos;
231     double last_vis_time;
232
233     SDL_Thread *subtitle_tid;
234     int subtitle_stream;
235     int subtitle_stream_changed;
236     AVStream *subtitle_st;
237     PacketQueue subtitleq;
238     SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
239     int subpq_size, subpq_rindex, subpq_windex;
240     SDL_mutex *subpq_mutex;
241     SDL_cond *subpq_cond;
242
243     double frame_timer;
244     double frame_last_pts;
245     double frame_last_duration;
246     double frame_last_dropped_pts;
247     double frame_last_returned_time;
248     double frame_last_filter_delay;
249     int64_t frame_last_dropped_pos;
250     int frame_last_dropped_serial;
251     int video_stream;
252     AVStream *video_st;
253     PacketQueue videoq;
254     double video_current_pts;       // current displayed pts
255     double video_current_pts_drift; // video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
256     int64_t video_current_pos;      // current displayed file pos
257     double max_frame_duration;      // maximum duration of a frame - above this, we consider the jump a timestamp discontinuity
258     VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
259     int pictq_size, pictq_rindex, pictq_windex;
260     SDL_mutex *pictq_mutex;
261     SDL_cond *pictq_cond;
262 #if !CONFIG_AVFILTER
263     struct SwsContext *img_convert_ctx;
264 #endif
265     SDL_Rect last_display_rect;
266
267     char filename[1024];
268     int width, height, xleft, ytop;
269     int step;
270
271 #if CONFIG_AVFILTER
272     AVFilterContext *in_video_filter;   // the first filter in the video chain
273     AVFilterContext *out_video_filter;  // the last filter in the video chain
274     AVFilterContext *in_audio_filter;   // the first filter in the audio chain
275     AVFilterContext *out_audio_filter;  // the last filter in the audio chain
276     AVFilterGraph *agraph;              // audio filter graph
277 #endif
278
279     int last_video_stream, last_audio_stream, last_subtitle_stream;
280
281     SDL_cond *continue_read_thread;
282 } VideoState;
283
284 /* options specified by the user */
285 static AVInputFormat *file_iformat;
286 static const char *input_filename;
287 static const char *window_title;
288 static int fs_screen_width;
289 static int fs_screen_height;
290 static int default_width  = 640;
291 static int default_height = 480;
292 static int screen_width  = 0;
293 static int screen_height = 0;
294 static int audio_disable;
295 static int video_disable;
296 static int subtitle_disable;
297 static int wanted_stream[AVMEDIA_TYPE_NB] = {
298     [AVMEDIA_TYPE_AUDIO]    = -1,
299     [AVMEDIA_TYPE_VIDEO]    = -1,
300     [AVMEDIA_TYPE_SUBTITLE] = -1,
301 };
302 static int seek_by_bytes = -1;
303 static int display_disable;
304 static int show_status = 1;
305 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
306 static int64_t start_time = AV_NOPTS_VALUE;
307 static int64_t duration = AV_NOPTS_VALUE;
308 static int workaround_bugs = 1;
309 static int fast = 0;
310 static int genpts = 0;
311 static int lowres = 0;
312 static int idct = FF_IDCT_AUTO;
313 static int error_concealment = 3;
314 static int decoder_reorder_pts = -1;
315 static int autoexit;
316 static int exit_on_keydown;
317 static int exit_on_mousedown;
318 static int loop = 1;
319 static int framedrop = -1;
320 static int infinite_buffer = -1;
321 static enum ShowMode show_mode = SHOW_MODE_NONE;
322 static const char *audio_codec_name;
323 static const char *subtitle_codec_name;
324 static const char *video_codec_name;
325 double rdftspeed = 0.02;
326 static int64_t cursor_last_shown;
327 static int cursor_hidden = 0;
328 #if CONFIG_AVFILTER
329 static char *vfilters = NULL;
330 static char *afilters = NULL;
331 #endif
332
333 /* current context */
334 static int is_full_screen;
335 static int64_t audio_callback_time;
336
337 static AVPacket flush_pkt;
338
339 #define FF_ALLOC_EVENT   (SDL_USEREVENT)
340 #define FF_QUIT_EVENT    (SDL_USEREVENT + 2)
341
342 static SDL_Surface *screen;
343
344 static inline
345 int cmp_audio_fmts(enum AVSampleFormat fmt1, int64_t channel_count1,
346                    enum AVSampleFormat fmt2, int64_t channel_count2)
347 {
348     /* If channel count == 1, planar and non-planar formats are the same */
349     if (channel_count1 == 1 && channel_count2 == 1)
350         return av_get_packed_sample_fmt(fmt1) != av_get_packed_sample_fmt(fmt2);
351     else
352         return channel_count1 != channel_count2 || fmt1 != fmt2;
353 }
354
355 static inline
356 int64_t get_valid_channel_layout(int64_t channel_layout, int channels)
357 {
358     if (channel_layout && av_get_channel_layout_nb_channels(channel_layout) == channels)
359         return channel_layout;
360     else
361         return 0;
362 }
363
364 static int packet_queue_put(PacketQueue *q, AVPacket *pkt);
365
366 static int packet_queue_put_private(PacketQueue *q, AVPacket *pkt)
367 {
368     MyAVPacketList *pkt1;
369
370     if (q->abort_request)
371        return -1;
372
373     pkt1 = av_malloc(sizeof(MyAVPacketList));
374     if (!pkt1)
375         return -1;
376     pkt1->pkt = *pkt;
377     pkt1->next = NULL;
378     if (pkt == &flush_pkt)
379         q->serial++;
380     pkt1->serial = q->serial;
381
382     if (!q->last_pkt)
383         q->first_pkt = pkt1;
384     else
385         q->last_pkt->next = pkt1;
386     q->last_pkt = pkt1;
387     q->nb_packets++;
388     q->size += pkt1->pkt.size + sizeof(*pkt1);
389     /* XXX: should duplicate packet data in DV case */
390     SDL_CondSignal(q->cond);
391     return 0;
392 }
393
394 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
395 {
396     int ret;
397
398     /* duplicate the packet */
399     if (pkt != &flush_pkt && av_dup_packet(pkt) < 0)
400         return -1;
401
402     SDL_LockMutex(q->mutex);
403     ret = packet_queue_put_private(q, pkt);
404     SDL_UnlockMutex(q->mutex);
405
406     if (pkt != &flush_pkt && ret < 0)
407         av_free_packet(pkt);
408
409     return ret;
410 }
411
412 /* packet queue handling */
413 static void packet_queue_init(PacketQueue *q)
414 {
415     memset(q, 0, sizeof(PacketQueue));
416     q->mutex = SDL_CreateMutex();
417     q->cond = SDL_CreateCond();
418     q->abort_request = 1;
419 }
420
421 static void packet_queue_flush(PacketQueue *q)
422 {
423     MyAVPacketList *pkt, *pkt1;
424
425     SDL_LockMutex(q->mutex);
426     for (pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
427         pkt1 = pkt->next;
428         av_free_packet(&pkt->pkt);
429         av_freep(&pkt);
430     }
431     q->last_pkt = NULL;
432     q->first_pkt = NULL;
433     q->nb_packets = 0;
434     q->size = 0;
435     SDL_UnlockMutex(q->mutex);
436 }
437
438 static void packet_queue_destroy(PacketQueue *q)
439 {
440     packet_queue_flush(q);
441     SDL_DestroyMutex(q->mutex);
442     SDL_DestroyCond(q->cond);
443 }
444
445 static void packet_queue_abort(PacketQueue *q)
446 {
447     SDL_LockMutex(q->mutex);
448
449     q->abort_request = 1;
450
451     SDL_CondSignal(q->cond);
452
453     SDL_UnlockMutex(q->mutex);
454 }
455
456 static void packet_queue_start(PacketQueue *q)
457 {
458     SDL_LockMutex(q->mutex);
459     q->abort_request = 0;
460     packet_queue_put_private(q, &flush_pkt);
461     SDL_UnlockMutex(q->mutex);
462 }
463
464 /* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
465 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block, int *serial)
466 {
467     MyAVPacketList *pkt1;
468     int ret;
469
470     SDL_LockMutex(q->mutex);
471
472     for (;;) {
473         if (q->abort_request) {
474             ret = -1;
475             break;
476         }
477
478         pkt1 = q->first_pkt;
479         if (pkt1) {
480             q->first_pkt = pkt1->next;
481             if (!q->first_pkt)
482                 q->last_pkt = NULL;
483             q->nb_packets--;
484             q->size -= pkt1->pkt.size + sizeof(*pkt1);
485             *pkt = pkt1->pkt;
486             if (serial)
487                 *serial = pkt1->serial;
488             av_free(pkt1);
489             ret = 1;
490             break;
491         } else if (!block) {
492             ret = 0;
493             break;
494         } else {
495             SDL_CondWait(q->cond, q->mutex);
496         }
497     }
498     SDL_UnlockMutex(q->mutex);
499     return ret;
500 }
501
502 static inline void fill_rectangle(SDL_Surface *screen,
503                                   int x, int y, int w, int h, int color, int update)
504 {
505     SDL_Rect rect;
506     rect.x = x;
507     rect.y = y;
508     rect.w = w;
509     rect.h = h;
510     SDL_FillRect(screen, &rect, color);
511     if (update && w > 0 && h > 0)
512         SDL_UpdateRect(screen, x, y, w, h);
513 }
514
515 /* draw only the border of a rectangle */
516 static void fill_border(int xleft, int ytop, int width, int height, int x, int y, int w, int h, int color, int update)
517 {
518     int w1, w2, h1, h2;
519
520     /* fill the background */
521     w1 = x;
522     if (w1 < 0)
523         w1 = 0;
524     w2 = width - (x + w);
525     if (w2 < 0)
526         w2 = 0;
527     h1 = y;
528     if (h1 < 0)
529         h1 = 0;
530     h2 = height - (y + h);
531     if (h2 < 0)
532         h2 = 0;
533     fill_rectangle(screen,
534                    xleft, ytop,
535                    w1, height,
536                    color, update);
537     fill_rectangle(screen,
538                    xleft + width - w2, ytop,
539                    w2, height,
540                    color, update);
541     fill_rectangle(screen,
542                    xleft + w1, ytop,
543                    width - w1 - w2, h1,
544                    color, update);
545     fill_rectangle(screen,
546                    xleft + w1, ytop + height - h2,
547                    width - w1 - w2, h2,
548                    color, update);
549 }
550
551 #define ALPHA_BLEND(a, oldp, newp, s)\
552 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
553
554 #define RGBA_IN(r, g, b, a, s)\
555 {\
556     unsigned int v = ((const uint32_t *)(s))[0];\
557     a = (v >> 24) & 0xff;\
558     r = (v >> 16) & 0xff;\
559     g = (v >> 8) & 0xff;\
560     b = v & 0xff;\
561 }
562
563 #define YUVA_IN(y, u, v, a, s, pal)\
564 {\
565     unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
566     a = (val >> 24) & 0xff;\
567     y = (val >> 16) & 0xff;\
568     u = (val >> 8) & 0xff;\
569     v = val & 0xff;\
570 }
571
572 #define YUVA_OUT(d, y, u, v, a)\
573 {\
574     ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
575 }
576
577
578 #define BPP 1
579
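/* Alpha-blend one palettized subtitle rectangle onto a YUV420P destination picture.
   The palette has already been converted to YCrCb (see the note on 'pal' below); luma is
   blended per pixel while chroma is accumulated and blended per 2x2 block, which is why
   the main loops walk two source/destination lines and two pixels at a time, with extra
   handling for the odd leading/trailing rows and columns. */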
580 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
581 {
582     int wrap, wrap3, width2, skip2;
583     int y, u, v, a, u1, v1, a1, w, h;
584     uint8_t *lum, *cb, *cr;
585     const uint8_t *p;
586     const uint32_t *pal;
587     int dstx, dsty, dstw, dsth;
588
589     dstw = av_clip(rect->w, 0, imgw);
590     dsth = av_clip(rect->h, 0, imgh);
591     dstx = av_clip(rect->x, 0, imgw - dstw);
592     dsty = av_clip(rect->y, 0, imgh - dsth);
593     lum = dst->data[0] + dsty * dst->linesize[0];
594     cb  = dst->data[1] + (dsty >> 1) * dst->linesize[1];
595     cr  = dst->data[2] + (dsty >> 1) * dst->linesize[2];
596
597     width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
598     skip2 = dstx >> 1;
599     wrap = dst->linesize[0];
600     wrap3 = rect->pict.linesize[0];
601     p = rect->pict.data[0];
602     pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */
603
604     if (dsty & 1) {
605         lum += dstx;
606         cb += skip2;
607         cr += skip2;
608
609         if (dstx & 1) {
610             YUVA_IN(y, u, v, a, p, pal);
611             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
612             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
613             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
614             cb++;
615             cr++;
616             lum++;
617             p += BPP;
618         }
619         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
620             YUVA_IN(y, u, v, a, p, pal);
621             u1 = u;
622             v1 = v;
623             a1 = a;
624             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
625
626             YUVA_IN(y, u, v, a, p + BPP, pal);
627             u1 += u;
628             v1 += v;
629             a1 += a;
630             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
631             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
632             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
633             cb++;
634             cr++;
635             p += 2 * BPP;
636             lum += 2;
637         }
638         if (w) {
639             YUVA_IN(y, u, v, a, p, pal);
640             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
641             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
642             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
643             p++;
644             lum++;
645         }
646         p += wrap3 - dstw * BPP;
647         lum += wrap - dstw - dstx;
648         cb += dst->linesize[1] - width2 - skip2;
649         cr += dst->linesize[2] - width2 - skip2;
650     }
651     for (h = dsth - (dsty & 1); h >= 2; h -= 2) {
652         lum += dstx;
653         cb += skip2;
654         cr += skip2;
655
656         if (dstx & 1) {
657             YUVA_IN(y, u, v, a, p, pal);
658             u1 = u;
659             v1 = v;
660             a1 = a;
661             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
662             p += wrap3;
663             lum += wrap;
664             YUVA_IN(y, u, v, a, p, pal);
665             u1 += u;
666             v1 += v;
667             a1 += a;
668             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
669             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
670             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
671             cb++;
672             cr++;
673             p += -wrap3 + BPP;
674             lum += -wrap + 1;
675         }
676         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
677             YUVA_IN(y, u, v, a, p, pal);
678             u1 = u;
679             v1 = v;
680             a1 = a;
681             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
682
683             YUVA_IN(y, u, v, a, p + BPP, pal);
684             u1 += u;
685             v1 += v;
686             a1 += a;
687             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
688             p += wrap3;
689             lum += wrap;
690
691             YUVA_IN(y, u, v, a, p, pal);
692             u1 += u;
693             v1 += v;
694             a1 += a;
695             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
696
697             YUVA_IN(y, u, v, a, p + BPP, pal);
698             u1 += u;
699             v1 += v;
700             a1 += a;
701             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
702
703             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
704             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
705
706             cb++;
707             cr++;
708             p += -wrap3 + 2 * BPP;
709             lum += -wrap + 2;
710         }
711         if (w) {
712             YUVA_IN(y, u, v, a, p, pal);
713             u1 = u;
714             v1 = v;
715             a1 = a;
716             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
717             p += wrap3;
718             lum += wrap;
719             YUVA_IN(y, u, v, a, p, pal);
720             u1 += u;
721             v1 += v;
722             a1 += a;
723             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
724             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
725             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
726             cb++;
727             cr++;
728             p += -wrap3 + BPP;
729             lum += -wrap + 1;
730         }
731         p += wrap3 + (wrap3 - dstw * BPP);
732         lum += wrap + (wrap - dstw - dstx);
733         cb += dst->linesize[1] - width2 - skip2;
734         cr += dst->linesize[2] - width2 - skip2;
735     }
736     /* handle odd height */
737     if (h) {
738         lum += dstx;
739         cb += skip2;
740         cr += skip2;
741
742         if (dstx & 1) {
743             YUVA_IN(y, u, v, a, p, pal);
744             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
745             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
746             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
747             cb++;
748             cr++;
749             lum++;
750             p += BPP;
751         }
752         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
753             YUVA_IN(y, u, v, a, p, pal);
754             u1 = u;
755             v1 = v;
756             a1 = a;
757             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
758
759             YUVA_IN(y, u, v, a, p + BPP, pal);
760             u1 += u;
761             v1 += v;
762             a1 += a;
763             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
764             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
765             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
766             cb++;
767             cr++;
768             p += 2 * BPP;
769             lum += 2;
770         }
771         if (w) {
772             YUVA_IN(y, u, v, a, p, pal);
773             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
774             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
775             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
776         }
777     }
778 }
779
780 static void free_subpicture(SubPicture *sp)
781 {
782     avsubtitle_free(&sp->sub);
783 }
784
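/* Compute the largest rectangle that fits into scr_width x scr_height while preserving
   the picture's display aspect ratio (sample aspect ratio times width/height, with an
   unknown SAR treated as 1.0), centered inside the screen area and rounded down to even
   dimensions. */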
785 static void calculate_display_rect(SDL_Rect *rect, int scr_xleft, int scr_ytop, int scr_width, int scr_height, VideoPicture *vp)
786 {
787     float aspect_ratio;
788     int width, height, x, y;
789
790     if (vp->sar.num == 0)
791         aspect_ratio = 0;
792     else
793         aspect_ratio = av_q2d(vp->sar);
794
795     if (aspect_ratio <= 0.0)
796         aspect_ratio = 1.0;
797     aspect_ratio *= (float)vp->width / (float)vp->height;
798
799     /* XXX: we suppose the screen has a 1.0 pixel ratio */
800     height = scr_height;
801     width = ((int)rint(height * aspect_ratio)) & ~1;
802     if (width > scr_width) {
803         width = scr_width;
804         height = ((int)rint(width / aspect_ratio)) & ~1;
805     }
806     x = (scr_width - width) / 2;
807     y = (scr_height - height) / 2;
808     rect->x = scr_xleft + x;
809     rect->y = scr_ytop  + y;
810     rect->w = FFMAX(width,  1);
811     rect->h = FFMAX(height, 1);
812 }
813
814 static void video_image_display(VideoState *is)
815 {
816     VideoPicture *vp;
817     SubPicture *sp;
818     AVPicture pict;
819     SDL_Rect rect;
820     int i;
821
822     vp = &is->pictq[is->pictq_rindex];
823     if (vp->bmp) {
824         if (is->subtitle_st) {
825             if (is->subpq_size > 0) {
826                 sp = &is->subpq[is->subpq_rindex];
827
828                 if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) {
829                     SDL_LockYUVOverlay (vp->bmp);
830
831                     pict.data[0] = vp->bmp->pixels[0];
832                     pict.data[1] = vp->bmp->pixels[2];
833                     pict.data[2] = vp->bmp->pixels[1];
834
835                     pict.linesize[0] = vp->bmp->pitches[0];
836                     pict.linesize[1] = vp->bmp->pitches[2];
837                     pict.linesize[2] = vp->bmp->pitches[1];
838
839                     for (i = 0; i < sp->sub.num_rects; i++)
840                         blend_subrect(&pict, sp->sub.rects[i],
841                                       vp->bmp->w, vp->bmp->h);
842
843                     SDL_UnlockYUVOverlay (vp->bmp);
844                 }
845             }
846         }
847
848         calculate_display_rect(&rect, is->xleft, is->ytop, is->width, is->height, vp);
849
850         SDL_DisplayYUVOverlay(vp->bmp, &rect);
851
852         if (rect.x != is->last_display_rect.x || rect.y != is->last_display_rect.y || rect.w != is->last_display_rect.w || rect.h != is->last_display_rect.h || is->force_refresh) {
853             int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
854             fill_border(is->xleft, is->ytop, is->width, is->height, rect.x, rect.y, rect.w, rect.h, bgcolor, 1);
855             is->last_display_rect = rect;
856         }
857     }
858 }
859
860 static inline int compute_mod(int a, int b)
861 {
862     return a < 0 ? a%b + b : a%b;
863 }
864
865 static void video_audio_display(VideoState *s)
866 {
867     int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
868     int ch, channels, h, h2, bgcolor, fgcolor;
869     int64_t time_diff;
870     int rdft_bits, nb_freq;
871
872     for (rdft_bits = 1; (1 << rdft_bits) < 2 * s->height; rdft_bits++)
873         ;
874     nb_freq = 1 << (rdft_bits - 1);
875
876     /* compute display index: center on currently output samples */
877     channels = s->audio_tgt.channels;
878     nb_display_channels = channels;
879     if (!s->paused) {
880         int data_used= s->show_mode == SHOW_MODE_WAVES ? s->width : (2*nb_freq);
881         n = 2 * channels;
882         delay = s->audio_write_buf_size;
883         delay /= n;
884
885         /* to be more precise, we take into account the time spent since
886            the last buffer computation */
887         if (audio_callback_time) {
888             time_diff = av_gettime() - audio_callback_time;
889             delay -= (time_diff * s->audio_tgt.freq) / 1000000;
890         }
891
892         delay += 2 * data_used;
893         if (delay < data_used)
894             delay = data_used;
895
896         i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
897         if (s->show_mode == SHOW_MODE_WAVES) {
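            /* oscilloscope-style trigger: scan up to 1000 recent samples for the position
               with the largest drop over a short window (9 samples ahead) that also
               contains a sign change ((b ^ c) < 0), and start drawing there so the
               displayed waveform stays reasonably stable from one refresh to the next */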
898             h = INT_MIN;
899             for (i = 0; i < 1000; i += channels) {
900                 int idx = (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
901                 int a = s->sample_array[idx];
902                 int b = s->sample_array[(idx + 4 * channels) % SAMPLE_ARRAY_SIZE];
903                 int c = s->sample_array[(idx + 5 * channels) % SAMPLE_ARRAY_SIZE];
904                 int d = s->sample_array[(idx + 9 * channels) % SAMPLE_ARRAY_SIZE];
905                 int score = a - d;
906                 if (h < score && (b ^ c) < 0) {
907                     h = score;
908                     i_start = idx;
909                 }
910             }
911         }
912
913         s->last_i_start = i_start;
914     } else {
915         i_start = s->last_i_start;
916     }
917
918     bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
919     if (s->show_mode == SHOW_MODE_WAVES) {
920         fill_rectangle(screen,
921                        s->xleft, s->ytop, s->width, s->height,
922                        bgcolor, 0);
923
924         fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
925
926         /* total height for one channel */
927         h = s->height / nb_display_channels;
928         /* graph height / 2 */
929         h2 = (h * 9) / 20;
930         for (ch = 0; ch < nb_display_channels; ch++) {
931             i = i_start + ch;
932             y1 = s->ytop + ch * h + (h / 2); /* position of center line */
933             for (x = 0; x < s->width; x++) {
934                 y = (s->sample_array[i] * h2) >> 15;
935                 if (y < 0) {
936                     y = -y;
937                     ys = y1 - y;
938                 } else {
939                     ys = y1;
940                 }
941                 fill_rectangle(screen,
942                                s->xleft + x, ys, 1, y,
943                                fgcolor, 0);
944                 i += channels;
945                 if (i >= SAMPLE_ARRAY_SIZE)
946                     i -= SAMPLE_ARRAY_SIZE;
947             }
948         }
949
950         fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
951
952         for (ch = 1; ch < nb_display_channels; ch++) {
953             y = s->ytop + ch * h;
954             fill_rectangle(screen,
955                            s->xleft, y, s->width, 1,
956                            fgcolor, 0);
957         }
958         SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
959     } else {
960         nb_display_channels= FFMIN(nb_display_channels, 2);
961         if (rdft_bits != s->rdft_bits) {
962             av_rdft_end(s->rdft);
963             av_free(s->rdft_data);
964             s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
965             s->rdft_bits = rdft_bits;
966             s->rdft_data = av_malloc(4 * nb_freq * sizeof(*s->rdft_data));
967         }
968         {
969             FFTSample *data[2];
970             for (ch = 0; ch < nb_display_channels; ch++) {
971                 data[ch] = s->rdft_data + 2 * nb_freq * ch;
972                 i = i_start + ch;
973                 for (x = 0; x < 2 * nb_freq; x++) {
974                     double w = (x-nb_freq) * (1.0 / nb_freq);
975                     data[ch][x] = s->sample_array[i] * (1.0 - w * w);
976                     i += channels;
977                     if (i >= SAMPLE_ARRAY_SIZE)
978                         i -= SAMPLE_ARRAY_SIZE;
979                 }
980                 av_rdft_calc(s->rdft, data[ch]);
981             }
982             // least efficient way to do this; we should of course access it directly, but it's more than fast enough
983             for (y = 0; y < s->height; y++) {
984                 double w = 1 / sqrt(nb_freq);
985                 int a = sqrt(w * sqrt(data[0][2 * y + 0] * data[0][2 * y + 0] + data[0][2 * y + 1] * data[0][2 * y + 1]));
986                 int b = (nb_display_channels == 2 ) ? sqrt(w * sqrt(data[1][2 * y + 0] * data[1][2 * y + 0]
987                        + data[1][2 * y + 1] * data[1][2 * y + 1])) : a;
988                 a = FFMIN(a, 255);
989                 b = FFMIN(b, 255);
990                 fgcolor = SDL_MapRGB(screen->format, a, b, (a + b) / 2);
991
992                 fill_rectangle(screen,
993                             s->xpos, s->height-y, 1, 1,
994                             fgcolor, 0);
995             }
996         }
997         SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
998         if (!s->paused)
999             s->xpos++;
1000         if (s->xpos >= s->width)
1001             s->xpos= s->xleft;
1002     }
1003 }
1004
1005 static void stream_close(VideoState *is)
1006 {
1007     VideoPicture *vp;
1008     int i;
1009     /* XXX: use a special url_shutdown call to abort parse cleanly */
1010     is->abort_request = 1;
1011     SDL_WaitThread(is->read_tid, NULL);
1012     packet_queue_destroy(&is->videoq);
1013     packet_queue_destroy(&is->audioq);
1014     packet_queue_destroy(&is->subtitleq);
1015
1016     /* free all pictures */
1017     for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
1018         vp = &is->pictq[i];
1019         if (vp->bmp) {
1020             SDL_FreeYUVOverlay(vp->bmp);
1021             vp->bmp = NULL;
1022         }
1023     }
1024     SDL_DestroyMutex(is->pictq_mutex);
1025     SDL_DestroyCond(is->pictq_cond);
1026     SDL_DestroyMutex(is->subpq_mutex);
1027     SDL_DestroyCond(is->subpq_cond);
1028     SDL_DestroyCond(is->continue_read_thread);
1029 #if !CONFIG_AVFILTER
1030     sws_freeContext(is->img_convert_ctx);
1031 #endif
1032     av_free(is);
1033 }
1034
1035 static void do_exit(VideoState *is)
1036 {
1037     if (is) {
1038         stream_close(is);
1039     }
1040     av_lockmgr_register(NULL);
1041     uninit_opts();
1042 #if CONFIG_AVFILTER
1043     av_freep(&vfilters);
1044 #endif
1045     avformat_network_deinit();
1046     if (show_status)
1047         printf("\n");
1048     SDL_Quit();
1049     av_log(NULL, AV_LOG_QUIET, "%s", "");
1050     exit(0);
1051 }
1052
1053 static void sigterm_handler(int sig)
1054 {
1055     exit(123);
1056 }
1057
1058 static int video_open(VideoState *is, int force_set_video_mode, VideoPicture *vp)
1059 {
1060     int flags = SDL_HWSURFACE | SDL_ASYNCBLIT | SDL_HWACCEL;
1061     int w,h;
1062     SDL_Rect rect;
1063
1064     if (is_full_screen) flags |= SDL_FULLSCREEN;
1065     else                flags |= SDL_RESIZABLE;
1066
1067     if (vp && vp->width) {
1068         calculate_display_rect(&rect, 0, 0, INT_MAX, vp->height, vp);
1069         default_width  = rect.w;
1070         default_height = rect.h;
1071     }
1072
1073     if (is_full_screen && fs_screen_width) {
1074         w = fs_screen_width;
1075         h = fs_screen_height;
1076     } else if (!is_full_screen && screen_width) {
1077         w = screen_width;
1078         h = screen_height;
1079     } else {
1080         w = default_width;
1081         h = default_height;
1082     }
1083     if (screen && is->width == screen->w && screen->w == w
1084        && is->height== screen->h && screen->h == h && !force_set_video_mode)
1085         return 0;
1086     screen = SDL_SetVideoMode(w, h, 0, flags);
1087     if (!screen) {
1088         fprintf(stderr, "SDL: could not set video mode - exiting\n");
1089         do_exit(is);
1090     }
1091     if (!window_title)
1092         window_title = input_filename;
1093     SDL_WM_SetCaption(window_title, window_title);
1094
1095     is->width  = screen->w;
1096     is->height = screen->h;
1097
1098     return 0;
1099 }
1100
1101 /* display the current picture, if any */
1102 static void video_display(VideoState *is)
1103 {
1104     if (!screen)
1105         video_open(is, 0, NULL);
1106     if (is->audio_st && is->show_mode != SHOW_MODE_VIDEO)
1107         video_audio_display(is);
1108     else if (is->video_st)
1109         video_image_display(is);
1110 }
1111
1112 static double get_clock(Clock *c)
1113 {
1114     if (*c->queue_serial != c->serial)
1115         return NAN;
1116     if (c->paused) {
1117         return c->pts;
1118     } else {
1119         double time = av_gettime() / 1000000.0;
1120         return c->pts_drift + time - (time - c->last_updated) * (1.0 - c->speed);
1121     }
1122 }
1123
1124 static void set_clock_at(Clock *c, double pts, int serial, double time)
1125 {
1126     c->pts = pts;
1127     c->last_updated = time;
1128     c->pts_drift = c->pts - time;
1129     c->serial = serial;
1130 }
1131
1132 static void set_clock(Clock *c, double pts, int serial)
1133 {
1134     double time = av_gettime() / 1000000.0;
1135     set_clock_at(c, pts, serial, time);
1136 }
1137
1138 static void set_clock_speed(Clock *c, double speed)
1139 {
1140     set_clock(c, get_clock(c), c->serial);
1141     c->speed = speed;
1142 }
1143
1144 static void init_clock(Clock *c, int *queue_serial)
1145 {
1146     c->speed = 1.0;
1147     c->paused = 0;
1148     c->queue_serial = queue_serial;
1149     set_clock(c, NAN, -1);
1150 }
1151
1152 static void sync_clock_to_slave(Clock *c, Clock *slave)
1153 {
1154     double clock = get_clock(c);
1155     double slave_clock = get_clock(slave);
1156     if (!isnan(slave_clock) && (isnan(clock) || fabs(clock - slave_clock) > AV_NOSYNC_THRESHOLD))
1157         set_clock(c, slave_clock, slave->serial);
1158 }
1159
1160 static int get_master_sync_type(VideoState *is) {
1161     if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1162         if (is->video_st)
1163             return AV_SYNC_VIDEO_MASTER;
1164         else
1165             return AV_SYNC_AUDIO_MASTER;
1166     } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1167         if (is->audio_st)
1168             return AV_SYNC_AUDIO_MASTER;
1169         else
1170             return AV_SYNC_EXTERNAL_CLOCK;
1171     } else {
1172         return AV_SYNC_EXTERNAL_CLOCK;
1173     }
1174 }
1175
1176 /* get the current master clock value */
1177 static double get_master_clock(VideoState *is)
1178 {
1179     double val;
1180
1181     switch (get_master_sync_type(is)) {
1182         case AV_SYNC_VIDEO_MASTER:
1183             val = get_clock(&is->vidclk);
1184             break;
1185         case AV_SYNC_AUDIO_MASTER:
1186             val = get_clock(&is->audclk);
1187             break;
1188         default:
1189             val = get_clock(&is->extclk);
1190             break;
1191     }
1192     return val;
1193 }
1194
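/* For realtime sources synced to the external clock (see video_refresh), nudge the clock
   speed according to demuxer queue fullness: slow down when a used queue falls to
   MIN_FRAMES / 2 packets or fewer, speed up when all used queues hold more than
   MIN_FRAMES * 2 packets, and otherwise step the speed back toward 1.0. */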
1195 static void check_external_clock_speed(VideoState *is) {
1196    if ((is->video_stream >= 0 && is->videoq.nb_packets <= MIN_FRAMES / 2) ||
1197        (is->audio_stream >= 0 && is->audioq.nb_packets <= MIN_FRAMES / 2)) {
1198        set_clock_speed(&is->extclk, FFMAX(EXTERNAL_CLOCK_SPEED_MIN, is->extclk.speed - EXTERNAL_CLOCK_SPEED_STEP));
1199    } else if ((is->video_stream < 0 || is->videoq.nb_packets > MIN_FRAMES * 2) &&
1200               (is->audio_stream < 0 || is->audioq.nb_packets > MIN_FRAMES * 2)) {
1201        set_clock_speed(&is->extclk, FFMIN(EXTERNAL_CLOCK_SPEED_MAX, is->extclk.speed + EXTERNAL_CLOCK_SPEED_STEP));
1202    } else {
1203        double speed = is->extclk.speed;
1204        if (speed != 1.0)
1205            set_clock_speed(&is->extclk, speed + EXTERNAL_CLOCK_SPEED_STEP * (1.0 - speed) / fabs(1.0 - speed));
1206    }
1207 }
1208
1209 /* seek in the stream */
1210 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1211 {
1212     if (!is->seek_req) {
1213         is->seek_pos = pos;
1214         is->seek_rel = rel;
1215         is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1216         if (seek_by_bytes)
1217             is->seek_flags |= AVSEEK_FLAG_BYTE;
1218         is->seek_req = 1;
1219         SDL_CondSignal(is->continue_read_thread);
1220     }
1221 }
1222
1223 /* pause or resume the video */
1224 static void stream_toggle_pause(VideoState *is)
1225 {
1226     if (is->paused) {
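        /* on unpause, push frame_timer forward by roughly the time spent paused
           (now - vidclk.last_updated, expressed here through pts_drift = pts - last_updated)
           so the next frame is not treated as hopelessly late */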
1227         is->frame_timer += av_gettime() / 1000000.0 + is->vidclk.pts_drift - is->vidclk.pts;
1228         if (is->read_pause_return != AVERROR(ENOSYS)) {
1229             is->vidclk.paused = 0;
1230         }
1231         set_clock(&is->vidclk, get_clock(&is->vidclk), is->vidclk.serial);
1232     }
1233     set_clock(&is->extclk, get_clock(&is->extclk), is->extclk.serial);
1234     is->paused = is->audclk.paused = is->vidclk.paused = is->extclk.paused = !is->paused;
1235 }
1236
1237 static void toggle_pause(VideoState *is)
1238 {
1239     stream_toggle_pause(is);
1240     is->step = 0;
1241 }
1242
1243 static void step_to_next_frame(VideoState *is)
1244 {
1245     /* if the stream is paused unpause it, then step */
1246     if (is->paused)
1247         stream_toggle_pause(is);
1248     is->step = 1;
1249 }
1250
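/* Worked example of the correction below, assuming audio is the master clock: with
   delay = 0.040s and diff = -0.100s (video lags audio), sync_threshold clamps to 0.040s
   and the returned delay becomes FFMAX(0, 0.040 - 0.100) = 0, so the next frame is shown
   immediately; with diff = +0.100s the delay is doubled to 0.080s (or, for frames longer
   than AV_SYNC_FRAMEDUP_THRESHOLD, extended by diff) so the master clock can catch up. */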
1251 static double compute_target_delay(double delay, VideoState *is)
1252 {
1253     double sync_threshold, diff;
1254
1255     /* update delay to follow master synchronisation source */
1256     if (get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER) {
1257         /* if video is slave, we try to correct big delays by
1258            duplicating or deleting a frame */
1259         diff = get_clock(&is->vidclk) - get_master_clock(is);
1260
1261         /* skip or repeat frame. We take into account the
1262            delay to compute the threshold. I still don't know
1263            if it is the best guess */
1264         sync_threshold = FFMAX(AV_SYNC_THRESHOLD_MIN, FFMIN(AV_SYNC_THRESHOLD_MAX, delay));
1265         if (!isnan(diff) && fabs(diff) < is->max_frame_duration) {
1266             if (diff <= -sync_threshold)
1267                 delay = FFMAX(0, delay + diff);
1268             else if (diff >= sync_threshold && delay > AV_SYNC_FRAMEDUP_THRESHOLD)
1269                 delay = delay + diff;
1270             else if (diff >= sync_threshold)
1271                 delay = 2 * delay;
1272         }
1273     }
1274
1275     av_dlog(NULL, "video: delay=%0.3f A-V=%f\n",
1276             delay, -diff);
1277
1278     return delay;
1279 }
1280
1281 static void pictq_next_picture(VideoState *is) {
1282     /* update queue size and signal for next picture */
1283     if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1284         is->pictq_rindex = 0;
1285
1286     SDL_LockMutex(is->pictq_mutex);
1287     is->pictq_size--;
1288     SDL_CondSignal(is->pictq_cond);
1289     SDL_UnlockMutex(is->pictq_mutex);
1290 }
1291
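/* Step the read index back to the previously displayed picture (which queue_picture
   deliberately keeps in the queue) so that a forced refresh can redisplay it; this only
   succeeds if that picture is still valid for the current queue serial. */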
1292 static int pictq_prev_picture(VideoState *is) {
1293     VideoPicture *prevvp;
1294     int ret = 0;
1295     /* update queue size and signal for the previous picture */
1296     prevvp = &is->pictq[(is->pictq_rindex + VIDEO_PICTURE_QUEUE_SIZE - 1) % VIDEO_PICTURE_QUEUE_SIZE];
1297     if (prevvp->allocated && prevvp->serial == is->videoq.serial) {
1298         SDL_LockMutex(is->pictq_mutex);
1299         if (is->pictq_size < VIDEO_PICTURE_QUEUE_SIZE) {
1300             if (--is->pictq_rindex == -1)
1301                 is->pictq_rindex = VIDEO_PICTURE_QUEUE_SIZE - 1;
1302             is->pictq_size++;
1303             ret = 1;
1304         }
1305         SDL_CondSignal(is->pictq_cond);
1306         SDL_UnlockMutex(is->pictq_mutex);
1307     }
1308     return ret;
1309 }
1310
1311 static void update_video_pts(VideoState *is, double pts, int64_t pos, int serial) {
1312     /* update current video pts */
1313     set_clock(&is->vidclk, pts, serial);
1314     sync_clock_to_slave(&is->extclk, &is->vidclk);
1315     is->video_current_pos = pos;
1316     is->frame_last_pts = pts;
1317 }
1318
1319 /* called to display each frame */
1320 static void video_refresh(void *opaque, double *remaining_time)
1321 {
1322     VideoState *is = opaque;
1323     VideoPicture *vp;
1324     double time;
1325
1326     SubPicture *sp, *sp2;
1327
1328     if (!is->paused && get_master_sync_type(is) == AV_SYNC_EXTERNAL_CLOCK && is->realtime)
1329         check_external_clock_speed(is);
1330
1331     if (!display_disable && is->show_mode != SHOW_MODE_VIDEO && is->audio_st) {
1332         time = av_gettime() / 1000000.0;
1333         if (is->force_refresh || is->last_vis_time + rdftspeed < time) {
1334             video_display(is);
1335             is->last_vis_time = time;
1336         }
1337         *remaining_time = FFMIN(*remaining_time, is->last_vis_time + rdftspeed - time);
1338     }
1339
1340     if (is->video_st) {
1341         int redisplay = 0;
1342         if (is->force_refresh)
1343             redisplay = pictq_prev_picture(is);
1344 retry:
1345         if (is->pictq_size == 0) {
1346             SDL_LockMutex(is->pictq_mutex);
1347             if (is->frame_last_dropped_pts != AV_NOPTS_VALUE && is->frame_last_dropped_pts > is->frame_last_pts) {
1348                 update_video_pts(is, is->frame_last_dropped_pts, is->frame_last_dropped_pos, is->frame_last_dropped_serial);
1349                 is->frame_last_dropped_pts = AV_NOPTS_VALUE;
1350             }
1351             SDL_UnlockMutex(is->pictq_mutex);
1352             // nothing to do, no picture to display in the queue
1353         } else {
1354             double last_duration, duration, delay;
1355             /* dequeue the picture */
1356             vp = &is->pictq[is->pictq_rindex];
1357
1358             if (vp->serial != is->videoq.serial) {
1359                 pictq_next_picture(is);
1360                 redisplay = 0;
1361                 goto retry;
1362             }
1363
1364             if (is->paused)
1365                 goto display;
1366
1367             /* compute nominal last_duration */
1368             last_duration = vp->pts - is->frame_last_pts;
1369             if (!isnan(last_duration) && last_duration > 0 && last_duration < is->max_frame_duration) {
1370                 /* if duration of the last frame was sane, update last_duration in video state */
1371                 is->frame_last_duration = last_duration;
1372             }
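            /* when we are merely redisplaying the already shown frame (force_refresh),
               do not wait for the nominal frame delay, show it again immediately */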
1373             if (redisplay)
1374                 delay = 0.0;
1375             else
1376                 delay = compute_target_delay(is->frame_last_duration, is);
1377
1378             time= av_gettime()/1000000.0;
1379             if (time < is->frame_timer + delay && !redisplay) {
1380                 *remaining_time = FFMIN(is->frame_timer + delay - time, *remaining_time);
1381                 return;
1382             }
1383
1384             is->frame_timer += delay;
1385             if (delay > 0 && time - is->frame_timer > AV_SYNC_THRESHOLD_MAX)
1386                 is->frame_timer = time;
1387
1388             SDL_LockMutex(is->pictq_mutex);
1389             if (!redisplay && !isnan(vp->pts))
1390                 update_video_pts(is, vp->pts, vp->pos, vp->serial);
1391             SDL_UnlockMutex(is->pictq_mutex);
1392
1393             if (is->pictq_size > 1) {
1394                 VideoPicture *nextvp = &is->pictq[(is->pictq_rindex + 1) % VIDEO_PICTURE_QUEUE_SIZE];
1395                 duration = nextvp->pts - vp->pts;
1396                 if(!is->step && (redisplay || framedrop>0 || (framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) && time > is->frame_timer + duration){
1397                     if (!redisplay)
1398                         is->frame_drops_late++;
1399                     pictq_next_picture(is);
1400                     redisplay = 0;
1401                     goto retry;
1402                 }
1403             }
1404
1405             if (is->subtitle_st) {
1406                 if (is->subtitle_stream_changed) {
1407                     SDL_LockMutex(is->subpq_mutex);
1408
1409                     while (is->subpq_size) {
1410                         free_subpicture(&is->subpq[is->subpq_rindex]);
1411
1412                         /* update queue size and signal for next picture */
1413                         if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1414                             is->subpq_rindex = 0;
1415
1416                         is->subpq_size--;
1417                     }
1418                     is->subtitle_stream_changed = 0;
1419
1420                     SDL_CondSignal(is->subpq_cond);
1421                     SDL_UnlockMutex(is->subpq_mutex);
1422                 } else {
1423                     if (is->subpq_size > 0) {
1424                         sp = &is->subpq[is->subpq_rindex];
1425
1426                         if (is->subpq_size > 1)
1427                             sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1428                         else
1429                             sp2 = NULL;
1430
1431                         if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1432                                 || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1433                         {
1434                             free_subpicture(sp);
1435
1436                             /* update queue size and signal for next picture */
1437                             if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1438                                 is->subpq_rindex = 0;
1439
1440                             SDL_LockMutex(is->subpq_mutex);
1441                             is->subpq_size--;
1442                             SDL_CondSignal(is->subpq_cond);
1443                             SDL_UnlockMutex(is->subpq_mutex);
1444                         }
1445                     }
1446                 }
1447             }
1448
1449 display:
1450             /* display picture */
1451             if (!display_disable && is->show_mode == SHOW_MODE_VIDEO)
1452                 video_display(is);
1453
1454             pictq_next_picture(is);
1455
1456             if (is->step && !is->paused)
1457                 stream_toggle_pause(is);
1458         }
1459     }
1460     is->force_refresh = 0;
1461     if (show_status) {
1462         static int64_t last_time;
1463         int64_t cur_time;
1464         int aqsize, vqsize, sqsize;
1465         double av_diff;
1466
1467         cur_time = av_gettime();
1468         if (!last_time || (cur_time - last_time) >= 30000) {
1469             aqsize = 0;
1470             vqsize = 0;
1471             sqsize = 0;
1472             if (is->audio_st)
1473                 aqsize = is->audioq.size;
1474             if (is->video_st)
1475                 vqsize = is->videoq.size;
1476             if (is->subtitle_st)
1477                 sqsize = is->subtitleq.size;
1478             av_diff = 0;
1479             if (is->audio_st && is->video_st)
1480                 av_diff = get_clock(&is->audclk) - get_clock(&is->vidclk);
1481             else if (is->video_st)
1482                 av_diff = get_master_clock(is) - get_clock(&is->vidclk);
1483             else if (is->audio_st)
1484                 av_diff = get_master_clock(is) - get_clock(&is->audclk);
1485             printf("%7.2f %s:%7.3f fd=%4d aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
1486                    get_master_clock(is),
1487                    (is->audio_st && is->video_st) ? "A-V" : (is->video_st ? "M-V" : (is->audio_st ? "M-A" : "   ")),
1488                    av_diff,
1489                    is->frame_drops_early + is->frame_drops_late,
1490                    aqsize / 1024,
1491                    vqsize / 1024,
1492                    sqsize,
1493                    is->video_st ? is->video_st->codec->pts_correction_num_faulty_dts : 0,
1494                    is->video_st ? is->video_st->codec->pts_correction_num_faulty_pts : 0);
1495             fflush(stdout);
1496             last_time = cur_time;
1497         }
1498     }
1499 }
1500
1501 /* allocate a picture (this needs to be done in the main thread to avoid
1502    potential locking problems) */
1503 static void alloc_picture(VideoState *is)
1504 {
1505     VideoPicture *vp;
1506
1507     vp = &is->pictq[is->pictq_windex];
1508
1509     if (vp->bmp)
1510         SDL_FreeYUVOverlay(vp->bmp);
1511
1512     video_open(is, 0, vp);
1513
1514     vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
1515                                    SDL_YV12_OVERLAY,
1516                                    screen);
1517     if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
1518         /* SDL allocates a buffer smaller than requested if the video
1519          * overlay hardware is unable to support the requested size. */
1520         fprintf(stderr, "Error: the video system does not support an image\n"
1521                         "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n"
1522                         "to reduce the image size.\n", vp->width, vp->height );
1523         do_exit(is);
1524     }
1525
1526     SDL_LockMutex(is->pictq_mutex);
1527     vp->allocated = 1;
1528     SDL_CondSignal(is->pictq_cond);
1529     SDL_UnlockMutex(is->pictq_mutex);
1530 }
1531
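/* For every plane whose pitch is larger than its visible width, duplicate the last
   visible pixel of each line into the first padding column; used as a workaround for
   SDL's PITCH_WORKAROUND behaviour (see the comment at the call site in queue_picture). */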
1532 static void duplicate_right_border_pixels(SDL_Overlay *bmp) {
1533     int i, width, height;
1534     Uint8 *p, *maxp;
1535     for (i = 0; i < 3; i++) {
1536         width  = bmp->w;
1537         height = bmp->h;
1538         if (i > 0) {
1539             width  >>= 1;
1540             height >>= 1;
1541         }
1542         if (bmp->pitches[i] > width) {
1543             maxp = bmp->pixels[i] + bmp->pitches[i] * height - 1;
1544             for (p = bmp->pixels[i] + width - 1; p < maxp; p += bmp->pitches[i])
1545                 *(p+1) = *p;
1546         }
1547     }
1548 }
1549
1550 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, int64_t pos, int serial)
1551 {
1552     VideoPicture *vp;
1553
1554 #if defined(DEBUG_SYNC) && 0
1555     printf("frame_type=%c pts=%0.3f\n",
1556            av_get_picture_type_char(src_frame->pict_type), pts);
1557 #endif
1558
1559     /* wait until we have space to put a new picture */
1560     SDL_LockMutex(is->pictq_mutex);
1561
1562     /* keep the last already displayed picture in the queue */
1563     while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE - 1 &&
1564            !is->videoq.abort_request) {
1565         SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1566     }
1567     SDL_UnlockMutex(is->pictq_mutex);
1568
1569     if (is->videoq.abort_request)
1570         return -1;
1571
1572     vp = &is->pictq[is->pictq_windex];
1573
1574     vp->sar = src_frame->sample_aspect_ratio;
1575
1576     /* alloc or resize hardware picture buffer */
1577     if (!vp->bmp || vp->reallocate || !vp->allocated ||
1578         vp->width  != src_frame->width ||
1579         vp->height != src_frame->height) {
1580         SDL_Event event;
1581
1582         vp->allocated  = 0;
1583         vp->reallocate = 0;
1584         vp->width = src_frame->width;
1585         vp->height = src_frame->height;
1586
1587         /* the allocation must be done in the main thread to avoid
1588            locking problems. */
1589         event.type = FF_ALLOC_EVENT;
1590         event.user.data1 = is;
1591         SDL_PushEvent(&event);
1592
1593         /* wait until the picture is allocated */
1594         SDL_LockMutex(is->pictq_mutex);
1595         while (!vp->allocated && !is->videoq.abort_request) {
1596             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1597         }
1598         /* if the queue is aborted, we have to pop the pending ALLOC event or wait for the allocation to complete */
1599         if (is->videoq.abort_request && SDL_PeepEvents(&event, 1, SDL_GETEVENT, SDL_EVENTMASK(FF_ALLOC_EVENT)) != 1) {
1600             while (!vp->allocated) {
1601                 SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1602             }
1603         }
1604         SDL_UnlockMutex(is->pictq_mutex);
1605
1606         if (is->videoq.abort_request)
1607             return -1;
1608     }
1609
1610     /* if the frame is not skipped, then display it */
1611     if (vp->bmp) {
1612         AVPicture pict = { { 0 } };
1613
1614         /* get a pointer to the bitmap */
1615         SDL_LockYUVOverlay (vp->bmp);
1616
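        /* the SDL overlay is a YV12 surface, which stores the V plane before
         * the U plane, hence the swapped plane indices below */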
1617         pict.data[0] = vp->bmp->pixels[0];
1618         pict.data[1] = vp->bmp->pixels[2];
1619         pict.data[2] = vp->bmp->pixels[1];
1620
1621         pict.linesize[0] = vp->bmp->pitches[0];
1622         pict.linesize[1] = vp->bmp->pitches[2];
1623         pict.linesize[2] = vp->bmp->pitches[1];
1624
1625 #if CONFIG_AVFILTER
1626         // FIXME use direct rendering
1627         av_picture_copy(&pict, (AVPicture *)src_frame,
1628                         src_frame->format, vp->width, vp->height);
1629 #else
1630         av_opt_get_int(sws_opts, "sws_flags", 0, &sws_flags);
1631         is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
1632             vp->width, vp->height, src_frame->format, vp->width, vp->height,
1633             AV_PIX_FMT_YUV420P, sws_flags, NULL, NULL, NULL);
1634         if (is->img_convert_ctx == NULL) {
1635             fprintf(stderr, "Cannot initialize the conversion context\n");
1636             exit(1);
1637         }
1638         sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1639                   0, vp->height, pict.data, pict.linesize);
1640 #endif
1641         /* work around SDL pitch padding (PITCH_WORKAROUND) */
1642         duplicate_right_border_pixels(vp->bmp);
1643         /* update the bitmap content */
1644         SDL_UnlockYUVOverlay(vp->bmp);
1645
1646         vp->pts = pts;
1647         vp->pos = pos;
1648         vp->serial = serial;
1649
1650         /* now we can update the picture count */
1651         if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1652             is->pictq_windex = 0;
1653         SDL_LockMutex(is->pictq_mutex);
1654         is->pictq_size++;
1655         SDL_UnlockMutex(is->pictq_mutex);
1656     }
1657     return 0;
1658 }
1659
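/* Get the next decoded video frame from the video packet queue.
 * Flush packets flush the decoder and reset the frame timing state; the
 * frame pts is chosen according to decoder_reorder_pts, and late frames may
 * be dropped early when frame dropping (framedrop) is enabled.
 * Returns 1 if a frame was produced, 0 if not (or it was dropped), -1 on
 * abort. */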
1660 static int get_video_frame(VideoState *is, AVFrame *frame, AVPacket *pkt, int *serial)
1661 {
1662     int got_picture;
1663
1664     if (packet_queue_get(&is->videoq, pkt, 1, serial) < 0)
1665         return -1;
1666
1667     if (pkt->data == flush_pkt.data) {
1668         avcodec_flush_buffers(is->video_st->codec);
1669
1670         SDL_LockMutex(is->pictq_mutex);
1671         // Make sure there are no long delay timers (ideally we should just flush the queue but that's harder)
1672         while (is->pictq_size && !is->videoq.abort_request) {
1673             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1674         }
1675         is->video_current_pos = -1;
1676         is->frame_last_pts = AV_NOPTS_VALUE;
1677         is->frame_last_duration = 0;
1678         is->frame_timer = (double)av_gettime() / 1000000.0;
1679         is->frame_last_dropped_pts = AV_NOPTS_VALUE;
1680         SDL_UnlockMutex(is->pictq_mutex);
1681         return 0;
1682     }
1683
1684     if(avcodec_decode_video2(is->video_st->codec, frame, &got_picture, pkt) < 0)
1685         return 0;
1686
1687     if (got_picture) {
1688         int ret = 1;
1689         double dpts = NAN;
1690
1691         if (decoder_reorder_pts == -1) {
1692             frame->pts = av_frame_get_best_effort_timestamp(frame);
1693         } else if (decoder_reorder_pts) {
1694             frame->pts = frame->pkt_pts;
1695         } else {
1696             frame->pts = frame->pkt_dts;
1697         }
1698
1699         if (frame->pts != AV_NOPTS_VALUE)
1700             dpts = av_q2d(is->video_st->time_base) * frame->pts;
1701
1702         frame->sample_aspect_ratio = av_guess_sample_aspect_ratio(is->ic, is->video_st, frame);
1703
1704         if (framedrop>0 || (framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) {
1705             SDL_LockMutex(is->pictq_mutex);
1706             if (is->frame_last_pts != AV_NOPTS_VALUE && frame->pts != AV_NOPTS_VALUE) {
1707                 double clockdiff = get_clock(&is->vidclk) - get_master_clock(is);
1708                 double ptsdiff = dpts - is->frame_last_pts;
1709                 if (!isnan(clockdiff) && fabs(clockdiff) < AV_NOSYNC_THRESHOLD &&
1710                     !isnan(ptsdiff) && ptsdiff > 0 && ptsdiff < AV_NOSYNC_THRESHOLD &&
1711                     clockdiff + ptsdiff - is->frame_last_filter_delay < 0 &&
1712                     is->videoq.nb_packets) {
1713                     is->frame_last_dropped_pos = pkt->pos;
1714                     is->frame_last_dropped_pts = dpts;
1715                     is->frame_last_dropped_serial = *serial;
1716                     is->frame_drops_early++;
1717                     av_frame_unref(frame);
1718                     ret = 0;
1719                 }
1720             }
1721             SDL_UnlockMutex(is->pictq_mutex);
1722         }
1723
1724         return ret;
1725     }
1726     return 0;
1727 }
1728
1729 #if CONFIG_AVFILTER
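/* Parse and configure a filtergraph between source_ctx and sink_ctx.
 * If a filtergraph description is given, it is parsed with the source
 * labelled "in" and the sink labelled "out"; otherwise the source is linked
 * directly to the sink. */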
1730 static int configure_filtergraph(AVFilterGraph *graph, const char *filtergraph,
1731                                  AVFilterContext *source_ctx, AVFilterContext *sink_ctx)
1732 {
1733     int ret;
1734     AVFilterInOut *outputs = NULL, *inputs = NULL;
1735
1736     if (filtergraph) {
1737         outputs = avfilter_inout_alloc();
1738         inputs  = avfilter_inout_alloc();
1739         if (!outputs || !inputs) {
1740             ret = AVERROR(ENOMEM);
1741             goto fail;
1742         }
1743
1744         outputs->name       = av_strdup("in");
1745         outputs->filter_ctx = source_ctx;
1746         outputs->pad_idx    = 0;
1747         outputs->next       = NULL;
1748
1749         inputs->name        = av_strdup("out");
1750         inputs->filter_ctx  = sink_ctx;
1751         inputs->pad_idx     = 0;
1752         inputs->next        = NULL;
1753
1754         if ((ret = avfilter_graph_parse(graph, filtergraph, &inputs, &outputs, NULL)) < 0)
1755             goto fail;
1756     } else {
1757         if ((ret = avfilter_link(source_ctx, 0, sink_ctx, 0)) < 0)
1758             goto fail;
1759     }
1760
1761     ret = avfilter_graph_config(graph, NULL);
1762 fail:
1763     avfilter_inout_free(&outputs);
1764     avfilter_inout_free(&inputs);
1765     return ret;
1766 }
1767
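/* Build the video filtergraph: a "buffer" source described by the decoded
 * frame parameters, the user-supplied filter chain (vfilters), a "crop"
 * filter rounding the picture down to even dimensions for SDL, and a
 * "buffersink" restricted to YUV420P. */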
1768 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters, AVFrame *frame)
1769 {
1770     static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE };
1771     char sws_flags_str[128];
1772     char buffersrc_args[256];
1773     int ret;
1774     AVFilterContext *filt_src = NULL, *filt_out = NULL, *filt_crop;
1775     AVCodecContext *codec = is->video_st->codec;
1776     AVRational fr = av_guess_frame_rate(is->ic, is->video_st, NULL);
1777
1778     av_opt_get_int(sws_opts, "sws_flags", 0, &sws_flags);
1779     snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%"PRId64, sws_flags);
1780     graph->scale_sws_opts = av_strdup(sws_flags_str);
1781
1782     snprintf(buffersrc_args, sizeof(buffersrc_args),
1783              "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
1784              frame->width, frame->height, frame->format,
1785              is->video_st->time_base.num, is->video_st->time_base.den,
1786              codec->sample_aspect_ratio.num, FFMAX(codec->sample_aspect_ratio.den, 1));
1787     if (fr.num && fr.den)
1788         av_strlcatf(buffersrc_args, sizeof(buffersrc_args), ":frame_rate=%d/%d", fr.num, fr.den);
1789
1790     if ((ret = avfilter_graph_create_filter(&filt_src,
1791                                             avfilter_get_by_name("buffer"),
1792                                             "ffplay_buffer", buffersrc_args, NULL,
1793                                             graph)) < 0)
1794         goto fail;
1795
1796     ret = avfilter_graph_create_filter(&filt_out,
1797                                        avfilter_get_by_name("buffersink"),
1798                                        "ffplay_buffersink", NULL, NULL, graph);
1799     if (ret < 0)
1800         goto fail;
1801
1802     if ((ret = av_opt_set_int_list(filt_out, "pix_fmts", pix_fmts,  AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN)) < 0)
1803         goto fail;
1804
1805     /* The SDL YUV code does not handle odd width/height for some driver
1806      * combinations, therefore we crop the picture to an even width/height. */
1807     if ((ret = avfilter_graph_create_filter(&filt_crop,
1808                                             avfilter_get_by_name("crop"),
1809                                             "ffplay_crop", "floor(in_w/2)*2:floor(in_h/2)*2", NULL, graph)) < 0)
1810         goto fail;
1811     if ((ret = avfilter_link(filt_crop, 0, filt_out, 0)) < 0)
1812         goto fail;
1813
1814     if ((ret = configure_filtergraph(graph, vfilters, filt_src, filt_crop)) < 0)
1815         goto fail;
1816
1817     is->in_video_filter  = filt_src;
1818     is->out_video_filter = filt_out;
1819
1820 fail:
1821     return ret;
1822 }
1823
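/* Build the audio filtergraph: an "abuffer" source described by
 * audio_filter_src, the user-supplied filter chain (afilters) and an
 * "abuffersink" restricted to signed 16-bit samples; if force_output_format
 * is set, the sink is also pinned to the audio_tgt channel layout, channel
 * count and sample rate. */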
1824 static int configure_audio_filters(VideoState *is, const char *afilters, int force_output_format)
1825 {
1826     static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE };
1827     int sample_rates[2] = { 0, -1 };
1828     int64_t channel_layouts[2] = { 0, -1 };
1829     int channels[2] = { 0, -1 };
1830     AVFilterContext *filt_asrc = NULL, *filt_asink = NULL;
1831     char asrc_args[256];
1832     int ret;
1833
1834     avfilter_graph_free(&is->agraph);
1835     if (!(is->agraph = avfilter_graph_alloc()))
1836         return AVERROR(ENOMEM);
1837
1838     ret = snprintf(asrc_args, sizeof(asrc_args),
1839                    "sample_rate=%d:sample_fmt=%s:channels=%d:time_base=%d/%d",
1840                    is->audio_filter_src.freq, av_get_sample_fmt_name(is->audio_filter_src.fmt),
1841                    is->audio_filter_src.channels,
1842                    1, is->audio_filter_src.freq);
1843     if (is->audio_filter_src.channel_layout)
1844         snprintf(asrc_args + ret, sizeof(asrc_args) - ret,
1845                  ":channel_layout=0x%"PRIx64,  is->audio_filter_src.channel_layout);
1846
1847     ret = avfilter_graph_create_filter(&filt_asrc,
1848                                        avfilter_get_by_name("abuffer"), "ffplay_abuffer",
1849                                        asrc_args, NULL, is->agraph);
1850     if (ret < 0)
1851         goto end;
1852
1853
1854     ret = avfilter_graph_create_filter(&filt_asink,
1855                                        avfilter_get_by_name("abuffersink"), "ffplay_abuffersink",
1856                                        NULL, NULL, is->agraph);
1857     if (ret < 0)
1858         goto end;
1859
1860     if ((ret = av_opt_set_int_list(filt_asink, "sample_fmts", sample_fmts,  AV_SAMPLE_FMT_NONE, AV_OPT_SEARCH_CHILDREN)) < 0)
1861         goto end;
1862     if ((ret = av_opt_set_int(filt_asink, "all_channel_counts", 1, AV_OPT_SEARCH_CHILDREN)) < 0)
1863         goto end;
1864
1865     if (force_output_format) {
1866         channel_layouts[0] = is->audio_tgt.channel_layout;
1867         channels       [0] = is->audio_tgt.channels;
1868         sample_rates   [0] = is->audio_tgt.freq;
1869         if ((ret = av_opt_set_int(filt_asink, "all_channel_counts", 0, AV_OPT_SEARCH_CHILDREN)) < 0)
1870             goto end;
1871         if ((ret = av_opt_set_int_list(filt_asink, "channel_layouts", channel_layouts,  -1, AV_OPT_SEARCH_CHILDREN)) < 0)
1872             goto end;
1873         if ((ret = av_opt_set_int_list(filt_asink, "channel_counts" , channels       ,  -1, AV_OPT_SEARCH_CHILDREN)) < 0)
1874             goto end;
1875         if ((ret = av_opt_set_int_list(filt_asink, "sample_rates"   , sample_rates   ,  -1, AV_OPT_SEARCH_CHILDREN)) < 0)
1876             goto end;
1877     }
1878
1879
1880     if ((ret = configure_filtergraph(is->agraph, afilters, filt_asrc, filt_asink)) < 0)
1881         goto end;
1882
1883     is->in_audio_filter  = filt_asrc;
1884     is->out_audio_filter = filt_asink;
1885
1886 end:
1887     if (ret < 0)
1888         avfilter_graph_free(&is->agraph);
1889     return ret;
1890 }
1891 #endif  /* CONFIG_AVFILTER */
1892
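/* Video decoding thread: reads packets from the video queue, decodes them
 * and, when built with CONFIG_AVFILTER, pushes the frames through the video
 * filtergraph (reconfiguring it whenever the frame size, pixel format or
 * packet serial changes) before handing them to queue_picture(). */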
1893 static int video_thread(void *arg)
1894 {
1895     AVPacket pkt = { 0 };
1896     VideoState *is = arg;
1897     AVFrame *frame = av_frame_alloc();
1898     double pts;
1899     int ret;
1900     int serial = 0;
1901
1902 #if CONFIG_AVFILTER
1903     AVFilterGraph *graph = avfilter_graph_alloc();
1904     AVFilterContext *filt_out = NULL, *filt_in = NULL;
1905     int last_w = 0;
1906     int last_h = 0;
1907     enum AVPixelFormat last_format = -2;
1908     int last_serial = -1;
1909 #endif
1910
1911     for (;;) {
1912         while (is->paused && !is->videoq.abort_request)
1913             SDL_Delay(10);
1914
1915         avcodec_get_frame_defaults(frame);
1916         av_free_packet(&pkt);
1917
1918         ret = get_video_frame(is, frame, &pkt, &serial);
1919         if (ret < 0)
1920             goto the_end;
1921         if (!ret)
1922             continue;
1923
1924 #if CONFIG_AVFILTER
1925         if (   last_w != frame->width
1926             || last_h != frame->height
1927             || last_format != frame->format
1928             || last_serial != serial) {
1929             av_log(NULL, AV_LOG_DEBUG,
1930                    "Video frame changed from size:%dx%d format:%s serial:%d to size:%dx%d format:%s serial:%d\n",
1931                    last_w, last_h,
1932                    (const char *)av_x_if_null(av_get_pix_fmt_name(last_format), "none"), last_serial,
1933                    frame->width, frame->height,
1934                    (const char *)av_x_if_null(av_get_pix_fmt_name(frame->format), "none"), serial);
1935             avfilter_graph_free(&graph);
1936             graph = avfilter_graph_alloc();
1937             if ((ret = configure_video_filters(graph, is, vfilters, frame)) < 0) {
1938                 SDL_Event event;
1939                 event.type = FF_QUIT_EVENT;
1940                 event.user.data1 = is;
1941                 SDL_PushEvent(&event);
1942                 av_free_packet(&pkt);
1943                 goto the_end;
1944             }
1945             filt_in  = is->in_video_filter;
1946             filt_out = is->out_video_filter;
1947             last_w = frame->width;
1948             last_h = frame->height;
1949             last_format = frame->format;
1950             last_serial = serial;
1951         }
1952
1953         ret = av_buffersrc_add_frame(filt_in, frame);
1954         if (ret < 0)
1955             goto the_end;
1956         av_frame_unref(frame);
1957         avcodec_get_frame_defaults(frame);
1958         av_free_packet(&pkt);
1959
1960         while (ret >= 0) {
1961             is->frame_last_returned_time = av_gettime() / 1000000.0;
1962
1963             ret = av_buffersink_get_frame_flags(filt_out, frame, 0);
1964             if (ret < 0) {
1965                 ret = 0;
1966                 break;
1967             }
1968
1969             is->frame_last_filter_delay = av_gettime() / 1000000.0 - is->frame_last_returned_time;
1970             if (fabs(is->frame_last_filter_delay) > AV_NOSYNC_THRESHOLD / 10.0)
1971                 is->frame_last_filter_delay = 0;
1972
1973             pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(filt_out->inputs[0]->time_base);
1974             ret = queue_picture(is, frame, pts, av_frame_get_pkt_pos(frame), serial);
1975             av_frame_unref(frame);
1976         }
1977 #else
1978         pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(is->video_st->time_base);
1979         ret = queue_picture(is, frame, pts, pkt.pos, serial);
1980         av_frame_unref(frame);
1981 #endif
1982
1983         if (ret < 0)
1984             goto the_end;
1985     }
1986  the_end:
1987     avcodec_flush_buffers(is->video_st->codec);
1988 #if CONFIG_AVFILTER
1989     avfilter_graph_free(&graph);
1990 #endif
1991     av_free_packet(&pkt);
1992     av_frame_free(&frame);
1993     return 0;
1994 }
1995
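/* Subtitle decoding thread: reads packets from the subtitle queue, decodes
 * them and, for bitmap subtitles (format 0), converts the palette from RGBA
 * to YUVA so the rectangles can later be blended onto the video overlay. */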
1996 static int subtitle_thread(void *arg)
1997 {
1998     VideoState *is = arg;
1999     SubPicture *sp;
2000     AVPacket pkt1, *pkt = &pkt1;
2001     int got_subtitle;
2002     double pts;
2003     int i, j;
2004     int r, g, b, y, u, v, a;
2005
2006     for (;;) {
2007         while (is->paused && !is->subtitleq.abort_request) {
2008             SDL_Delay(10);
2009         }
2010         if (packet_queue_get(&is->subtitleq, pkt, 1, NULL) < 0)
2011             break;
2012
2013         if (pkt->data == flush_pkt.data) {
2014             avcodec_flush_buffers(is->subtitle_st->codec);
2015             continue;
2016         }
2017         SDL_LockMutex(is->subpq_mutex);
2018         while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
2019                !is->subtitleq.abort_request) {
2020             SDL_CondWait(is->subpq_cond, is->subpq_mutex);
2021         }
2022         SDL_UnlockMutex(is->subpq_mutex);
2023
2024         if (is->subtitleq.abort_request)
2025             return 0;
2026
2027         sp = &is->subpq[is->subpq_windex];
2028
2029         /* NOTE: pts is the PTS of the _first_ picture beginning in
2030            this packet, if any */
2031         pts = 0;
2032         if (pkt->pts != AV_NOPTS_VALUE)
2033             pts = av_q2d(is->subtitle_st->time_base) * pkt->pts;
2034
2035         avcodec_decode_subtitle2(is->subtitle_st->codec, &sp->sub,
2036                                  &got_subtitle, pkt);
2037         if (got_subtitle && sp->sub.format == 0) {
2038             if (sp->sub.pts != AV_NOPTS_VALUE)
2039                 pts = sp->sub.pts / (double)AV_TIME_BASE;
2040             sp->pts = pts;
2041
2042             for (i = 0; i < sp->sub.num_rects; i++)
2043             {
2044                 for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
2045                 {
2046                     RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
2047                     y = RGB_TO_Y_CCIR(r, g, b);
2048                     u = RGB_TO_U_CCIR(r, g, b, 0);
2049                     v = RGB_TO_V_CCIR(r, g, b, 0);
2050                     YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
2051                 }
2052             }
2053
2054             /* now we can update the picture count */
2055             if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
2056                 is->subpq_windex = 0;
2057             SDL_LockMutex(is->subpq_mutex);
2058             is->subpq_size++;
2059             SDL_UnlockMutex(is->subpq_mutex);
2060         }
2061         av_free_packet(pkt);
2062     }
2063     return 0;
2064 }
2065
2066 /* copy samples into the sample array used by the audio visualization */
2067 static void update_sample_display(VideoState *is, short *samples, int samples_size)
2068 {
2069     int size, len;
2070
2071     size = samples_size / sizeof(short);
2072     while (size > 0) {
2073         len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
2074         if (len > size)
2075             len = size;
2076         memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
2077         samples += len;
2078         is->sample_array_index += len;
2079         if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
2080             is->sample_array_index = 0;
2081         size -= len;
2082     }
2083 }
2084
2085 /* return the wanted number of samples to get better sync if sync_type is video
2086  * or external master clock */
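/* For example: with nb_samples = 1024 at 48 kHz and a persistent A-V
 * difference of -0.01s, the raw request would be 1024 + (-0.01 * 48000) =
 * 544 samples, which the 10% limit (SAMPLE_CORRECTION_PERCENT_MAX) clamps to
 * 1024 * 90 / 100 = 921 samples, so the frame is only squeezed gradually
 * until the audio catches up. */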
2087 static int synchronize_audio(VideoState *is, int nb_samples)
2088 {
2089     int wanted_nb_samples = nb_samples;
2090
2091     /* if not master, then we try to remove or add samples to correct the clock */
2092     if (get_master_sync_type(is) != AV_SYNC_AUDIO_MASTER) {
2093         double diff, avg_diff;
2094         int min_nb_samples, max_nb_samples;
2095
2096         diff = get_clock(&is->audclk) - get_master_clock(is);
2097
2098         if (!isnan(diff) && fabs(diff) < AV_NOSYNC_THRESHOLD) {
2099             is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
2100             if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
2101                 /* not enough measurements yet for a reliable estimate */
2102                 is->audio_diff_avg_count++;
2103             } else {
2104                 /* estimate the A-V difference */
2105                 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
2106
2107                 if (fabs(avg_diff) >= is->audio_diff_threshold) {
2108                     wanted_nb_samples = nb_samples + (int)(diff * is->audio_src.freq);
2109                     min_nb_samples = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX) / 100));
2110                     max_nb_samples = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX) / 100));
2111                     wanted_nb_samples = FFMIN(FFMAX(wanted_nb_samples, min_nb_samples), max_nb_samples);
2112                 }
2113                 av_dlog(NULL, "diff=%f adiff=%f sample_diff=%d apts=%0.3f %f\n",
2114                         diff, avg_diff, wanted_nb_samples - nb_samples,
2115                         is->audio_clock, is->audio_diff_threshold);
2116             }
2117         } else {
2118             /* the difference is too large: it may be caused by initial
2119                PTS errors, so reset the A-V filter */
2120             is->audio_diff_avg_count = 0;
2121             is->audio_diff_cum       = 0;
2122         }
2123     }
2124
2125     return wanted_nb_samples;
2126 }
2127
2128 /**
2129  * Decode one audio frame and return its uncompressed size.
2130  *
2131  * The processed audio frame is decoded, converted if required, and
2132  * stored in is->audio_buf, with size in bytes given by the return
2133  * value.
2134  */
2135 static int audio_decode_frame(VideoState *is)
2136 {
2137     AVPacket *pkt_temp = &is->audio_pkt_temp;
2138     AVPacket *pkt = &is->audio_pkt;
2139     AVCodecContext *dec = is->audio_st->codec;
2140     int len1, data_size, resampled_data_size;
2141     int64_t dec_channel_layout;
2142     int got_frame;
2143     av_unused double audio_clock0;
2144     int new_packet = 0;
2145     int flush_complete = 0;
2146     int wanted_nb_samples;
2147     AVRational tb;
2148     int ret;
2149     int reconfigure;
2150
2151     for (;;) {
2152         /* NOTE: the audio packet can contain several frames */
2153         while (pkt_temp->size > 0 || (!pkt_temp->data && new_packet) || is->audio_buf_frames_pending) {
2154             if (!is->frame) {
2155                 if (!(is->frame = avcodec_alloc_frame()))
2156                     return AVERROR(ENOMEM);
2157             } else {
2158                 av_frame_unref(is->frame);
2159                 avcodec_get_frame_defaults(is->frame);
2160             }
2161
2162             if (is->audioq.serial != is->audio_pkt_temp_serial)
2163                 break;
2164
2165             if (is->paused)
2166                 return -1;
2167
2168             if (!is->audio_buf_frames_pending) {
2169                 if (flush_complete)
2170                     break;
2171                 new_packet = 0;
2172                 len1 = avcodec_decode_audio4(dec, is->frame, &got_frame, pkt_temp);
2173                 if (len1 < 0) {
2174                     /* if error, we skip the frame */
2175                     pkt_temp->size = 0;
2176                     break;
2177                 }
2178
2179                 pkt_temp->data += len1;
2180                 pkt_temp->size -= len1;
2181
2182                 if (!got_frame) {
2183                     /* stop sending empty packets if the decoder is finished */
2184                     if (!pkt_temp->data && dec->codec->capabilities & CODEC_CAP_DELAY)
2185                         flush_complete = 1;
2186                     continue;
2187                 }
2188
2189                 tb = (AVRational){1, is->frame->sample_rate};
2190                 if (is->frame->pts != AV_NOPTS_VALUE)
2191                     is->frame->pts = av_rescale_q(is->frame->pts, dec->time_base, tb);
2192                 if (is->frame->pts == AV_NOPTS_VALUE && pkt_temp->pts != AV_NOPTS_VALUE)
2193                     is->frame->pts = av_rescale_q(pkt_temp->pts, is->audio_st->time_base, tb);
2194                 if (pkt_temp->pts != AV_NOPTS_VALUE)
2195                     pkt_temp->pts += (double) is->frame->nb_samples / is->frame->sample_rate / av_q2d(is->audio_st->time_base);
2196
2197 #if CONFIG_AVFILTER
2198                 dec_channel_layout = get_valid_channel_layout(is->frame->channel_layout, av_frame_get_channels(is->frame));
2199
2200                 reconfigure =
2201                     cmp_audio_fmts(is->audio_filter_src.fmt, is->audio_filter_src.channels,
2202                                    is->frame->format, av_frame_get_channels(is->frame))    ||
2203                     is->audio_filter_src.channel_layout != dec_channel_layout ||
2204                     is->audio_filter_src.freq           != is->frame->sample_rate ||
2205                     is->audio_pkt_temp_serial           != is->audio_last_serial;
2206
2207                 if (reconfigure) {
2208                     char buf1[1024], buf2[1024];
2209                     av_get_channel_layout_string(buf1, sizeof(buf1), -1, is->audio_filter_src.channel_layout);
2210                     av_get_channel_layout_string(buf2, sizeof(buf2), -1, dec_channel_layout);
2211                     av_log(NULL, AV_LOG_DEBUG,
2212                            "Audio frame changed from rate:%d ch:%d fmt:%s layout:%s serial:%d to rate:%d ch:%d fmt:%s layout:%s serial:%d\n",
2213                            is->audio_filter_src.freq, is->audio_filter_src.channels, av_get_sample_fmt_name(is->audio_filter_src.fmt), buf1, is->audio_last_serial,
2214                            is->frame->sample_rate, av_frame_get_channels(is->frame), av_get_sample_fmt_name(is->frame->format), buf2, is->audio_pkt_temp_serial);
2215
2216                     is->audio_filter_src.fmt            = is->frame->format;
2217                     is->audio_filter_src.channels       = av_frame_get_channels(is->frame);
2218                     is->audio_filter_src.channel_layout = dec_channel_layout;
2219                     is->audio_filter_src.freq           = is->frame->sample_rate;
2220                     is->audio_last_serial               = is->audio_pkt_temp_serial;
2221
2222                     if ((ret = configure_audio_filters(is, afilters, 1)) < 0)
2223                         return ret;
2224                 }
2225
2226                 if ((ret = av_buffersrc_add_frame(is->in_audio_filter, is->frame)) < 0)
2227                     return ret;
2228                 av_frame_unref(is->frame);
2229 #endif
2230             }
2231 #if CONFIG_AVFILTER
2232             if ((ret = av_buffersink_get_frame_flags(is->out_audio_filter, is->frame, 0)) < 0) {
2233                 if (ret == AVERROR(EAGAIN)) {
2234                     is->audio_buf_frames_pending = 0;
2235                     continue;
2236                 }
2237                 return ret;
2238             }
2239             is->audio_buf_frames_pending = 1;
2240             tb = is->out_audio_filter->inputs[0]->time_base;
2241 #endif
2242
2243             data_size = av_samples_get_buffer_size(NULL, av_frame_get_channels(is->frame),
2244                                                    is->frame->nb_samples,
2245                                                    is->frame->format, 1);
2246
2247             dec_channel_layout =
2248                 (is->frame->channel_layout && av_frame_get_channels(is->frame) == av_get_channel_layout_nb_channels(is->frame->channel_layout)) ?
2249                 is->frame->channel_layout : av_get_default_channel_layout(av_frame_get_channels(is->frame));
2250             wanted_nb_samples = synchronize_audio(is, is->frame->nb_samples);
2251
2252             if (is->frame->format        != is->audio_src.fmt            ||
2253                 dec_channel_layout       != is->audio_src.channel_layout ||
2254                 is->frame->sample_rate   != is->audio_src.freq           ||
2255                 (wanted_nb_samples       != is->frame->nb_samples && !is->swr_ctx)) {
2256                 swr_free(&is->swr_ctx);
2257                 is->swr_ctx = swr_alloc_set_opts(NULL,
2258                                                  is->audio_tgt.channel_layout, is->audio_tgt.fmt, is->audio_tgt.freq,
2259                                                  dec_channel_layout,           is->frame->format, is->frame->sample_rate,
2260                                                  0, NULL);
2261                 if (!is->swr_ctx || swr_init(is->swr_ctx) < 0) {
2262                     fprintf(stderr, "Cannot create sample rate converter for conversion of %d Hz %s %d channels to %d Hz %s %d channels!\n",
2263                             is->frame->sample_rate, av_get_sample_fmt_name(is->frame->format), av_frame_get_channels(is->frame),
2264                             is->audio_tgt.freq, av_get_sample_fmt_name(is->audio_tgt.fmt), is->audio_tgt.channels);
2265                     break;
2266                 }
2267                 is->audio_src.channel_layout = dec_channel_layout;
2268                 is->audio_src.channels       = av_frame_get_channels(is->frame);
2269                 is->audio_src.freq = is->frame->sample_rate;
2270                 is->audio_src.fmt = is->frame->format;
2271             }
2272
2273             if (is->swr_ctx) {
2274                 const uint8_t **in = (const uint8_t **)is->frame->extended_data;
2275                 uint8_t **out = &is->audio_buf1;
2276                 int out_count = (int64_t)wanted_nb_samples * is->audio_tgt.freq / is->frame->sample_rate + 256;
2277                 int out_size  = av_samples_get_buffer_size(NULL, is->audio_tgt.channels, out_count, is->audio_tgt.fmt, 0);
2278                 int len2;
2279                 if (out_size < 0) {
2280                     fprintf(stderr, "av_samples_get_buffer_size() failed\n");
2281                     break;
2282                 }
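                /* if synchronize_audio() requested a different sample count,
                 * stretch or squeeze this frame via swresample compensation,
                 * with both the delta and the distance scaled to the output
                 * sample rate */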
2283                 if (wanted_nb_samples != is->frame->nb_samples) {
2284                     if (swr_set_compensation(is->swr_ctx, (wanted_nb_samples - is->frame->nb_samples) * is->audio_tgt.freq / is->frame->sample_rate,
2285                                                 wanted_nb_samples * is->audio_tgt.freq / is->frame->sample_rate) < 0) {
2286                         fprintf(stderr, "swr_set_compensation() failed\n");
2287                         break;
2288                     }
2289                 }
2290                 av_fast_malloc(&is->audio_buf1, &is->audio_buf1_size, out_size);
2291                 if (!is->audio_buf1)
2292                     return AVERROR(ENOMEM);
2293                 len2 = swr_convert(is->swr_ctx, out, out_count, in, is->frame->nb_samples);
2294                 if (len2 < 0) {
2295                     fprintf(stderr, "swr_convert() failed\n");
2296                     break;
2297                 }
2298                 if (len2 == out_count) {
2299                     fprintf(stderr, "warning: audio buffer is probably too small\n");
2300                     swr_init(is->swr_ctx);
2301                 }
2302                 is->audio_buf = is->audio_buf1;
2303                 resampled_data_size = len2 * is->audio_tgt.channels * av_get_bytes_per_sample(is->audio_tgt.fmt);
2304             } else {
2305                 is->audio_buf = is->frame->data[0];
2306                 resampled_data_size = data_size;
2307             }
2308
2309             audio_clock0 = is->audio_clock;
2310             /* update the audio clock with the pts */
2311             if (is->frame->pts != AV_NOPTS_VALUE) {
2312                 is->audio_clock = is->frame->pts * av_q2d(tb) + (double) is->frame->nb_samples / is->frame->sample_rate;
2313                 is->audio_clock_serial = is->audio_pkt_temp_serial;
2314             }
2315 #ifdef DEBUG
2316             {
2317                 static double last_clock;
2318                 printf("audio: delay=%0.3f clock=%0.3f clock0=%0.3f\n",
2319                        is->audio_clock - last_clock,
2320                        is->audio_clock, audio_clock0);
2321                 last_clock = is->audio_clock;
2322             }
2323 #endif
2324             return resampled_data_size;
2325         }
2326
2327         /* free the current packet */
2328         if (pkt->data)
2329             av_free_packet(pkt);
2330         memset(pkt_temp, 0, sizeof(*pkt_temp));
2331
2332         if (is->audioq.abort_request) {
2333             return -1;
2334         }
2335
2336         if (is->audioq.nb_packets == 0)
2337             SDL_CondSignal(is->continue_read_thread);
2338
2339         /* read next packet */
2340         if ((new_packet = packet_queue_get(&is->audioq, pkt, 1, &is->audio_pkt_temp_serial)) < 0)
2341             return -1;
2342
2343         if (pkt->data == flush_pkt.data) {
2344             avcodec_flush_buffers(dec);
2345             flush_complete = 0;
2346             is->audio_buf_frames_pending = 0;
2347         }
2348
2349         *pkt_temp = *pkt;
2350     }
2351 }
2352
2353 /* prepare a new audio buffer */
2354 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2355 {
2356     VideoState *is = opaque;
2357     int audio_size, len1;
2358     int bytes_per_sec;
2359     int frame_size = av_samples_get_buffer_size(NULL, is->audio_tgt.channels, 1, is->audio_tgt.fmt, 1);
2360
2361     audio_callback_time = av_gettime();
2362
2363     while (len > 0) {
2364         if (is->audio_buf_index >= is->audio_buf_size) {
2365            audio_size = audio_decode_frame(is);
2366            if (audio_size < 0) {
2367                 /* if error, just output silence */
2368                is->audio_buf      = is->silence_buf;
2369                is->audio_buf_size = sizeof(is->silence_buf) / frame_size * frame_size;
2370            } else {
2371                if (is->show_mode != SHOW_MODE_VIDEO)
2372                    update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2373                is->audio_buf_size = audio_size;
2374            }
2375            is->audio_buf_index = 0;
2376         }
2377         len1 = is->audio_buf_size - is->audio_buf_index;
2378         if (len1 > len)
2379             len1 = len;
2380         memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2381         len -= len1;
2382         stream += len1;
2383         is->audio_buf_index += len1;
2384     }
2385     bytes_per_sec = is->audio_tgt.freq * is->audio_tgt.channels * av_get_bytes_per_sample(is->audio_tgt.fmt);
2386     is->audio_write_buf_size = is->audio_buf_size - is->audio_buf_index;
2387     /* Let's assume the audio driver that is used by SDL has two periods. */
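    /* The audio clock is therefore set to the pts at the end of the last
     * decoded data minus the audio still buffered, i.e. minus
     * (2 * audio_hw_buf_size + audio_write_buf_size) / bytes_per_sec seconds. */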
2388     set_clock_at(&is->audclk, is->audio_clock - (double)(2 * is->audio_hw_buf_size + is->audio_write_buf_size) / bytes_per_sec, is->audio_clock_serial, audio_callback_time / 1000000.0);
2389     sync_clock_to_slave(&is->extclk, &is->audclk);
2390 }
2391
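/* Open the SDL audio device with the wanted channel layout and sample rate,
 * falling back through the next_nb_channels table when SDL_OpenAudio()
 * rejects the requested channel count. On success, fill audio_hw_params
 * (always S16) and return the hardware buffer size in bytes; return -1 on
 * failure. */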
2392 static int audio_open(void *opaque, int64_t wanted_channel_layout, int wanted_nb_channels, int wanted_sample_rate, struct AudioParams *audio_hw_params)
2393 {
2394     SDL_AudioSpec wanted_spec, spec;
2395     const char *env;
2396     const int next_nb_channels[] = {0, 0, 1, 6, 2, 6, 4, 6};
2397
2398     env = SDL_getenv("SDL_AUDIO_CHANNELS");
2399     if (env) {
2400         wanted_nb_channels = atoi(env);
2401         wanted_channel_layout = av_get_default_channel_layout(wanted_nb_channels);
2402     }
2403     if (!wanted_channel_layout || wanted_nb_channels != av_get_channel_layout_nb_channels(wanted_channel_layout)) {
2404         wanted_channel_layout = av_get_default_channel_layout(wanted_nb_channels);
2405         wanted_channel_layout &= ~AV_CH_LAYOUT_STEREO_DOWNMIX;
2406     }
2407     wanted_spec.channels = av_get_channel_layout_nb_channels(wanted_channel_layout);
2408     wanted_spec.freq = wanted_sample_rate;
2409     if (wanted_spec.freq <= 0 || wanted_spec.channels <= 0) {
2410         fprintf(stderr, "Invalid sample rate or channel count!\n");
2411         return -1;
2412     }
2413     wanted_spec.format = AUDIO_S16SYS;
2414     wanted_spec.silence = 0;
2415     wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
2416     wanted_spec.callback = sdl_audio_callback;
2417     wanted_spec.userdata = opaque;
2418     while (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2419         fprintf(stderr, "SDL_OpenAudio (%d channels): %s\n", wanted_spec.channels, SDL_GetError());
2420         wanted_spec.channels = next_nb_channels[FFMIN(7, wanted_spec.channels)];
2421         if (!wanted_spec.channels) {
2422             fprintf(stderr, "No more channel combinations to try, audio open failed\n");
2423             return -1;
2424         }
2425         wanted_channel_layout = av_get_default_channel_layout(wanted_spec.channels);
2426     }
2427     if (spec.format != AUDIO_S16SYS) {
2428         fprintf(stderr, "SDL advised audio format %d is not supported!\n", spec.format);
2429         return -1;
2430     }
2431     if (spec.channels != wanted_spec.channels) {
2432         wanted_channel_layout = av_get_default_channel_layout(spec.channels);
2433         if (!wanted_channel_layout) {
2434             fprintf(stderr, "SDL advised channel count %d is not supported!\n", spec.channels);
2435             return -1;
2436         }
2437     }
2438
2439     audio_hw_params->fmt = AV_SAMPLE_FMT_S16;
2440     audio_hw_params->freq = spec.freq;
2441     audio_hw_params->channel_layout = wanted_channel_layout;
2442     audio_hw_params->channels =  spec.channels;
2443     return spec.size;
2444 }
2445
2446 /* open a given stream. Return 0 if OK */
2447 static int stream_component_open(VideoState *is, int stream_index)
2448 {
2449     AVFormatContext *ic = is->ic;
2450     AVCodecContext *avctx;
2451     AVCodec *codec;
2452     const char *forced_codec_name = NULL;
2453     AVDictionary *opts;
2454     AVDictionaryEntry *t = NULL;
2455     int sample_rate, nb_channels;
2456     int64_t channel_layout;
2457     int ret;
2458
2459     if (stream_index < 0 || stream_index >= ic->nb_streams)
2460         return -1;
2461     avctx = ic->streams[stream_index]->codec;
2462
2463     codec = avcodec_find_decoder(avctx->codec_id);
2464
2465     switch(avctx->codec_type){
2466         case AVMEDIA_TYPE_AUDIO   : is->last_audio_stream    = stream_index; forced_codec_name =    audio_codec_name; break;
2467         case AVMEDIA_TYPE_SUBTITLE: is->last_subtitle_stream = stream_index; forced_codec_name = subtitle_codec_name; break;
2468         case AVMEDIA_TYPE_VIDEO   : is->last_video_stream    = stream_index; forced_codec_name =    video_codec_name; break;
2469     }
2470     if (forced_codec_name)
2471         codec = avcodec_find_decoder_by_name(forced_codec_name);
2472     if (!codec) {
2473         if (forced_codec_name) fprintf(stderr, "No codec could be found with name '%s'\n", forced_codec_name);
2474         else                   fprintf(stderr, "No codec could be found with id %d\n", avctx->codec_id);
2475         return -1;
2476     }
2477
2478     avctx->codec_id = codec->id;
2479     avctx->workaround_bugs   = workaround_bugs;
2480     avctx->lowres            = lowres;
2481     if(avctx->lowres > codec->max_lowres){
2482         av_log(avctx, AV_LOG_WARNING, "The maximum value for lowres supported by the decoder is %d\n",
2483                 codec->max_lowres);
2484         avctx->lowres= codec->max_lowres;
2485     }
2486     avctx->idct_algo         = idct;
2487     avctx->error_concealment = error_concealment;
2488
2489     if(avctx->lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
2490     if (fast)   avctx->flags2 |= CODEC_FLAG2_FAST;
2491     if(codec->capabilities & CODEC_CAP_DR1)
2492         avctx->flags |= CODEC_FLAG_EMU_EDGE;
2493
2494     opts = filter_codec_opts(codec_opts, avctx->codec_id, ic, ic->streams[stream_index], codec);
2495     if (!av_dict_get(opts, "threads", NULL, 0))
2496         av_dict_set(&opts, "threads", "auto", 0);
2497     if (avctx->lowres)
2498         av_dict_set(&opts, "lowres", av_asprintf("%d", avctx->lowres), AV_DICT_DONT_STRDUP_VAL);
2499     if (avctx->codec_type == AVMEDIA_TYPE_VIDEO || avctx->codec_type == AVMEDIA_TYPE_AUDIO)
2500         av_dict_set(&opts, "refcounted_frames", "1", 0);
2501     if (avcodec_open2(avctx, codec, &opts) < 0)
2502         return -1;
2503     if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2504         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2505         return AVERROR_OPTION_NOT_FOUND;
2506     }
2507
2508     ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2509     switch (avctx->codec_type) {
2510     case AVMEDIA_TYPE_AUDIO:
2511 #if CONFIG_AVFILTER
2512         {
2513             AVFilterLink *link;
2514
2515             is->audio_filter_src.freq           = avctx->sample_rate;
2516             is->audio_filter_src.channels       = avctx->channels;
2517             is->audio_filter_src.channel_layout = get_valid_channel_layout(avctx->channel_layout, avctx->channels);
2518             is->audio_filter_src.fmt            = avctx->sample_fmt;
2519             if ((ret = configure_audio_filters(is, afilters, 0)) < 0)
2520                 return ret;
2521             link = is->out_audio_filter->inputs[0];
2522             sample_rate    = link->sample_rate;
2523             nb_channels    = link->channels;
2524             channel_layout = link->channel_layout;
2525         }
2526 #else
2527         sample_rate    = avctx->sample_rate;
2528         nb_channels    = avctx->channels;
2529         channel_layout = avctx->channel_layout;
2530 #endif
2531
2532         /* prepare audio output */
2533         if ((ret = audio_open(is, channel_layout, nb_channels, sample_rate, &is->audio_tgt)) < 0)
2534             return ret;
2535         is->audio_hw_buf_size = ret;
2536         is->audio_src = is->audio_tgt;
2537         is->audio_buf_size  = 0;
2538         is->audio_buf_index = 0;
2539
2540         /* init averaging filter */
2541         is->audio_diff_avg_coef  = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2542         is->audio_diff_avg_count = 0;
2543         /* since we do not have precise enough audio FIFO fullness info,
2544            we correct audio sync only if the error exceeds this threshold */
2545         is->audio_diff_threshold = 2.0 * is->audio_hw_buf_size / av_samples_get_buffer_size(NULL, is->audio_tgt.channels, is->audio_tgt.freq, is->audio_tgt.fmt, 1);
2546
2547         memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
2548         memset(&is->audio_pkt_temp, 0, sizeof(is->audio_pkt_temp));
2549
2550         is->audio_stream = stream_index;
2551         is->audio_st = ic->streams[stream_index];
2552
2553         packet_queue_start(&is->audioq);
2554         SDL_PauseAudio(0);
2555         break;
2556     case AVMEDIA_TYPE_VIDEO:
2557         is->video_stream = stream_index;
2558         is->video_st = ic->streams[stream_index];
2559
2560         packet_queue_start(&is->videoq);
2561         is->video_tid = SDL_CreateThread(video_thread, is);
2562         is->queue_attachments_req = 1;
2563         break;
2564     case AVMEDIA_TYPE_SUBTITLE:
2565         is->subtitle_stream = stream_index;
2566         is->subtitle_st = ic->streams[stream_index];
2567         packet_queue_start(&is->subtitleq);
2568
2569         is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
2570         break;
2571     default:
2572         break;
2573     }
2574     return 0;
2575 }
2576
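/* Close a stream component: abort and flush its packet queue, stop the
 * associated decoding thread or audio device, free the per-stream buffers
 * and mark the stream as discarded. */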
2577 static void stream_component_close(VideoState *is, int stream_index)
2578 {
2579     AVFormatContext *ic = is->ic;
2580     AVCodecContext *avctx;
2581
2582     if (stream_index < 0 || stream_index >= ic->nb_streams)
2583         return;
2584     avctx = ic->streams[stream_index]->codec;
2585
2586     switch (avctx->codec_type) {
2587     case AVMEDIA_TYPE_AUDIO:
2588         packet_queue_abort(&is->audioq);
2589
2590         SDL_CloseAudio();
2591
2592         packet_queue_flush(&is->audioq);
2593         av_free_packet(&is->audio_pkt);
2594         swr_free(&is->swr_ctx);
2595         av_freep(&is->audio_buf1);
2596         is->audio_buf1_size = 0;
2597         is->audio_buf = NULL;
2598         av_frame_free(&is->frame);
2599
2600         if (is->rdft) {
2601             av_rdft_end(is->rdft);
2602             av_freep(&is->rdft_data);
2603             is->rdft = NULL;
2604             is->rdft_bits = 0;
2605         }
2606 #if CONFIG_AVFILTER
2607         avfilter_graph_free(&is->agraph);
2608 #endif
2609         break;
2610     case AVMEDIA_TYPE_VIDEO:
2611         packet_queue_abort(&is->videoq);
2612
2613         /* note: we also signal this condition to make sure we unblock the
2614            video thread in all cases */
2615         SDL_LockMutex(is->pictq_mutex);
2616         SDL_CondSignal(is->pictq_cond);
2617         SDL_UnlockMutex(is->pictq_mutex);
2618
2619         SDL_WaitThread(is->video_tid, NULL);
2620
2621         packet_queue_flush(&is->videoq);
2622         break;
2623     case AVMEDIA_TYPE_SUBTITLE:
2624         packet_queue_abort(&is->subtitleq);
2625
2626         /* note: we also signal this condition to make sure we unblock the
2627            subtitle thread in all cases */
2628         SDL_LockMutex(is->subpq_mutex);
2629         is->subtitle_stream_changed = 1;
2630
2631         SDL_CondSignal(is->subpq_cond);
2632         SDL_UnlockMutex(is->subpq_mutex);
2633
2634         SDL_WaitThread(is->subtitle_tid, NULL);
2635
2636         packet_queue_flush(&is->subtitleq);
2637         break;
2638     default:
2639         break;
2640     }
2641
2642     ic->streams[stream_index]->discard = AVDISCARD_ALL;
2643     avcodec_close(avctx);
2644     switch (avctx->codec_type) {
2645     case AVMEDIA_TYPE_AUDIO:
2646         is->audio_st = NULL;
2647         is->audio_stream = -1;
2648         break;
2649     case AVMEDIA_TYPE_VIDEO:
2650         is->video_st = NULL;
2651         is->video_stream = -1;
2652         break;
2653     case AVMEDIA_TYPE_SUBTITLE:
2654         is->subtitle_st = NULL;
2655         is->subtitle_stream = -1;
2656         break;
2657     default:
2658         break;
2659     }
2660 }
2661
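/* Interrupt callback for blocking libavformat I/O: returning non-zero makes
 * the pending operation abort once the user has requested to quit. */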
2662 static int decode_interrupt_cb(void *ctx)
2663 {
2664     VideoState *is = ctx;
2665     return is->abort_request;
2666 }
2667
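/* Heuristic: treat rtp/rtsp/sdp inputs, as well as rtp: and udp: URLs, as
 * realtime sources (see the infinite_buffer handling in read_thread()). */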
2668 static int is_realtime(AVFormatContext *s)
2669 {
2670     if(   !strcmp(s->iformat->name, "rtp")
2671        || !strcmp(s->iformat->name, "rtsp")
2672        || !strcmp(s->iformat->name, "sdp")
2673     )
2674         return 1;
2675
2676     if(s->pb && (   !strncmp(s->filename, "rtp:", 4)
2677                  || !strncmp(s->filename, "udp:", 4)
2678                 )
2679     )
2680         return 1;
2681     return 0;
2682 }
2683
2684 /* this thread gets the stream from the disk or the network */
2685 static int read_thread(void *arg)
2686 {
2687     VideoState *is = arg;
2688     AVFormatContext *ic = NULL;
2689     int err, i, ret;
2690     int st_index[AVMEDIA_TYPE_NB];
2691     AVPacket pkt1, *pkt = &pkt1;
2692     int eof = 0;
2693     int pkt_in_play_range = 0;
2694     AVDictionaryEntry *t;
2695     AVDictionary **opts;
2696     int orig_nb_streams;
2697     SDL_mutex *wait_mutex = SDL_CreateMutex();
2698
2699     memset(st_index, -1, sizeof(st_index));
2700     is->last_video_stream = is->video_stream = -1;
2701     is->last_audio_stream = is->audio_stream = -1;
2702     is->last_subtitle_stream = is->subtitle_stream = -1;
2703
2704     ic = avformat_alloc_context();
2705     ic->interrupt_callback.callback = decode_interrupt_cb;
2706     ic->interrupt_callback.opaque = is;
2707     err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
2708     if (err < 0) {
2709         print_error(is->filename, err);
2710         ret = -1;
2711         goto fail;
2712     }
2713     if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2714         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2715         ret = AVERROR_OPTION_NOT_FOUND;
2716         goto fail;
2717     }
2718     is->ic = ic;
2719
2720     if (genpts)
2721         ic->flags |= AVFMT_FLAG_GENPTS;
2722
2723     opts = setup_find_stream_info_opts(ic, codec_opts);
2724     orig_nb_streams = ic->nb_streams;
2725
2726     err = avformat_find_stream_info(ic, opts);
2727     if (err < 0) {
2728         fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
2729         ret = -1;
2730         goto fail;
2731     }
2732     for (i = 0; i < orig_nb_streams; i++)
2733         av_dict_free(&opts[i]);
2734     av_freep(&opts);
2735
2736     if (ic->pb)
2737         ic->pb->eof_reached = 0; // FIXME hack: ffplay should probably not use url_feof() to test for the end
2738
2739     if (seek_by_bytes < 0)
2740         seek_by_bytes = !!(ic->iformat->flags & AVFMT_TS_DISCONT) && strcmp("ogg", ic->iformat->name);
2741
2742     is->max_frame_duration = (ic->iformat->flags & AVFMT_TS_DISCONT) ? 10.0 : 3600.0;
2743
2744     if (!window_title && (t = av_dict_get(ic->metadata, "title", NULL, 0)))
2745         window_title = av_asprintf("%s - %s", t->value, input_filename);
2746
2747     /* if seeking was requested, execute it now */
2748     if (start_time != AV_NOPTS_VALUE) {
2749         int64_t timestamp;
2750
2751         timestamp = start_time;
2752         /* add the stream start time */
2753         if (ic->start_time != AV_NOPTS_VALUE)
2754             timestamp += ic->start_time;
2755         ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2756         if (ret < 0) {
2757             fprintf(stderr, "%s: could not seek to position %0.3f\n",
2758                     is->filename, (double)timestamp / AV_TIME_BASE);
2759         }
2760     }
2761
2762     is->realtime = is_realtime(ic);
2763
2764     for (i = 0; i < ic->nb_streams; i++)
2765         ic->streams[i]->discard = AVDISCARD_ALL;
2766     if (!video_disable)
2767         st_index[AVMEDIA_TYPE_VIDEO] =
2768             av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2769                                 wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2770     if (!audio_disable)
2771         st_index[AVMEDIA_TYPE_AUDIO] =
2772             av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2773                                 wanted_stream[AVMEDIA_TYPE_AUDIO],
2774                                 st_index[AVMEDIA_TYPE_VIDEO],
2775                                 NULL, 0);
2776     if (!video_disable && !subtitle_disable)
2777         st_index[AVMEDIA_TYPE_SUBTITLE] =
2778             av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2779                                 wanted_stream[AVMEDIA_TYPE_SUBTITLE],
2780                                 (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2781                                  st_index[AVMEDIA_TYPE_AUDIO] :
2782                                  st_index[AVMEDIA_TYPE_VIDEO]),
2783                                 NULL, 0);
2784     if (show_status) {
2785         av_dump_format(ic, 0, is->filename, 0);
2786     }
2787
2788     is->show_mode = show_mode;
2789
2790     /* open the streams */
2791     if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2792         stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2793     }
2794
2795     ret = -1;
2796     if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2797         ret = stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2798     }
2799     if (is->show_mode == SHOW_MODE_NONE)
2800         is->show_mode = ret >= 0 ? SHOW_MODE_VIDEO : SHOW_MODE_RDFT;
2801
2802     if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2803         stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2804     }
2805
2806     if (is->video_stream < 0 && is->audio_stream < 0) {
2807         fprintf(stderr, "%s: could not open codecs\n", is->filename);
2808         ret = -1;
2809         goto fail;
2810     }
2811
2812     if (infinite_buffer < 0 && is->realtime)
2813         infinite_buffer = 1;
2814
2815     for (;;) {
2816         if (is->abort_request)
2817             break;
2818         if (is->paused != is->last_paused) {
2819             is->last_paused = is->paused;
2820             if (is->paused)
2821                 is->read_pause_return = av_read_pause(ic);
2822             else
2823                 av_read_play(ic);
2824         }
2825 #if CONFIG_RTSP_DEMUXER || CONFIG_MMSH_PROTOCOL
2826         if (is->paused &&
2827                 (!strcmp(ic->iformat->name, "rtsp") ||
2828                  (ic->pb && !strncmp(input_filename, "mmsh:", 5)))) {
2829             /* wait 10 ms to avoid trying to get another packet */
2830             /* XXX: horrible */
2831             SDL_Delay(10);
2832             continue;
2833         }
2834 #endif
2835         if (is->seek_req) {
2836             int64_t seek_target = is->seek_pos;
2837             int64_t seek_min    = is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2838             int64_t seek_max    = is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2839 // FIXME the +-2 is needed because rounding is not done in the correct direction when
2840 //      the seek_pos/seek_rel variables are generated
2841
2842             ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2843             if (ret < 0) {
2844                 fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
2845             } else {
2846                 if (is->audio_stream >= 0) {
2847                     packet_queue_flush(&is->audioq);
2848                     packet_queue_put(&is->audioq, &flush_pkt);
2849                 }
2850                 if (is->subtitle_stream >= 0) {
2851                     packet_queue_flush(&is->subtitleq);
2852                     packet_queue_put(&is->subtitleq, &flush_pkt);
2853                 }
2854                 if (is->video_stream >= 0) {
2855                     packet_queue_flush(&is->videoq);
2856                     packet_queue_put(&is->videoq, &flush_pkt);
2857                 }
2858                 if (is->seek_flags & AVSEEK_FLAG_BYTE) {
2859                    set_clock(&is->extclk, NAN, 0);
2860                 } else {
2861                    set_clock(&is->extclk, seek_target / (double)AV_TIME_BASE, 0);
2862                 }
2863             }
2864             is->seek_req = 0;
2865             is->queue_attachments_req = 1;
2866             eof = 0;
2867             if (is->paused)
2868                 step_to_next_frame(is);
2869         }
2870         if (is->queue_attachments_req) {
2871             if (is->video_st && is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC) {
2872                 AVPacket copy;
2873                 if ((ret = av_copy_packet(&copy, &is->video_st->attached_pic)) < 0)
2874                     goto fail;
2875                 packet_queue_put(&is->videoq, &copy);
2876             }
2877             is->queue_attachments_req = 0;
2878         }
2879
2880         /* if the queues are full, no need to read more */
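        /* "full" means either the combined queue size exceeds MAX_QUEUE_SIZE,
         * or every open stream already has more than MIN_FRAMES packets
         * queued (closed streams, aborted queues and attached-picture video
         * streams count as satisfied) */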
2881         if (infinite_buffer<1 &&
2882               (is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2883             || (   (is->audioq   .nb_packets > MIN_FRAMES || is->audio_stream < 0 || is->audioq.abort_request)
2884                 && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream < 0 || is->videoq.abort_request
2885                     || (is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC))
2886                 && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream < 0 || is->subtitleq.abort_request)))) {
2887             /* wait 10 ms */
2888             SDL_LockMutex(wait_mutex);
2889             SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
2890             SDL_UnlockMutex(wait_mutex);
2891             continue;
2892         }
2893         if (eof) {
2894             if (is->video_stream >= 0) {
2895                 av_init_packet(pkt);
2896                 pkt->data = NULL;
2897                 pkt->size = 0;
2898                 pkt->stream_index = is->video_stream;
2899                 packet_queue_put(&is->videoq, pkt);
2900             }
2901             if (is->audio_stream >= 0 &&
2902                 is->audio_st->codec->codec->capabilities & CODEC_CAP_DELAY) {
2903                 av_init_packet(pkt);
2904                 pkt->data = NULL;
2905                 pkt->size = 0;
2906                 pkt->stream_index = is->audio_stream;
2907                 packet_queue_put(&is->audioq, pkt);
2908             }
2909             SDL_Delay(10);
2910             if (is->audioq.size + is->videoq.size + is->subtitleq.size == 0) {
2911                 if (loop != 1 && (!loop || --loop)) {
2912                     stream_seek(is, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
2913                 } else if (autoexit) {
2914                     ret = AVERROR_EOF;
2915                     goto fail;
2916                 }
2917             }
2918             eof = 0;
2919             continue;
2920         }
2921         ret = av_read_frame(ic, pkt);
2922         if (ret < 0) {
2923             if (ret == AVERROR_EOF || url_feof(ic->pb))
2924                 eof = 1;
2925             if (ic->pb && ic->pb->error)
2926                 break;
2927             SDL_LockMutex(wait_mutex);
2928             SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
2929             SDL_UnlockMutex(wait_mutex);
2930             continue;
2931         }
2932         /* check if packet is in play range specified by user, then queue, otherwise discard */
2933         pkt_in_play_range = duration == AV_NOPTS_VALUE ||
2934                 (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
2935                 av_q2d(ic->streams[pkt->stream_index]->time_base) -
2936                 (double)(start_time != AV_NOPTS_VALUE ? start_time : 0) / 1000000
2937                 <= ((double)duration / 1000000);
2938         if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
2939             packet_queue_put(&is->audioq, pkt);
2940         } else if (pkt->stream_index == is->video_stream && pkt_in_play_range
2941                    && !(is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC)) {
2942             packet_queue_put(&is->videoq, pkt);
2943         } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
2944             packet_queue_put(&is->subtitleq, pkt);
2945         } else {
2946             av_free_packet(pkt);
2947         }
2948     }
2949     /* wait until the end */
2950     while (!is->abort_request) {
2951         SDL_Delay(100);
2952     }
2953
2954     ret = 0;
2955  fail:
2956     /* close each stream */
2957     if (is->audio_stream >= 0)
2958         stream_component_close(is, is->audio_stream);
2959     if (is->video_stream >= 0)
2960         stream_component_close(is, is->video_stream);
2961     if (is->subtitle_stream >= 0)
2962         stream_component_close(is, is->subtitle_stream);
2963     if (is->ic) {
2964         avformat_close_input(&is->ic);
2965     }
2966
2967     if (ret != 0) {
2968         SDL_Event event;
2969
2970         event.type = FF_QUIT_EVENT;
2971         event.user.data1 = is;
2972         SDL_PushEvent(&event);
2973     }
2974     SDL_DestroyMutex(wait_mutex);
2975     return 0;
2976 }
2977
2978 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2979 {
2980     VideoState *is;
2981
2982     is = av_mallocz(sizeof(VideoState));
2983     if (!is)
2984         return NULL;
2985     av_strlcpy(is->filename, filename, sizeof(is->filename));
2986     is->iformat = iformat;
2987     is->ytop    = 0;
2988     is->xleft   = 0;
2989
2990     /* start video display */
2991     is->pictq_mutex = SDL_CreateMutex();
2992     is->pictq_cond  = SDL_CreateCond();
2993
2994     is->subpq_mutex = SDL_CreateMutex();
2995     is->subpq_cond  = SDL_CreateCond();
2996
2997     packet_queue_init(&is->videoq);
2998     packet_queue_init(&is->audioq);
2999     packet_queue_init(&is->subtitleq);
3000
3001     is->continue_read_thread = SDL_CreateCond();
3002
3003     init_clock(&is->vidclk, &is->videoq.serial);
3004     init_clock(&is->audclk, &is->audioq.serial);
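         /* the external clock is not backed by a packet queue, so it acts as its own serial master */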
3005     init_clock(&is->extclk, &is->extclk.serial);
3006     is->audio_clock_serial = -1;
3007     is->audio_last_serial = -1;
3008     is->av_sync_type = av_sync_type;
3009     is->read_tid     = SDL_CreateThread(read_thread, is);
3010     if (!is->read_tid) {
3011         av_free(is);
3012         return NULL;
3013     }
3014     return is;
3015 }
3016
3017 static void stream_cycle_channel(VideoState *is, int codec_type)
3018 {
3019     AVFormatContext *ic = is->ic;
3020     int start_index, stream_index;
3021     int old_index;
3022     AVStream *st;
3023
3024     if (codec_type == AVMEDIA_TYPE_VIDEO) {
3025         start_index = is->last_video_stream;
3026         old_index = is->video_stream;
3027     } else if (codec_type == AVMEDIA_TYPE_AUDIO) {
3028         start_index = is->last_audio_stream;
3029         old_index = is->audio_stream;
3030     } else {
3031         start_index = is->last_subtitle_stream;
3032         old_index = is->subtitle_stream;
3033     }
3034     stream_index = start_index;
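         /* scan for the next usable stream of the requested type, starting after the last selected one and wrapping around; for subtitles, wrapping past the end switches them off */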
3035     for (;;) {
3036         if (++stream_index >= is->ic->nb_streams)
3037         {
3038             if (codec_type == AVMEDIA_TYPE_SUBTITLE)
3039             {
3040                 stream_index = -1;
3041                 is->last_subtitle_stream = -1;
3042                 goto the_end;
3043             }
3044             if (start_index == -1)
3045                 return;
3046             stream_index = 0;
3047         }
3048         if (stream_index == start_index)
3049             return;
3050         st = ic->streams[stream_index];
3051         if (st->codec->codec_type == codec_type) {
3052             /* check that parameters are OK */
3053             switch (codec_type) {
3054             case AVMEDIA_TYPE_AUDIO:
3055                 if (st->codec->sample_rate != 0 &&
3056                     st->codec->channels != 0)
3057                     goto the_end;
3058                 break;
3059             case AVMEDIA_TYPE_VIDEO:
3060             case AVMEDIA_TYPE_SUBTITLE:
3061                 goto the_end;
3062             default:
3063                 break;
3064             }
3065         }
3066     }
3067  the_end:
3068     stream_component_close(is, old_index);
3069     stream_component_open(is, stream_index);
3070 }
3071
3072
3073 static void toggle_full_screen(VideoState *is)
3074 {
3075 #if defined(__APPLE__) && SDL_VERSION_ATLEAST(1, 2, 14)
3076     /* OS X needs to reallocate the SDL overlays */
3077     int i;
3078     for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++)
3079         is->pictq[i].reallocate = 1;
3080 #endif
3081     is_full_screen = !is_full_screen;
3082     video_open(is, 1, NULL);
3083 }
3084
3085 static void toggle_audio_display(VideoState *is)
3086 {
3087     int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
3088     int next = is->show_mode;
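         /* advance to the next display mode that can actually be shown: video mode needs a video stream, the waves/RDFT modes need an audio stream */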
3089     do {
3090         next = (next + 1) % SHOW_MODE_NB;
3091     } while (next != is->show_mode && ((next == SHOW_MODE_VIDEO && !is->video_st) || (next != SHOW_MODE_VIDEO && !is->audio_st)));
3092     if (is->show_mode != next) {
3093         fill_rectangle(screen,
3094                     is->xleft, is->ytop, is->width, is->height,
3095                     bgcolor, 1);
3096         is->force_refresh = 1;
3097         is->show_mode = next;
3098     }
3099 }
3100
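     /* poll for an SDL event, refreshing the display while waiting; video_refresh() reports via remaining_time how long we may sleep before the next refresh is due */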
3101 static void refresh_loop_wait_event(VideoState *is, SDL_Event *event) {
3102     double remaining_time = 0.0;
3103     SDL_PumpEvents();
3104     while (!SDL_PeepEvents(event, 1, SDL_GETEVENT, SDL_ALLEVENTS)) {
3105         if (!cursor_hidden && av_gettime() - cursor_last_shown > CURSOR_HIDE_DELAY) {
3106             SDL_ShowCursor(0);
3107             cursor_hidden = 1;
3108         }
3109         if (remaining_time > 0.0)
3110             av_usleep((int64_t)(remaining_time * 1000000.0));
3111         remaining_time = REFRESH_RATE;
3112         if (is->show_mode != SHOW_MODE_NONE && (!is->paused || is->force_refresh))
3113             video_refresh(is, &remaining_time);
3114         SDL_PumpEvents();
3115     }
3116 }
3117
3118 /* handle an event sent by the GUI */
3119 static void event_loop(VideoState *cur_stream)
3120 {
3121     SDL_Event event;
3122     double incr, pos, frac;
3123
3124     for (;;) {
3125         double x;
3126         refresh_loop_wait_event(cur_stream, &event);
3127         switch (event.type) {
3128         case SDL_KEYDOWN:
3129             if (exit_on_keydown) {
3130                 do_exit(cur_stream);
3131                 break;
3132             }
3133             switch (event.key.keysym.sym) {
3134             case SDLK_ESCAPE:
3135             case SDLK_q:
3136                 do_exit(cur_stream);
3137                 break;
3138             case SDLK_f:
3139                 toggle_full_screen(cur_stream);
3140                 cur_stream->force_refresh = 1;
3141                 break;
3142             case SDLK_p:
3143             case SDLK_SPACE:
3144                 toggle_pause(cur_stream);
3145                 break;
3146             case SDLK_s: // S: Step to next frame
3147                 step_to_next_frame(cur_stream);
3148                 break;
3149             case SDLK_a:
3150                 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
3151                 break;
3152             case SDLK_v:
3153                 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
3154                 break;
3155             case SDLK_t:
3156                 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
3157                 break;
3158             case SDLK_w:
3159                 toggle_audio_display(cur_stream);
3160                 break;
3161             case SDLK_PAGEUP:
3162                 incr = 600.0;
3163                 goto do_seek;
3164             case SDLK_PAGEDOWN:
3165                 incr = -600.0;
3166                 goto do_seek;
3167             case SDLK_LEFT:
3168                 incr = -10.0;
3169                 goto do_seek;
3170             case SDLK_RIGHT:
3171                 incr = 10.0;
3172                 goto do_seek;
3173             case SDLK_UP:
3174                 incr = 60.0;
3175                 goto do_seek;
3176             case SDLK_DOWN:
3177                 incr = -60.0;
3178             do_seek:
3179                     if (seek_by_bytes) {
3180                         if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos >= 0) {
3181                             pos = cur_stream->video_current_pos;
3182                         } else if (cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos >= 0) {
3183                             pos = cur_stream->audio_pkt.pos;
3184                         } else
3185                             pos = avio_tell(cur_stream->ic->pb);
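                             /* convert the time increment to a byte increment using the stream bit rate, with a fallback guess of 180000 bytes per second */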
3186                         if (cur_stream->ic->bit_rate)
3187                             incr *= cur_stream->ic->bit_rate / 8.0;
3188                         else
3189                             incr *= 180000.0;
3190                         pos += incr;
3191                         stream_seek(cur_stream, pos, incr, 1);
3192                     } else {
3193                         pos = get_master_clock(cur_stream);
3194                         if (isnan(pos))
3195                             pos = (double)cur_stream->seek_pos / AV_TIME_BASE;
3196                         pos += incr;
3197                         if (cur_stream->ic->start_time != AV_NOPTS_VALUE && pos < cur_stream->ic->start_time / (double)AV_TIME_BASE)
3198                             pos = cur_stream->ic->start_time / (double)AV_TIME_BASE;
3199                         stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
3200                     }
3201                 break;
3202             default:
3203                 break;
3204             }
3205             break;
3206         case SDL_VIDEOEXPOSE:
3207             cur_stream->force_refresh = 1;
3208             break;
3209         case SDL_MOUSEBUTTONDOWN:
3210             if (exit_on_mousedown) {
3211                 do_exit(cur_stream);
3212                 break;
3213             }
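                 /* fall through: a button press also reveals the cursor and triggers the seek below */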
3214         case SDL_MOUSEMOTION:
3215             if (cursor_hidden) {
3216                 SDL_ShowCursor(1);
3217                 cursor_hidden = 0;
3218             }
3219             cursor_last_shown = av_gettime();
3220             if (event.type == SDL_MOUSEBUTTONDOWN) {
3221                 x = event.button.x;
3222             } else {
3223                 if (event.motion.state != SDL_PRESSED)
3224                     break;
3225                 x = event.motion.x;
3226             }
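                     /* translate the click position into a seek target: a byte offset when seeking by bytes, otherwise the corresponding fraction of the total duration */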
3227                 if (seek_by_bytes || cur_stream->ic->duration <= 0) {
3228                     int64_t size = avio_size(cur_stream->ic->pb);
3229                     stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
3230                 } else {
3231                     int64_t ts;
3232                     int ns, hh, mm, ss;
3233                     int tns, thh, tmm, tss;
3234                     tns  = cur_stream->ic->duration / 1000000LL;
3235                     thh  = tns / 3600;
3236                     tmm  = (tns % 3600) / 60;
3237                     tss  = (tns % 60);
3238                     frac = x / cur_stream->width;
3239                     ns   = frac * tns;
3240                     hh   = ns / 3600;
3241                     mm   = (ns % 3600) / 60;
3242                     ss   = (ns % 60);
3243                     fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
3244                             hh, mm, ss, thh, tmm, tss);
3245                     ts = frac * cur_stream->ic->duration;
3246                     if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
3247                         ts += cur_stream->ic->start_time;
3248                     stream_seek(cur_stream, ts, 0, 0);
3249                 }
3250             break;
3251         case SDL_VIDEORESIZE:
3252                 screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
3253                                           SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
3254                 screen_width  = cur_stream->width  = event.resize.w;
3255                 screen_height = cur_stream->height = event.resize.h;
3256                 cur_stream->force_refresh = 1;
3257             break;
3258         case SDL_QUIT:
3259         case FF_QUIT_EVENT:
3260             do_exit(cur_stream);
3261             break;
3262         case FF_ALLOC_EVENT:
3263             alloc_picture(event.user.data1);
3264             break;
3265         default:
3266             break;
3267         }
3268     }
3269 }
3270
3271 static int opt_frame_size(void *optctx, const char *opt, const char *arg)
3272 {
3273     av_log(NULL, AV_LOG_WARNING, "Option -s is deprecated, use -video_size.\n");
3274     return opt_default(NULL, "video_size", arg);
3275 }
3276
3277 static int opt_width(void *optctx, const char *opt, const char *arg)
3278 {
3279     screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
3280     return 0;
3281 }
3282
3283 static int opt_height(void *optctx, const char *opt, const char *arg)
3284 {
3285     screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
3286     return 0;
3287 }
3288
3289 static int opt_format(void *optctx, const char *opt, const char *arg)
3290 {
3291     file_iformat = av_find_input_format(arg);
3292     if (!file_iformat) {
3293         fprintf(stderr, "Unknown input format: %s\n", arg);
3294         return AVERROR(EINVAL);
3295     }
3296     return 0;
3297 }
3298
3299 static int opt_frame_pix_fmt(void *optctx, const char *opt, const char *arg)
3300 {
3301     av_log(NULL, AV_LOG_WARNING, "Option -pix_fmt is deprecated, use -pixel_format.\n");
3302     return opt_default(NULL, "pixel_format", arg);
3303 }
3304
3305 static int opt_sync(void *optctx, const char *opt, const char *arg)
3306 {
3307     if (!strcmp(arg, "audio"))
3308         av_sync_type = AV_SYNC_AUDIO_MASTER;
3309     else if (!strcmp(arg, "video"))
3310         av_sync_type = AV_SYNC_VIDEO_MASTER;
3311     else if (!strcmp(arg, "ext"))
3312         av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
3313     else {
3314         fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
3315         exit(1);
3316     }
3317     return 0;
3318 }
3319
3320 static int opt_seek(void *optctx, const char *opt, const char *arg)
3321 {
3322     start_time = parse_time_or_die(opt, arg, 1);
3323     return 0;
3324 }
3325
3326 static int opt_duration(void *optctx, const char *opt, const char *arg)
3327 {
3328     duration = parse_time_or_die(opt, arg, 1);
3329     return 0;
3330 }
3331
3332 static int opt_show_mode(void *optctx, const char *opt, const char *arg)
3333 {
3334     show_mode = !strcmp(arg, "video") ? SHOW_MODE_VIDEO :
3335                 !strcmp(arg, "waves") ? SHOW_MODE_WAVES :
3336                 !strcmp(arg, "rdft" ) ? SHOW_MODE_RDFT  :
3337                 parse_number_or_die(opt, arg, OPT_INT, 0, SHOW_MODE_NB-1);
3338     return 0;
3339 }
3340
3341 static void opt_input_file(void *optctx, const char *filename)
3342 {
3343     if (input_filename) {
3344         fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
3345                 filename, input_filename);
3346         exit(1);
3347     }
3348     if (!strcmp(filename, "-"))
3349         filename = "pipe:";
3350     input_filename = filename;
3351 }
3352
3353 static int opt_codec(void *optctx, const char *opt, const char *arg)
3354 {
3355     const char *spec = strchr(opt, ':');
3356     if (!spec) {
3357         fprintf(stderr, "No media specifier was specified in '%s' in option '%s'\n",
3358                 arg, opt);
3359         return AVERROR(EINVAL);
3360     }
3361     spec++;
3362     switch (spec[0]) {
3363     case 'a':    audio_codec_name = arg; break;
3364     case 's': subtitle_codec_name = arg; break;
3365     case 'v':    video_codec_name = arg; break;
3366     default:
3367         fprintf(stderr, "Invalid media specifier '%s' in option '%s'\n", spec, opt);
3368         return AVERROR(EINVAL);
3369     }
3370     return 0;
3371 }
3372
3373 static int dummy;
3374
3375 static const OptionDef options[] = {
3376 #include "cmdutils_common_opts.h"
3377     { "x", HAS_ARG, { .func_arg = opt_width }, "force displayed width", "width" },
3378     { "y", HAS_ARG, { .func_arg = opt_height }, "force displayed height", "height" },
3379     { "s", HAS_ARG | OPT_VIDEO, { .func_arg = opt_frame_size }, "set frame size (WxH or abbreviation)", "size" },
3380     { "fs", OPT_BOOL, { &is_full_screen }, "force full screen" },
3381     { "an", OPT_BOOL, { &audio_disable }, "disable audio" },
3382     { "vn", OPT_BOOL, { &video_disable }, "disable video" },
3383     { "sn", OPT_BOOL, { &subtitle_disable }, "disable subtitling" },
3384     { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_AUDIO] }, "select desired audio stream", "stream_number" },
3385     { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_VIDEO] }, "select desired video stream", "stream_number" },
3386     { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_SUBTITLE] }, "select desired subtitle stream", "stream_number" },
3387     { "ss", HAS_ARG, { .func_arg = opt_seek }, "seek to a given position in seconds", "pos" },
3388     { "t", HAS_ARG, { .func_arg = opt_duration }, "play \"duration\" seconds of audio/video", "duration" },
3389     { "bytes", OPT_INT | HAS_ARG, { &seek_by_bytes }, "seek by bytes 0=off 1=on -1=auto", "val" },
3390     { "nodisp", OPT_BOOL, { &display_disable }, "disable graphical display" },
3391     { "f", HAS_ARG, { .func_arg = opt_format }, "force format", "fmt" },
3392     { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, { .func_arg = opt_frame_pix_fmt }, "set pixel format", "format" },
3393     { "stats", OPT_BOOL | OPT_EXPERT, { &show_status }, "show status", "" },
3394     { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, { &workaround_bugs }, "work around bugs", "" },
3395     { "fast", OPT_BOOL | OPT_EXPERT, { &fast }, "non-spec-compliant optimizations", "" },
3396     { "genpts", OPT_BOOL | OPT_EXPERT, { &genpts }, "generate pts", "" },
3397     { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, { &decoder_reorder_pts }, "let decoder reorder pts 0=off 1=on -1=auto", ""},
3398     { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, { &lowres }, "set low resolution decoding factor", "factor" },
3399     { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, { &idct }, "set idct algo",  "algo" },
3400     { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, { &error_concealment }, "set error concealment options",  "bit_mask" },
3401     { "sync", HAS_ARG | OPT_EXPERT, { .func_arg = opt_sync }, "set audio-video sync. type (type=audio/video/ext)", "type" },
3402     { "autoexit", OPT_BOOL | OPT_EXPERT, { &autoexit }, "exit at the end", "" },
3403     { "exitonkeydown", OPT_BOOL | OPT_EXPERT, { &exit_on_keydown }, "exit on key down", "" },
3404     { "exitonmousedown", OPT_BOOL | OPT_EXPERT, { &exit_on_mousedown }, "exit on mouse down", "" },
3405     { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, { &loop }, "set number of times the playback shall be looped", "loop count" },
3406     { "framedrop", OPT_BOOL | OPT_EXPERT, { &framedrop }, "drop frames when cpu is too slow", "" },
3407     { "infbuf", OPT_BOOL | OPT_EXPERT, { &infinite_buffer }, "don't limit the input buffer size (useful with realtime streams)", "" },
3408     { "window_title", OPT_STRING | HAS_ARG, { &window_title }, "set window title", "window title" },
3409 #if CONFIG_AVFILTER
3410     { "vf", OPT_STRING | HAS_ARG, { &vfilters }, "set video filters", "filter_graph" },
3411     { "af", OPT_STRING | HAS_ARG, { &afilters }, "set audio filters", "filter_graph" },
3412 #endif
3413     { "rdftspeed", OPT_INT | HAS_ARG | OPT_AUDIO | OPT_EXPERT, { &rdftspeed }, "rdft speed", "msecs" },
3414     { "showmode", HAS_ARG, { .func_arg = opt_show_mode }, "select show mode (0 = video, 1 = waves, 2 = RDFT)", "mode" },
3415     { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, { .func_arg = opt_default }, "generic catch all option", "" },
3416     { "i", OPT_BOOL, { &dummy }, "read specified file", "input_file" },
3417     { "codec", HAS_ARG, { .func_arg = opt_codec }, "force decoder", "decoder_name" },
3418     { "acodec", HAS_ARG | OPT_STRING | OPT_EXPERT, {    &audio_codec_name }, "force audio decoder",    "decoder_name" },
3419     { "scodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &subtitle_codec_name }, "force subtitle decoder", "decoder_name" },
3420     { "vcodec", HAS_ARG | OPT_STRING | OPT_EXPERT, {    &video_codec_name }, "force video decoder",    "decoder_name" },
3421     { NULL, },
3422 };
3423
3424 static void show_usage(void)
3425 {
3426     av_log(NULL, AV_LOG_INFO, "Simple media player\n");
3427     av_log(NULL, AV_LOG_INFO, "usage: %s [options] input_file\n", program_name);
3428     av_log(NULL, AV_LOG_INFO, "\n");
3429 }
3430
3431 void show_help_default(const char *opt, const char *arg)
3432 {
3433     av_log_set_callback(log_callback_help);
3434     show_usage();
3435     show_help_options(options, "Main options:", 0, OPT_EXPERT, 0);
3436     show_help_options(options, "Advanced options:", OPT_EXPERT, 0, 0);
3437     printf("\n");
3438     show_help_children(avcodec_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3439     show_help_children(avformat_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3440 #if !CONFIG_AVFILTER
3441     show_help_children(sws_get_class(), AV_OPT_FLAG_ENCODING_PARAM);
3442 #else
3443     show_help_children(avfilter_get_class(), AV_OPT_FLAG_FILTERING_PARAM);
3444 #endif
3445     printf("\nWhile playing:\n"
3446            "q, ESC              quit\n"
3447            "f                   toggle full screen\n"
3448            "p, SPC              pause\n"
3449            "a                   cycle audio channel\n"
3450            "v                   cycle video channel\n"
3451            "t                   cycle subtitle channel\n"
3452            "w                   cycle audio display modes (video/waves/RDFT)\n"
3453            "s                   activate frame-step mode\n"
3454            "left/right          seek backward/forward 10 seconds\n"
3455            "down/up             seek backward/forward 1 minute\n"
3456            "page down/page up   seek backward/forward 10 minutes\n"
3457            "mouse click         seek to percentage in file corresponding to fraction of width\n"
3458            );
3459 }
3460
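     /* lock manager callback for av_lockmgr_register(): returns 0 on success, non-zero on failure */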
3461 static int lockmgr(void **mtx, enum AVLockOp op)
3462 {
3463     switch (op) {
3464     case AV_LOCK_CREATE:
3465         *mtx = SDL_CreateMutex();
3466         if (!*mtx)
3467             return 1;
3468         return 0;
3469     case AV_LOCK_OBTAIN:
3470         return !!SDL_LockMutex(*mtx);
3471     case AV_LOCK_RELEASE:
3472         return !!SDL_UnlockMutex(*mtx);
3473     case AV_LOCK_DESTROY:
3474         SDL_DestroyMutex(*mtx);
3475         return 0;
3476     }
3477     return 1;
3478 }
3479
3480 /* program entry point */
3481 int main(int argc, char **argv)
3482 {
3483     int flags;
3484     VideoState *is;
3485     char dummy_videodriver[] = "SDL_VIDEODRIVER=dummy";
3486
3487     av_log_set_flags(AV_LOG_SKIP_REPEATED);
3488     parse_loglevel(argc, argv, options);
3489
3490     /* register all codecs, demuxers and protocols */
3491     avcodec_register_all();
3492 #if CONFIG_AVDEVICE
3493     avdevice_register_all();
3494 #endif
3495 #if CONFIG_AVFILTER
3496     avfilter_register_all();
3497 #endif
3498     av_register_all();
3499     avformat_network_init();
3500
3501     init_opts();
3502
3503     signal(SIGINT , sigterm_handler); /* Interrupt (ANSI).    */
3504     signal(SIGTERM, sigterm_handler); /* Termination (ANSI).  */
3505
3506     show_banner(argc, argv, options);
3507
3508     parse_options(NULL, argc, argv, options, opt_input_file);
3509
3510     if (!input_filename) {
3511         show_usage();
3512         fprintf(stderr, "An input file must be specified\n");
3513         fprintf(stderr, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
3514         exit(1);
3515     }
3516
3517     if (display_disable) {
3518         video_disable = 1;
3519     }
3520     flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3521     if (audio_disable)
3522         flags &= ~SDL_INIT_AUDIO;
3523     if (display_disable)
3524         SDL_putenv(dummy_videodriver); /* For the event queue, we always need a video driver. */
3525 #if !defined(__MINGW32__) && !defined(__APPLE__)
3526     flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
3527 #endif
3528     if (SDL_Init(flags)) {
3529         fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
3530         fprintf(stderr, "(Did you set the DISPLAY variable?)\n");
3531         exit(1);
3532     }
3533
3534     if (!display_disable) {
3535         const SDL_VideoInfo *vi = SDL_GetVideoInfo();
3536         fs_screen_width = vi->current_w;
3537         fs_screen_height = vi->current_h;
3538     }
3539
3540     SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
3541     SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3542     SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3543
3544     if (av_lockmgr_register(lockmgr)) {
3545         fprintf(stderr, "Could not initialize lock manager!\n");
3546         do_exit(NULL);
3547     }
3548
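         /* global flush sentinel: its data pointer refers to the packet itself, and it is queued right after every packet_queue_flush() so the decoding threads can detect the discontinuity */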
3549     av_init_packet(&flush_pkt);
3550     flush_pkt.data = (uint8_t *)&flush_pkt;
3551
3552     is = stream_open(input_filename, file_iformat);
3553     if (!is) {
3554         fprintf(stderr, "Failed to initialize VideoState!\n");
3555         do_exit(NULL);
3556     }
3557
3558     event_loop(is);
3559
3560     /* event_loop() above never returns; this point is not reached */
3561
3562     return 0;
3563 }