ffplay: get rid of void casts in the option table
[ffmpeg.git] / ffplay.c
1 /*
2  * Copyright (c) 2003 Fabrice Bellard
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20
21 /**
22  * @file
23  * simple media player based on the FFmpeg libraries
24  */
25
26 #include "config.h"
27 #include <inttypes.h>
28 #include <math.h>
29 #include <limits.h>
30 #include <signal.h>
31 #include "libavutil/avstring.h"
32 #include "libavutil/colorspace.h"
33 #include "libavutil/mathematics.h"
34 #include "libavutil/pixdesc.h"
35 #include "libavutil/imgutils.h"
36 #include "libavutil/dict.h"
37 #include "libavutil/parseutils.h"
38 #include "libavutil/samplefmt.h"
39 #include "libavutil/avassert.h"
40 #include "libavutil/time.h"
41 #include "libavformat/avformat.h"
42 #include "libavdevice/avdevice.h"
43 #include "libswscale/swscale.h"
44 #include "libavutil/opt.h"
45 #include "libavcodec/avfft.h"
46 #include "libswresample/swresample.h"
47
48 #if CONFIG_AVFILTER
49 # include "libavfilter/avcodec.h"
50 # include "libavfilter/avfilter.h"
51 # include "libavfilter/avfiltergraph.h"
52 # include "libavfilter/buffersink.h"
53 # include "libavfilter/buffersrc.h"
54 #endif
55
56 #include <SDL.h>
57 #include <SDL_thread.h>
58
59 #include "cmdutils.h"
60
61 #include <assert.h>
62
63 const char program_name[] = "ffplay";
64 const int program_birth_year = 2003;
65
66 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
67 #define MIN_FRAMES 5
68
69 /* SDL audio buffer size, in samples. Should be small to have precise
70    A/V sync as SDL does not have hardware buffer fullness info. */
71 #define SDL_AUDIO_BUFFER_SIZE 1024
72
73 /* no AV sync correction is done if below the AV sync threshold */
74 #define AV_SYNC_THRESHOLD 0.01
75 /* no AV correction is done if too big error */
76 #define AV_NOSYNC_THRESHOLD 10.0
77
78 /* maximum audio speed change to get correct sync */
79 #define SAMPLE_CORRECTION_PERCENT_MAX 10
80
81 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
82 #define AUDIO_DIFF_AVG_NB   20
83
84 /* NOTE: the size must be big enough to compensate the hardware audio buffersize size */
85 #define SAMPLE_ARRAY_SIZE (2 * 65536)
86
87 static int sws_flags = SWS_BICUBIC;
88
89 typedef struct PacketQueue {
90     AVPacketList *first_pkt, *last_pkt;
91     int nb_packets;
92     int size;
93     int abort_request;
94     SDL_mutex *mutex;
95     SDL_cond *cond;
96 } PacketQueue;
97
98 #define VIDEO_PICTURE_QUEUE_SIZE 2
99 #define SUBPICTURE_QUEUE_SIZE 4
100
101 typedef struct VideoPicture {
102     double pts;                                  ///< presentation time stamp for this picture
103     int64_t pos;                                 ///< byte position in file
104     int skip;
105     SDL_Overlay *bmp;
106     int width, height; /* source height & width */
107     AVRational sample_aspect_ratio;
108     int allocated;
109     int reallocate;
110
111 #if CONFIG_AVFILTER
112     AVFilterBufferRef *picref;
113 #endif
114 } VideoPicture;
115
116 typedef struct SubPicture {
117     double pts; /* presentation time stamp for this picture */
118     AVSubtitle sub;
119 } SubPicture;
120
121 typedef struct AudioParams {
122     int freq;
123     int channels;
124     int channel_layout;
125     enum AVSampleFormat fmt;
126 } AudioParams;
127
128 enum {
129     AV_SYNC_AUDIO_MASTER, /* default choice */
130     AV_SYNC_VIDEO_MASTER,
131     AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
132 };
133
134 typedef struct VideoState {
135     SDL_Thread *read_tid;
136     SDL_Thread *video_tid;
137     SDL_Thread *refresh_tid;
138     AVInputFormat *iformat;
139     int no_background;
140     int abort_request;
141     int force_refresh;
142     int paused;
143     int last_paused;
144     int que_attachments_req;
145     int seek_req;
146     int seek_flags;
147     int64_t seek_pos;
148     int64_t seek_rel;
149     int read_pause_return;
150     AVFormatContext *ic;
151
152     int audio_stream;
153
154     int av_sync_type;
155     double external_clock; /* external clock base */
156     int64_t external_clock_time;
157
158     double audio_clock;
159     double audio_diff_cum; /* used for AV difference average computation */
160     double audio_diff_avg_coef;
161     double audio_diff_threshold;
162     int audio_diff_avg_count;
163     AVStream *audio_st;
164     PacketQueue audioq;
165     int audio_hw_buf_size;
166     DECLARE_ALIGNED(16,uint8_t,audio_buf2)[AVCODEC_MAX_AUDIO_FRAME_SIZE * 4];
167     uint8_t silence_buf[SDL_AUDIO_BUFFER_SIZE];
168     uint8_t *audio_buf;
169     uint8_t *audio_buf1;
170     unsigned int audio_buf_size; /* in bytes */
171     int audio_buf_index; /* in bytes */
172     int audio_write_buf_size;
173     AVPacket audio_pkt_temp;
174     AVPacket audio_pkt;
175     struct AudioParams audio_src;
176     struct AudioParams audio_tgt;
177     struct SwrContext *swr_ctx;
178     double audio_current_pts;
179     double audio_current_pts_drift;
180     int frame_drops_early;
181     int frame_drops_late;
182     AVFrame *frame;
183
184     enum ShowMode {
185         SHOW_MODE_NONE = -1, SHOW_MODE_VIDEO = 0, SHOW_MODE_WAVES, SHOW_MODE_RDFT, SHOW_MODE_NB
186     } show_mode;
187     int16_t sample_array[SAMPLE_ARRAY_SIZE];
188     int sample_array_index;
189     int last_i_start;
190     RDFTContext *rdft;
191     int rdft_bits;
192     FFTSample *rdft_data;
193     int xpos;
194
195     SDL_Thread *subtitle_tid;
196     int subtitle_stream;
197     int subtitle_stream_changed;
198     AVStream *subtitle_st;
199     PacketQueue subtitleq;
200     SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
201     int subpq_size, subpq_rindex, subpq_windex;
202     SDL_mutex *subpq_mutex;
203     SDL_cond *subpq_cond;
204
205     double frame_timer;
206     double frame_last_pts;
207     double frame_last_duration;
208     double frame_last_dropped_pts;
209     double frame_last_returned_time;
210     double frame_last_filter_delay;
211     int64_t frame_last_dropped_pos;
212     double video_clock;                          ///< pts of last decoded frame / predicted pts of next decoded frame
213     int video_stream;
214     AVStream *video_st;
215     PacketQueue videoq;
216     double video_current_pts;                    ///< current displayed pts (different from video_clock if frame fifos are used)
217     double video_current_pts_drift;              ///< video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
218     int64_t video_current_pos;                   ///< current displayed file pos
219     VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
220     int pictq_size, pictq_rindex, pictq_windex;
221     SDL_mutex *pictq_mutex;
222     SDL_cond *pictq_cond;
223 #if !CONFIG_AVFILTER
224     struct SwsContext *img_convert_ctx;
225 #endif
226
227     char filename[1024];
228     int width, height, xleft, ytop;
229     int step;
230
231 #if CONFIG_AVFILTER
232     AVFilterContext *in_video_filter;           ///< the first filter in the video chain
233     AVFilterContext *out_video_filter;          ///< the last filter in the video chain
234     int use_dr1;
235     FrameBuffer *buffer_pool;
236 #endif
237
238     int refresh;
239     int last_video_stream, last_audio_stream, last_subtitle_stream;
240 } VideoState;
241
242 typedef struct AllocEventProps {
243     VideoState *is;
244     AVFrame *frame;
245 } AllocEventProps;
246
247 /* options specified by the user */
248 static AVInputFormat *file_iformat;
249 static const char *input_filename;
250 static const char *window_title;
251 static int fs_screen_width;
252 static int fs_screen_height;
253 static int screen_width  = 0;
254 static int screen_height = 0;
255 static int audio_disable;
256 static int video_disable;
257 static int wanted_stream[AVMEDIA_TYPE_NB] = {
258     [AVMEDIA_TYPE_AUDIO]    = -1,
259     [AVMEDIA_TYPE_VIDEO]    = -1,
260     [AVMEDIA_TYPE_SUBTITLE] = -1,
261 };
262 static int seek_by_bytes = -1;
263 static int display_disable;
264 static int show_status = 1;
265 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
266 static int64_t start_time = AV_NOPTS_VALUE;
267 static int64_t duration = AV_NOPTS_VALUE;
268 static int workaround_bugs = 1;
269 static int fast = 0;
270 static int genpts = 0;
271 static int lowres = 0;
272 static int idct = FF_IDCT_AUTO;
273 static enum AVDiscard skip_frame       = AVDISCARD_DEFAULT;
274 static enum AVDiscard skip_idct        = AVDISCARD_DEFAULT;
275 static enum AVDiscard skip_loop_filter = AVDISCARD_DEFAULT;
276 static int error_concealment = 3;
277 static int decoder_reorder_pts = -1;
278 static int autoexit;
279 static int exit_on_keydown;
280 static int exit_on_mousedown;
281 static int loop = 1;
282 static int framedrop = -1;
283 static int infinite_buffer = 0;
284 static enum ShowMode show_mode = SHOW_MODE_NONE;
285 static const char *audio_codec_name;
286 static const char *subtitle_codec_name;
287 static const char *video_codec_name;
288 static int rdftspeed = 20;
289 #if CONFIG_AVFILTER
290 static char *vfilters = NULL;
291 #endif
292
293 /* current context */
294 static int is_full_screen;
295 static int64_t audio_callback_time;
296
297 static AVPacket flush_pkt;
298
299 #define FF_ALLOC_EVENT   (SDL_USEREVENT)
300 #define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
301 #define FF_QUIT_EVENT    (SDL_USEREVENT + 2)
302
303 static SDL_Surface *screen;
304
305 void av_noreturn exit_program(int ret)
306 {
307     exit(ret);
308 }
309
310 static int packet_queue_put_private(PacketQueue *q, AVPacket *pkt)
311 {
312     AVPacketList *pkt1;
313
314     if (q->abort_request)
315        return -1;
316
317     pkt1 = av_malloc(sizeof(AVPacketList));
318     if (!pkt1)
319         return -1;
320     pkt1->pkt = *pkt;
321     pkt1->next = NULL;
322
323     if (!q->last_pkt)
324         q->first_pkt = pkt1;
325     else
326         q->last_pkt->next = pkt1;
327     q->last_pkt = pkt1;
328     q->nb_packets++;
329     q->size += pkt1->pkt.size + sizeof(*pkt1);
330     /* XXX: should duplicate packet data in DV case */
331     SDL_CondSignal(q->cond);
332     return 0;
333 }
334
335 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
336 {
337     int ret;
338
339     /* duplicate the packet */
340     if (pkt != &flush_pkt && av_dup_packet(pkt) < 0)
341         return -1;
342
343     SDL_LockMutex(q->mutex);
344     ret = packet_queue_put_private(q, pkt);
345     SDL_UnlockMutex(q->mutex);
346
347     if (pkt != &flush_pkt && ret < 0)
348         av_free_packet(pkt);
349
350     return ret;
351 }
352
353 /* packet queue handling */
354 static void packet_queue_init(PacketQueue *q)
355 {
356     memset(q, 0, sizeof(PacketQueue));
357     q->mutex = SDL_CreateMutex();
358     q->cond = SDL_CreateCond();
359     q->abort_request = 1;
360 }
361
362 static void packet_queue_flush(PacketQueue *q)
363 {
364     AVPacketList *pkt, *pkt1;
365
366     SDL_LockMutex(q->mutex);
367     for (pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
368         pkt1 = pkt->next;
369         av_free_packet(&pkt->pkt);
370         av_freep(&pkt);
371     }
372     q->last_pkt = NULL;
373     q->first_pkt = NULL;
374     q->nb_packets = 0;
375     q->size = 0;
376     SDL_UnlockMutex(q->mutex);
377 }
378
379 static void packet_queue_destroy(PacketQueue *q)
380 {
381     packet_queue_flush(q);
382     SDL_DestroyMutex(q->mutex);
383     SDL_DestroyCond(q->cond);
384 }
385
386 static void packet_queue_abort(PacketQueue *q)
387 {
388     SDL_LockMutex(q->mutex);
389
390     q->abort_request = 1;
391
392     SDL_CondSignal(q->cond);
393
394     SDL_UnlockMutex(q->mutex);
395 }
396
397 static void packet_queue_start(PacketQueue *q)
398 {
399     SDL_LockMutex(q->mutex);
400     q->abort_request = 0;
401     packet_queue_put_private(q, &flush_pkt);
402     SDL_UnlockMutex(q->mutex);
403 }
404
405 /* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
406 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
407 {
408     AVPacketList *pkt1;
409     int ret;
410
411     SDL_LockMutex(q->mutex);
412
413     for (;;) {
414         if (q->abort_request) {
415             ret = -1;
416             break;
417         }
418
419         pkt1 = q->first_pkt;
420         if (pkt1) {
421             q->first_pkt = pkt1->next;
422             if (!q->first_pkt)
423                 q->last_pkt = NULL;
424             q->nb_packets--;
425             q->size -= pkt1->pkt.size + sizeof(*pkt1);
426             *pkt = pkt1->pkt;
427             av_free(pkt1);
428             ret = 1;
429             break;
430         } else if (!block) {
431             ret = 0;
432             break;
433         } else {
434             SDL_CondWait(q->cond, q->mutex);
435         }
436     }
437     SDL_UnlockMutex(q->mutex);
438     return ret;
439 }
440
441 static inline void fill_rectangle(SDL_Surface *screen,
442                                   int x, int y, int w, int h, int color)
443 {
444     SDL_Rect rect;
445     rect.x = x;
446     rect.y = y;
447     rect.w = w;
448     rect.h = h;
449     SDL_FillRect(screen, &rect, color);
450 }
451
452 #define ALPHA_BLEND(a, oldp, newp, s)\
453 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
454
455 #define RGBA_IN(r, g, b, a, s)\
456 {\
457     unsigned int v = ((const uint32_t *)(s))[0];\
458     a = (v >> 24) & 0xff;\
459     r = (v >> 16) & 0xff;\
460     g = (v >> 8) & 0xff;\
461     b = v & 0xff;\
462 }
463
464 #define YUVA_IN(y, u, v, a, s, pal)\
465 {\
466     unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
467     a = (val >> 24) & 0xff;\
468     y = (val >> 16) & 0xff;\
469     u = (val >> 8) & 0xff;\
470     v = val & 0xff;\
471 }
472
473 #define YUVA_OUT(d, y, u, v, a)\
474 {\
475     ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
476 }
477
478
479 #define BPP 1
480
481 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
482 {
483     int wrap, wrap3, width2, skip2;
484     int y, u, v, a, u1, v1, a1, w, h;
485     uint8_t *lum, *cb, *cr;
486     const uint8_t *p;
487     const uint32_t *pal;
488     int dstx, dsty, dstw, dsth;
489
490     dstw = av_clip(rect->w, 0, imgw);
491     dsth = av_clip(rect->h, 0, imgh);
492     dstx = av_clip(rect->x, 0, imgw - dstw);
493     dsty = av_clip(rect->y, 0, imgh - dsth);
494     lum = dst->data[0] + dsty * dst->linesize[0];
495     cb  = dst->data[1] + (dsty >> 1) * dst->linesize[1];
496     cr  = dst->data[2] + (dsty >> 1) * dst->linesize[2];
497
498     width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
499     skip2 = dstx >> 1;
500     wrap = dst->linesize[0];
501     wrap3 = rect->pict.linesize[0];
502     p = rect->pict.data[0];
503     pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */
504
505     if (dsty & 1) {
506         lum += dstx;
507         cb += skip2;
508         cr += skip2;
509
510         if (dstx & 1) {
511             YUVA_IN(y, u, v, a, p, pal);
512             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
513             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
514             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
515             cb++;
516             cr++;
517             lum++;
518             p += BPP;
519         }
520         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
521             YUVA_IN(y, u, v, a, p, pal);
522             u1 = u;
523             v1 = v;
524             a1 = a;
525             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
526
527             YUVA_IN(y, u, v, a, p + BPP, pal);
528             u1 += u;
529             v1 += v;
530             a1 += a;
531             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
532             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
533             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
534             cb++;
535             cr++;
536             p += 2 * BPP;
537             lum += 2;
538         }
539         if (w) {
540             YUVA_IN(y, u, v, a, p, pal);
541             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
542             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
543             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
544             p++;
545             lum++;
546         }
547         p += wrap3 - dstw * BPP;
548         lum += wrap - dstw - dstx;
549         cb += dst->linesize[1] - width2 - skip2;
550         cr += dst->linesize[2] - width2 - skip2;
551     }
552     for (h = dsth - (dsty & 1); h >= 2; h -= 2) {
553         lum += dstx;
554         cb += skip2;
555         cr += skip2;
556
557         if (dstx & 1) {
558             YUVA_IN(y, u, v, a, p, pal);
559             u1 = u;
560             v1 = v;
561             a1 = a;
562             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
563             p += wrap3;
564             lum += wrap;
565             YUVA_IN(y, u, v, a, p, pal);
566             u1 += u;
567             v1 += v;
568             a1 += a;
569             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
570             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
571             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
572             cb++;
573             cr++;
574             p += -wrap3 + BPP;
575             lum += -wrap + 1;
576         }
577         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
578             YUVA_IN(y, u, v, a, p, pal);
579             u1 = u;
580             v1 = v;
581             a1 = a;
582             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
583
584             YUVA_IN(y, u, v, a, p + BPP, pal);
585             u1 += u;
586             v1 += v;
587             a1 += a;
588             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
589             p += wrap3;
590             lum += wrap;
591
592             YUVA_IN(y, u, v, a, p, pal);
593             u1 += u;
594             v1 += v;
595             a1 += a;
596             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
597
598             YUVA_IN(y, u, v, a, p + BPP, pal);
599             u1 += u;
600             v1 += v;
601             a1 += a;
602             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
603
604             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
605             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
606
607             cb++;
608             cr++;
609             p += -wrap3 + 2 * BPP;
610             lum += -wrap + 2;
611         }
612         if (w) {
613             YUVA_IN(y, u, v, a, p, pal);
614             u1 = u;
615             v1 = v;
616             a1 = a;
617             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
618             p += wrap3;
619             lum += wrap;
620             YUVA_IN(y, u, v, a, p, pal);
621             u1 += u;
622             v1 += v;
623             a1 += a;
624             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
625             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
626             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
627             cb++;
628             cr++;
629             p += -wrap3 + BPP;
630             lum += -wrap + 1;
631         }
632         p += wrap3 + (wrap3 - dstw * BPP);
633         lum += wrap + (wrap - dstw - dstx);
634         cb += dst->linesize[1] - width2 - skip2;
635         cr += dst->linesize[2] - width2 - skip2;
636     }
637     /* handle odd height */
638     if (h) {
639         lum += dstx;
640         cb += skip2;
641         cr += skip2;
642
643         if (dstx & 1) {
644             YUVA_IN(y, u, v, a, p, pal);
645             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
646             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
647             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
648             cb++;
649             cr++;
650             lum++;
651             p += BPP;
652         }
653         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
654             YUVA_IN(y, u, v, a, p, pal);
655             u1 = u;
656             v1 = v;
657             a1 = a;
658             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
659
660             YUVA_IN(y, u, v, a, p + BPP, pal);
661             u1 += u;
662             v1 += v;
663             a1 += a;
664             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
665             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
666             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
667             cb++;
668             cr++;
669             p += 2 * BPP;
670             lum += 2;
671         }
672         if (w) {
673             YUVA_IN(y, u, v, a, p, pal);
674             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
675             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
676             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
677         }
678     }
679 }
680
681 static void free_subpicture(SubPicture *sp)
682 {
683     avsubtitle_free(&sp->sub);
684 }
685
686 static void video_image_display(VideoState *is)
687 {
688     VideoPicture *vp;
689     SubPicture *sp;
690     AVPicture pict;
691     float aspect_ratio;
692     int width, height, x, y;
693     SDL_Rect rect;
694     int i;
695
696     vp = &is->pictq[is->pictq_rindex];
697     if (vp->bmp) {
698         if (vp->sample_aspect_ratio.num == 0)
699             aspect_ratio = 0;
700         else
701             aspect_ratio = av_q2d(vp->sample_aspect_ratio);
702
703         if (aspect_ratio <= 0.0)
704             aspect_ratio = 1.0;
705         aspect_ratio *= (float)vp->width / (float)vp->height;
706
707         if (is->subtitle_st) {
708             if (is->subpq_size > 0) {
709                 sp = &is->subpq[is->subpq_rindex];
710
711                 if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) {
712                     SDL_LockYUVOverlay (vp->bmp);
713
714                     pict.data[0] = vp->bmp->pixels[0];
715                     pict.data[1] = vp->bmp->pixels[2];
716                     pict.data[2] = vp->bmp->pixels[1];
717
718                     pict.linesize[0] = vp->bmp->pitches[0];
719                     pict.linesize[1] = vp->bmp->pitches[2];
720                     pict.linesize[2] = vp->bmp->pitches[1];
721
722                     for (i = 0; i < sp->sub.num_rects; i++)
723                         blend_subrect(&pict, sp->sub.rects[i],
724                                       vp->bmp->w, vp->bmp->h);
725
726                     SDL_UnlockYUVOverlay (vp->bmp);
727                 }
728             }
729         }
730
731
732         /* XXX: we suppose the screen has a 1.0 pixel ratio */
733         height = is->height;
734         width = ((int)rint(height * aspect_ratio)) & ~1;
735         if (width > is->width) {
736             width = is->width;
737             height = ((int)rint(width / aspect_ratio)) & ~1;
738         }
739         x = (is->width - width) / 2;
740         y = (is->height - height) / 2;
741         is->no_background = 0;
742         rect.x = is->xleft + x;
743         rect.y = is->ytop  + y;
744         rect.w = FFMAX(width,  1);
745         rect.h = FFMAX(height, 1);
746         SDL_DisplayYUVOverlay(vp->bmp, &rect);
747     }
748 }
749
750 static inline int compute_mod(int a, int b)
751 {
752     return a < 0 ? a%b + b : a%b;
753 }
754
755 static void video_audio_display(VideoState *s)
756 {
757     int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
758     int ch, channels, h, h2, bgcolor, fgcolor;
759     int16_t time_diff;
760     int rdft_bits, nb_freq;
761
762     for (rdft_bits = 1; (1 << rdft_bits) < 2 * s->height; rdft_bits++)
763         ;
764     nb_freq = 1 << (rdft_bits - 1);
765
766     /* compute display index : center on currently output samples */
767     channels = s->audio_tgt.channels;
768     nb_display_channels = channels;
769     if (!s->paused) {
770         int data_used= s->show_mode == SHOW_MODE_WAVES ? s->width : (2*nb_freq);
771         n = 2 * channels;
772         delay = s->audio_write_buf_size;
773         delay /= n;
774
775         /* to be more precise, we take into account the time spent since
776            the last buffer computation */
777         if (audio_callback_time) {
778             time_diff = av_gettime() - audio_callback_time;
779             delay -= (time_diff * s->audio_tgt.freq) / 1000000;
780         }
781
782         delay += 2 * data_used;
783         if (delay < data_used)
784             delay = data_used;
785
786         i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
787         if (s->show_mode == SHOW_MODE_WAVES) {
788             h = INT_MIN;
789             for (i = 0; i < 1000; i += channels) {
790                 int idx = (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
791                 int a = s->sample_array[idx];
792                 int b = s->sample_array[(idx + 4 * channels) % SAMPLE_ARRAY_SIZE];
793                 int c = s->sample_array[(idx + 5 * channels) % SAMPLE_ARRAY_SIZE];
794                 int d = s->sample_array[(idx + 9 * channels) % SAMPLE_ARRAY_SIZE];
795                 int score = a - d;
796                 if (h < score && (b ^ c) < 0) {
797                     h = score;
798                     i_start = idx;
799                 }
800             }
801         }
802
803         s->last_i_start = i_start;
804     } else {
805         i_start = s->last_i_start;
806     }
807
808     bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
809     if (s->show_mode == SHOW_MODE_WAVES) {
810         fill_rectangle(screen,
811                        s->xleft, s->ytop, s->width, s->height,
812                        bgcolor);
813
814         fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
815
816         /* total height for one channel */
817         h = s->height / nb_display_channels;
818         /* graph height / 2 */
819         h2 = (h * 9) / 20;
820         for (ch = 0; ch < nb_display_channels; ch++) {
821             i = i_start + ch;
822             y1 = s->ytop + ch * h + (h / 2); /* position of center line */
823             for (x = 0; x < s->width; x++) {
824                 y = (s->sample_array[i] * h2) >> 15;
825                 if (y < 0) {
826                     y = -y;
827                     ys = y1 - y;
828                 } else {
829                     ys = y1;
830                 }
831                 fill_rectangle(screen,
832                                s->xleft + x, ys, 1, y,
833                                fgcolor);
834                 i += channels;
835                 if (i >= SAMPLE_ARRAY_SIZE)
836                     i -= SAMPLE_ARRAY_SIZE;
837             }
838         }
839
840         fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
841
842         for (ch = 1; ch < nb_display_channels; ch++) {
843             y = s->ytop + ch * h;
844             fill_rectangle(screen,
845                            s->xleft, y, s->width, 1,
846                            fgcolor);
847         }
848         SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
849     } else {
850         nb_display_channels= FFMIN(nb_display_channels, 2);
851         if (rdft_bits != s->rdft_bits) {
852             av_rdft_end(s->rdft);
853             av_free(s->rdft_data);
854             s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
855             s->rdft_bits = rdft_bits;
856             s->rdft_data = av_malloc(4 * nb_freq * sizeof(*s->rdft_data));
857         }
858         {
859             FFTSample *data[2];
860             for (ch = 0; ch < nb_display_channels; ch++) {
861                 data[ch] = s->rdft_data + 2 * nb_freq * ch;
862                 i = i_start + ch;
863                 for (x = 0; x < 2 * nb_freq; x++) {
864                     double w = (x-nb_freq) * (1.0 / nb_freq);
865                     data[ch][x] = s->sample_array[i] * (1.0 - w * w);
866                     i += channels;
867                     if (i >= SAMPLE_ARRAY_SIZE)
868                         i -= SAMPLE_ARRAY_SIZE;
869                 }
870                 av_rdft_calc(s->rdft, data[ch]);
871             }
872             // least efficient way to do this, we should of course directly access it but its more than fast enough
873             for (y = 0; y < s->height; y++) {
874                 double w = 1 / sqrt(nb_freq);
875                 int a = sqrt(w * sqrt(data[0][2 * y + 0] * data[0][2 * y + 0] + data[0][2 * y + 1] * data[0][2 * y + 1]));
876                 int b = (nb_display_channels == 2 ) ? sqrt(w * sqrt(data[1][2 * y + 0] * data[1][2 * y + 0]
877                        + data[1][2 * y + 1] * data[1][2 * y + 1])) : a;
878                 a = FFMIN(a, 255);
879                 b = FFMIN(b, 255);
880                 fgcolor = SDL_MapRGB(screen->format, a, b, (a + b) / 2);
881
882                 fill_rectangle(screen,
883                             s->xpos, s->height-y, 1, 1,
884                             fgcolor);
885             }
886         }
887         SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
888         if (!s->paused)
889             s->xpos++;
890         if (s->xpos >= s->width)
891             s->xpos= s->xleft;
892     }
893 }
894
895 static void stream_close(VideoState *is)
896 {
897     VideoPicture *vp;
898     int i;
899     /* XXX: use a special url_shutdown call to abort parse cleanly */
900     is->abort_request = 1;
901     SDL_WaitThread(is->read_tid, NULL);
902     SDL_WaitThread(is->refresh_tid, NULL);
903     packet_queue_destroy(&is->videoq);
904     packet_queue_destroy(&is->audioq);
905     packet_queue_destroy(&is->subtitleq);
906
907     /* free all pictures */
908     for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
909         vp = &is->pictq[i];
910 #if CONFIG_AVFILTER
911         avfilter_unref_bufferp(&vp->picref);
912 #endif
913         if (vp->bmp) {
914             SDL_FreeYUVOverlay(vp->bmp);
915             vp->bmp = NULL;
916         }
917     }
918     SDL_DestroyMutex(is->pictq_mutex);
919     SDL_DestroyCond(is->pictq_cond);
920     SDL_DestroyMutex(is->subpq_mutex);
921     SDL_DestroyCond(is->subpq_cond);
922 #if !CONFIG_AVFILTER
923     if (is->img_convert_ctx)
924         sws_freeContext(is->img_convert_ctx);
925 #endif
926     av_free(is);
927 }
928
929 static void do_exit(VideoState *is)
930 {
931     if (is) {
932         stream_close(is);
933     }
934     av_lockmgr_register(NULL);
935     uninit_opts();
936 #if CONFIG_AVFILTER
937     avfilter_uninit();
938 #endif
939     avformat_network_deinit();
940     if (show_status)
941         printf("\n");
942     SDL_Quit();
943     av_log(NULL, AV_LOG_QUIET, "%s", "");
944     exit(0);
945 }
946
947 static void sigterm_handler(int sig)
948 {
949     exit(123);
950 }
951
952 static int video_open(VideoState *is, int force_set_video_mode)
953 {
954     int flags = SDL_HWSURFACE | SDL_ASYNCBLIT | SDL_HWACCEL;
955     int w,h;
956     VideoPicture *vp = &is->pictq[is->pictq_rindex];
957
958     if (is_full_screen) flags |= SDL_FULLSCREEN;
959     else                flags |= SDL_RESIZABLE;
960
961     if (is_full_screen && fs_screen_width) {
962         w = fs_screen_width;
963         h = fs_screen_height;
964     } else if (!is_full_screen && screen_width) {
965         w = screen_width;
966         h = screen_height;
967     } else if (vp->width) {
968         w = vp->width;
969         h = vp->height;
970     } else {
971         w = 640;
972         h = 480;
973     }
974     if (screen && is->width == screen->w && screen->w == w
975        && is->height== screen->h && screen->h == h && !force_set_video_mode)
976         return 0;
977     screen = SDL_SetVideoMode(w, h, 0, flags);
978     if (!screen) {
979         fprintf(stderr, "SDL: could not set video mode - exiting\n");
980         do_exit(is);
981     }
982     if (!window_title)
983         window_title = input_filename;
984     SDL_WM_SetCaption(window_title, window_title);
985
986     is->width  = screen->w;
987     is->height = screen->h;
988
989     return 0;
990 }
991
992 /* display the current picture, if any */
993 static void video_display(VideoState *is)
994 {
995     if (!screen)
996         video_open(is, 0);
997     if (is->audio_st && is->show_mode != SHOW_MODE_VIDEO)
998         video_audio_display(is);
999     else if (is->video_st)
1000         video_image_display(is);
1001 }
1002
1003 static int refresh_thread(void *opaque)
1004 {
1005     VideoState *is= opaque;
1006     while (!is->abort_request) {
1007         SDL_Event event;
1008         event.type = FF_REFRESH_EVENT;
1009         event.user.data1 = opaque;
1010         if (!is->refresh && (!is->paused || is->force_refresh)) {
1011             is->refresh = 1;
1012             SDL_PushEvent(&event);
1013         }
1014         //FIXME ideally we should wait the correct time but SDLs event passing is so slow it would be silly
1015         av_usleep(is->audio_st && is->show_mode != SHOW_MODE_VIDEO ? rdftspeed*1000 : 5000);
1016     }
1017     return 0;
1018 }
1019
1020 /* get the current audio clock value */
1021 static double get_audio_clock(VideoState *is)
1022 {
1023     if (is->paused) {
1024         return is->audio_current_pts;
1025     } else {
1026         return is->audio_current_pts_drift + av_gettime() / 1000000.0;
1027     }
1028 }
1029
1030 /* get the current video clock value */
1031 static double get_video_clock(VideoState *is)
1032 {
1033     if (is->paused) {
1034         return is->video_current_pts;
1035     } else {
1036         return is->video_current_pts_drift + av_gettime() / 1000000.0;
1037     }
1038 }
1039
1040 /* get the current external clock value */
1041 static double get_external_clock(VideoState *is)
1042 {
1043     int64_t ti;
1044     ti = av_gettime();
1045     return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
1046 }
1047
1048 /* get the current master clock value */
1049 static double get_master_clock(VideoState *is)
1050 {
1051     double val;
1052
1053     if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1054         if (is->video_st)
1055             val = get_video_clock(is);
1056         else
1057             val = get_audio_clock(is);
1058     } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1059         if (is->audio_st)
1060             val = get_audio_clock(is);
1061         else
1062             val = get_video_clock(is);
1063     } else {
1064         val = get_external_clock(is);
1065     }
1066     return val;
1067 }
1068
1069 /* seek in the stream */
1070 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1071 {
1072     if (!is->seek_req) {
1073         is->seek_pos = pos;
1074         is->seek_rel = rel;
1075         is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1076         if (seek_by_bytes)
1077             is->seek_flags |= AVSEEK_FLAG_BYTE;
1078         is->seek_req = 1;
1079     }
1080 }
1081
1082 /* pause or resume the video */
1083 static void stream_toggle_pause(VideoState *is)
1084 {
1085     if (is->paused) {
1086         is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
1087         if (is->read_pause_return != AVERROR(ENOSYS)) {
1088             is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
1089         }
1090         is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
1091     }
1092     is->paused = !is->paused;
1093 }
1094
1095 static double compute_target_delay(double delay, VideoState *is)
1096 {
1097     double sync_threshold, diff;
1098
1099     /* update delay to follow master synchronisation source */
1100     if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1101          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1102         /* if video is slave, we try to correct big delays by
1103            duplicating or deleting a frame */
1104         diff = get_video_clock(is) - get_master_clock(is);
1105
1106         /* skip or repeat frame. We take into account the
1107            delay to compute the threshold. I still don't know
1108            if it is the best guess */
1109         sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1110         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1111             if (diff <= -sync_threshold)
1112                 delay = 0;
1113             else if (diff >= sync_threshold)
1114                 delay = 2 * delay;
1115         }
1116     }
1117
1118     av_dlog(NULL, "video: delay=%0.3f A-V=%f\n",
1119             delay, -diff);
1120
1121     return delay;
1122 }
1123
1124 static void pictq_next_picture(VideoState *is) {
1125     /* update queue size and signal for next picture */
1126     if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1127         is->pictq_rindex = 0;
1128
1129     SDL_LockMutex(is->pictq_mutex);
1130     is->pictq_size--;
1131     SDL_CondSignal(is->pictq_cond);
1132     SDL_UnlockMutex(is->pictq_mutex);
1133 }
1134
1135 static void update_video_pts(VideoState *is, double pts, int64_t pos) {
1136     double time = av_gettime() / 1000000.0;
1137     /* update current video pts */
1138     is->video_current_pts = pts;
1139     is->video_current_pts_drift = is->video_current_pts - time;
1140     is->video_current_pos = pos;
1141     is->frame_last_pts = pts;
1142 }
1143
1144 /* called to display each frame */
1145 static void video_refresh(void *opaque)
1146 {
1147     VideoState *is = opaque;
1148     VideoPicture *vp;
1149     double time;
1150
1151     SubPicture *sp, *sp2;
1152
1153     if (is->video_st) {
1154 retry:
1155         if (is->pictq_size == 0) {
1156             SDL_LockMutex(is->pictq_mutex);
1157             if (is->frame_last_dropped_pts != AV_NOPTS_VALUE && is->frame_last_dropped_pts > is->frame_last_pts) {
1158                 update_video_pts(is, is->frame_last_dropped_pts, is->frame_last_dropped_pos);
1159                 is->frame_last_dropped_pts = AV_NOPTS_VALUE;
1160             }
1161             SDL_UnlockMutex(is->pictq_mutex);
1162             // nothing to do, no picture to display in the que
1163         } else {
1164             double last_duration, duration, delay;
1165             /* dequeue the picture */
1166             vp = &is->pictq[is->pictq_rindex];
1167
1168             if (vp->skip) {
1169                 pictq_next_picture(is);
1170                 goto retry;
1171             }
1172
1173             if (is->paused)
1174                 goto display;
1175
1176             /* compute nominal last_duration */
1177             last_duration = vp->pts - is->frame_last_pts;
1178             if (last_duration > 0 && last_duration < 10.0) {
1179                 /* if duration of the last frame was sane, update last_duration in video state */
1180                 is->frame_last_duration = last_duration;
1181             }
1182             delay = compute_target_delay(is->frame_last_duration, is);
1183
1184             time= av_gettime()/1000000.0;
1185             if (time < is->frame_timer + delay)
1186                 return;
1187
1188             if (delay > 0)
1189                 is->frame_timer += delay * FFMAX(1, floor((time-is->frame_timer) / delay));
1190
1191             SDL_LockMutex(is->pictq_mutex);
1192             update_video_pts(is, vp->pts, vp->pos);
1193             SDL_UnlockMutex(is->pictq_mutex);
1194
1195             if (is->pictq_size > 1) {
1196                 VideoPicture *nextvp = &is->pictq[(is->pictq_rindex + 1) % VIDEO_PICTURE_QUEUE_SIZE];
1197                 duration = nextvp->pts - vp->pts;
1198                 if((framedrop>0 || (framedrop && is->audio_st)) && time > is->frame_timer + duration){
1199                     is->frame_drops_late++;
1200                     pictq_next_picture(is);
1201                     goto retry;
1202                 }
1203             }
1204
1205             if (is->subtitle_st) {
1206                 if (is->subtitle_stream_changed) {
1207                     SDL_LockMutex(is->subpq_mutex);
1208
1209                     while (is->subpq_size) {
1210                         free_subpicture(&is->subpq[is->subpq_rindex]);
1211
1212                         /* update queue size and signal for next picture */
1213                         if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1214                             is->subpq_rindex = 0;
1215
1216                         is->subpq_size--;
1217                     }
1218                     is->subtitle_stream_changed = 0;
1219
1220                     SDL_CondSignal(is->subpq_cond);
1221                     SDL_UnlockMutex(is->subpq_mutex);
1222                 } else {
1223                     if (is->subpq_size > 0) {
1224                         sp = &is->subpq[is->subpq_rindex];
1225
1226                         if (is->subpq_size > 1)
1227                             sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1228                         else
1229                             sp2 = NULL;
1230
1231                         if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1232                                 || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1233                         {
1234                             free_subpicture(sp);
1235
1236                             /* update queue size and signal for next picture */
1237                             if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1238                                 is->subpq_rindex = 0;
1239
1240                             SDL_LockMutex(is->subpq_mutex);
1241                             is->subpq_size--;
1242                             SDL_CondSignal(is->subpq_cond);
1243                             SDL_UnlockMutex(is->subpq_mutex);
1244                         }
1245                     }
1246                 }
1247             }
1248
1249 display:
1250             /* display picture */
1251             if (!display_disable)
1252                 video_display(is);
1253
1254             if (!is->paused)
1255                 pictq_next_picture(is);
1256         }
1257     } else if (is->audio_st) {
1258         /* draw the next audio frame */
1259
1260         /* if only audio stream, then display the audio bars (better
1261            than nothing, just to test the implementation */
1262
1263         /* display picture */
1264         if (!display_disable)
1265             video_display(is);
1266     }
1267     is->force_refresh = 0;
1268     if (show_status) {
1269         static int64_t last_time;
1270         int64_t cur_time;
1271         int aqsize, vqsize, sqsize;
1272         double av_diff;
1273
1274         cur_time = av_gettime();
1275         if (!last_time || (cur_time - last_time) >= 30000) {
1276             aqsize = 0;
1277             vqsize = 0;
1278             sqsize = 0;
1279             if (is->audio_st)
1280                 aqsize = is->audioq.size;
1281             if (is->video_st)
1282                 vqsize = is->videoq.size;
1283             if (is->subtitle_st)
1284                 sqsize = is->subtitleq.size;
1285             av_diff = 0;
1286             if (is->audio_st && is->video_st)
1287                 av_diff = get_audio_clock(is) - get_video_clock(is);
1288             printf("%7.2f A-V:%7.3f fd=%4d aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
1289                    get_master_clock(is),
1290                    av_diff,
1291                    is->frame_drops_early + is->frame_drops_late,
1292                    aqsize / 1024,
1293                    vqsize / 1024,
1294                    sqsize,
1295                    is->video_st ? is->video_st->codec->pts_correction_num_faulty_dts : 0,
1296                    is->video_st ? is->video_st->codec->pts_correction_num_faulty_pts : 0);
1297             fflush(stdout);
1298             last_time = cur_time;
1299         }
1300     }
1301 }
1302
1303 /* allocate a picture (needs to do that in main thread to avoid
1304    potential locking problems */
1305 static void alloc_picture(AllocEventProps *event_props)
1306 {
1307     VideoState *is = event_props->is;
1308     AVFrame *frame = event_props->frame;
1309     VideoPicture *vp;
1310
1311     vp = &is->pictq[is->pictq_windex];
1312
1313     if (vp->bmp)
1314         SDL_FreeYUVOverlay(vp->bmp);
1315
1316 #if CONFIG_AVFILTER
1317     avfilter_unref_bufferp(&vp->picref);
1318 #endif
1319
1320     vp->width   = frame->width;
1321     vp->height  = frame->height;
1322
1323     video_open(event_props->is, 0);
1324
1325     vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
1326                                    SDL_YV12_OVERLAY,
1327                                    screen);
1328     if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
1329         /* SDL allocates a buffer smaller than requested if the video
1330          * overlay hardware is unable to support the requested size. */
1331         fprintf(stderr, "Error: the video system does not support an image\n"
1332                         "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n"
1333                         "to reduce the image size.\n", vp->width, vp->height );
1334         do_exit(is);
1335     }
1336
1337     SDL_LockMutex(is->pictq_mutex);
1338     vp->allocated = 1;
1339     SDL_CondSignal(is->pictq_cond);
1340     SDL_UnlockMutex(is->pictq_mutex);
1341 }
1342
1343 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
1344 {
1345     VideoPicture *vp;
1346     double frame_delay, pts = pts1;
1347
1348     /* compute the exact PTS for the picture if it is omitted in the stream
1349      * pts1 is the dts of the pkt / pts of the frame */
1350     if (pts != 0) {
1351         /* update video clock with pts, if present */
1352         is->video_clock = pts;
1353     } else {
1354         pts = is->video_clock;
1355     }
1356     /* update video clock for next frame */
1357     frame_delay = av_q2d(is->video_st->codec->time_base);
1358     /* for MPEG2, the frame can be repeated, so we update the
1359        clock accordingly */
1360     frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1361     is->video_clock += frame_delay;
1362
1363 #if defined(DEBUG_SYNC) && 0
1364     printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
1365            av_get_picture_type_char(src_frame->pict_type), pts, pts1);
1366 #endif
1367
1368     /* wait until we have space to put a new picture */
1369     SDL_LockMutex(is->pictq_mutex);
1370
1371     while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1372            !is->videoq.abort_request) {
1373         SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1374     }
1375     SDL_UnlockMutex(is->pictq_mutex);
1376
1377     if (is->videoq.abort_request)
1378         return -1;
1379
1380     vp = &is->pictq[is->pictq_windex];
1381
1382     /* alloc or resize hardware picture buffer */
1383     if (!vp->bmp || vp->reallocate ||
1384         vp->width  != src_frame->width ||
1385         vp->height != src_frame->height) {
1386         SDL_Event event;
1387         AllocEventProps event_props;
1388
1389         event_props.frame = src_frame;
1390         event_props.is = is;
1391
1392         vp->allocated  = 0;
1393         vp->reallocate = 0;
1394
1395         /* the allocation must be done in the main thread to avoid
1396            locking problems. We wait in this block for the event to complete,
1397            so we can pass a pointer to event_props to it. */
1398         event.type = FF_ALLOC_EVENT;
1399         event.user.data1 = &event_props;
1400         SDL_PushEvent(&event);
1401
1402         /* wait until the picture is allocated */
1403         SDL_LockMutex(is->pictq_mutex);
1404         while (!vp->allocated && !is->videoq.abort_request) {
1405             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1406         }
1407         /* if the queue is aborted, we have to pop the pending ALLOC event or wait for the allocation to complete */
1408         if (is->videoq.abort_request && SDL_PeepEvents(&event, 1, SDL_GETEVENT, SDL_EVENTMASK(FF_ALLOC_EVENT)) != 1) {
1409             while (!vp->allocated) {
1410                 SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1411             }
1412         }
1413         SDL_UnlockMutex(is->pictq_mutex);
1414
1415         if (is->videoq.abort_request)
1416             return -1;
1417     }
1418
1419     /* if the frame is not skipped, then display it */
1420     if (vp->bmp) {
1421         AVPicture pict = { { 0 } };
1422 #if CONFIG_AVFILTER
1423         avfilter_unref_bufferp(&vp->picref);
1424         vp->picref = src_frame->opaque;
1425 #endif
1426
1427         /* get a pointer on the bitmap */
1428         SDL_LockYUVOverlay (vp->bmp);
1429
1430         pict.data[0] = vp->bmp->pixels[0];
1431         pict.data[1] = vp->bmp->pixels[2];
1432         pict.data[2] = vp->bmp->pixels[1];
1433
1434         pict.linesize[0] = vp->bmp->pitches[0];
1435         pict.linesize[1] = vp->bmp->pitches[2];
1436         pict.linesize[2] = vp->bmp->pitches[1];
1437
1438 #if CONFIG_AVFILTER
1439         // FIXME use direct rendering
1440         av_picture_copy(&pict, (AVPicture *)src_frame,
1441                         src_frame->format, vp->width, vp->height);
1442         vp->sample_aspect_ratio = vp->picref->video->sample_aspect_ratio;
1443 #else
1444         sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
1445         is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
1446             vp->width, vp->height, src_frame->format, vp->width, vp->height,
1447             PIX_FMT_YUV420P, sws_flags, NULL, NULL, NULL);
1448         if (is->img_convert_ctx == NULL) {
1449             fprintf(stderr, "Cannot initialize the conversion context\n");
1450             exit(1);
1451         }
1452         sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1453                   0, vp->height, pict.data, pict.linesize);
1454         vp->sample_aspect_ratio = av_guess_sample_aspect_ratio(is->ic, is->video_st, src_frame);
1455 #endif
1456         /* update the bitmap content */
1457         SDL_UnlockYUVOverlay(vp->bmp);
1458
1459         vp->pts = pts;
1460         vp->pos = pos;
1461         vp->skip = 0;
1462
1463         /* now we can update the picture count */
1464         if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1465             is->pictq_windex = 0;
1466         SDL_LockMutex(is->pictq_mutex);
1467         is->pictq_size++;
1468         SDL_UnlockMutex(is->pictq_mutex);
1469     }
1470     return 0;
1471 }
1472
1473 static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
1474 {
1475     int got_picture, i;
1476
1477     if (packet_queue_get(&is->videoq, pkt, 1) < 0)
1478         return -1;
1479
1480     if (pkt->data == flush_pkt.data) {
1481         avcodec_flush_buffers(is->video_st->codec);
1482
1483         SDL_LockMutex(is->pictq_mutex);
1484         // Make sure there are no long delay timers (ideally we should just flush the que but thats harder)
1485         for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
1486             is->pictq[i].skip = 1;
1487         }
1488         while (is->pictq_size && !is->videoq.abort_request) {
1489             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1490         }
1491         is->video_current_pos = -1;
1492         is->frame_last_pts = AV_NOPTS_VALUE;
1493         is->frame_last_duration = 0;
1494         is->frame_timer = (double)av_gettime() / 1000000.0;
1495         is->frame_last_dropped_pts = AV_NOPTS_VALUE;
1496         SDL_UnlockMutex(is->pictq_mutex);
1497
1498         return 0;
1499     }
1500
1501     if(avcodec_decode_video2(is->video_st->codec, frame, &got_picture, pkt) < 0)
1502         return 0;
1503
1504     if (got_picture) {
1505         int ret = 1;
1506
1507         if (decoder_reorder_pts == -1) {
1508             *pts = av_frame_get_best_effort_timestamp(frame);
1509         } else if (decoder_reorder_pts) {
1510             *pts = frame->pkt_pts;
1511         } else {
1512             *pts = frame->pkt_dts;
1513         }
1514
1515         if (*pts == AV_NOPTS_VALUE) {
1516             *pts = 0;
1517         }
1518
1519         if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) || is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK) &&
1520              (framedrop>0 || (framedrop && is->audio_st))) {
1521             SDL_LockMutex(is->pictq_mutex);
1522             if (is->frame_last_pts != AV_NOPTS_VALUE && *pts) {
1523                 double clockdiff = get_video_clock(is) - get_master_clock(is);
1524                 double dpts = av_q2d(is->video_st->time_base) * *pts;
1525                 double ptsdiff = dpts - is->frame_last_pts;
1526                 if (fabs(clockdiff) < AV_NOSYNC_THRESHOLD &&
1527                      ptsdiff > 0 && ptsdiff < AV_NOSYNC_THRESHOLD &&
1528                      clockdiff + ptsdiff - is->frame_last_filter_delay < 0) {
1529                     is->frame_last_dropped_pos = pkt->pos;
1530                     is->frame_last_dropped_pts = dpts;
1531                     is->frame_drops_early++;
1532                     ret = 0;
1533                 }
1534             }
1535             SDL_UnlockMutex(is->pictq_mutex);
1536         }
1537
1538         return ret;
1539     }
1540     return 0;
1541 }
1542
1543 #if CONFIG_AVFILTER
1544 static int configure_filtergraph(AVFilterGraph *graph, const char *filtergraph,
1545                                  AVFilterContext *source_ctx, AVFilterContext *sink_ctx)
1546 {
1547     int ret;
1548     AVFilterInOut *outputs = NULL, *inputs = NULL;
1549
1550     if (filtergraph) {
1551         outputs = avfilter_inout_alloc();
1552         inputs  = avfilter_inout_alloc();
1553         if (!outputs || !inputs) {
1554             ret = AVERROR(ENOMEM);
1555             goto fail;
1556         }
1557
1558         outputs->name       = av_strdup("in");
1559         outputs->filter_ctx = source_ctx;
1560         outputs->pad_idx    = 0;
1561         outputs->next       = NULL;
1562
1563         inputs->name        = av_strdup("out");
1564         inputs->filter_ctx  = sink_ctx;
1565         inputs->pad_idx     = 0;
1566         inputs->next        = NULL;
1567
1568         if ((ret = avfilter_graph_parse(graph, filtergraph, &inputs, &outputs, NULL)) < 0)
1569             goto fail;
1570     } else {
1571         if ((ret = avfilter_link(source_ctx, 0, sink_ctx, 0)) < 0)
1572             goto fail;
1573     }
1574
1575     return avfilter_graph_config(graph, NULL);
1576 fail:
1577     avfilter_inout_free(&outputs);
1578     avfilter_inout_free(&inputs);
1579     return ret;
1580 }
1581
1582 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters)
1583 {
1584     static const enum PixelFormat pix_fmts[] = { PIX_FMT_YUV420P, PIX_FMT_NONE };
1585     char sws_flags_str[128];
1586     char buffersrc_args[256];
1587     int ret;
1588     AVBufferSinkParams *buffersink_params = av_buffersink_params_alloc();
1589     AVFilterContext *filt_src = NULL, *filt_out = NULL, *filt_format, *filt_crop;
1590     AVCodecContext *codec = is->video_st->codec;
1591
1592     snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%d", sws_flags);
1593     graph->scale_sws_opts = av_strdup(sws_flags_str);
1594
1595     snprintf(buffersrc_args, sizeof(buffersrc_args),
1596              "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
1597              codec->width, codec->height, codec->pix_fmt,
1598              is->video_st->time_base.num, is->video_st->time_base.den,
1599              codec->sample_aspect_ratio.num, codec->sample_aspect_ratio.den);
1600
1601     if ((ret = avfilter_graph_create_filter(&filt_src,
1602                                             avfilter_get_by_name("buffer"),
1603                                             "ffplay_buffer", buffersrc_args, NULL,
1604                                             graph)) < 0)
1605         return ret;
1606
1607     buffersink_params->pixel_fmts = pix_fmts;
1608     ret = avfilter_graph_create_filter(&filt_out,
1609                                        avfilter_get_by_name("buffersink"),
1610                                        "ffplay_buffersink", NULL, buffersink_params, graph);
1611     av_freep(&buffersink_params);
1612     if (ret < 0)
1613         return ret;
1614
1615     /* The SDL YUV code does not handle odd width/height for some driver
1616      * combinations, therefore we crop the picture to an even width/height. */
1617     if ((ret = avfilter_graph_create_filter(&filt_crop,
1618                                             avfilter_get_by_name("crop"),
1619                                             "ffplay_crop", "floor(in_w/2)*2:floor(in_h/2)*2", NULL, graph)) < 0)
1620         return ret;
1621     if ((ret = avfilter_graph_create_filter(&filt_format,
1622                                             avfilter_get_by_name("format"),
1623                                             "format", "yuv420p", NULL, graph)) < 0)
1624         return ret;
1625     if ((ret = avfilter_link(filt_crop, 0, filt_format, 0)) < 0)
1626         return ret;
1627     if ((ret = avfilter_link(filt_format, 0, filt_out, 0)) < 0)
1628         return ret;
1629
1630     if ((ret = configure_filtergraph(graph, vfilters, filt_src, filt_crop)) < 0)
1631         return ret;
1632
1633     is->in_video_filter  = filt_src;
1634     is->out_video_filter = filt_out;
1635
1636     return ret;
1637 }
1638
1639 #endif  /* CONFIG_AVFILTER */
1640
1641 static int video_thread(void *arg)
1642 {
1643     AVPacket pkt = { 0 };
1644     VideoState *is = arg;
1645     AVFrame *frame = avcodec_alloc_frame();
1646     int64_t pts_int = AV_NOPTS_VALUE, pos = -1;
1647     double pts;
1648     int ret;
1649
1650 #if CONFIG_AVFILTER
1651     AVCodecContext *codec = is->video_st->codec;
1652     AVFilterGraph *graph = avfilter_graph_alloc();
1653     AVFilterContext *filt_out = NULL, *filt_in = NULL;
1654     int last_w = 0;
1655     int last_h = 0;
1656     enum PixelFormat last_format = -2;
1657
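         /* if the decoder supports direct rendering (DR1), plug in our buffer
          * pool callbacks so decoded frames can later be handed to the filter
          * graph without an extra copy */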
1658     if (codec->codec->capabilities & CODEC_CAP_DR1) {
1659         is->use_dr1 = 1;
1660         codec->get_buffer     = codec_get_buffer;
1661         codec->release_buffer = codec_release_buffer;
1662         codec->opaque         = &is->buffer_pool;
1663     }
1664 #endif
1665
1666     for (;;) {
1667 #if CONFIG_AVFILTER
1668         AVFilterBufferRef *picref;
1669         AVRational tb;
1670 #endif
1671         while (is->paused && !is->videoq.abort_request)
1672             SDL_Delay(10);
1673
1674         avcodec_get_frame_defaults(frame);
1675         av_free_packet(&pkt);
1676
1677         ret = get_video_frame(is, frame, &pts_int, &pkt);
1678         if (ret < 0)
1679             goto the_end;
1680
1681         if (!ret)
1682             continue;
1683
1684 #if CONFIG_AVFILTER
1685         if (   last_w != is->video_st->codec->width
1686             || last_h != is->video_st->codec->height
1687             || last_format != is->video_st->codec->pix_fmt) {
1688             av_log(NULL, AV_LOG_INFO, "Frame changed from size:%dx%d fmt:%d to size:%dx%d fmt:%d\n",
1689                    last_w, last_h, last_format, is->video_st->codec->width, is->video_st->codec->height, is->video_st->codec->pix_fmt);
1690             avfilter_graph_free(&graph);
1691             graph = avfilter_graph_alloc();
1692             if ((ret = configure_video_filters(graph, is, vfilters)) < 0) {
1693                 SDL_Event event;
1694                 event.type = FF_QUIT_EVENT;
1695                 event.user.data1 = is;
1696                 SDL_PushEvent(&event);
1697                 av_free_packet(&pkt);
1698                 goto the_end;
1699             }
1700             filt_in  = is->in_video_filter;
1701             filt_out = is->out_video_filter;
1702             last_w = is->video_st->codec->width;
1703             last_h = is->video_st->codec->height;
1704             last_format = is->video_st->codec->pix_fmt;
1705         }
1706
1707         frame->pts = pts_int;
1708         frame->sample_aspect_ratio = av_guess_sample_aspect_ratio(is->ic, is->video_st, frame);
1709         if (is->use_dr1 && frame->opaque) {
1710             FrameBuffer      *buf = frame->opaque;
1711             AVFilterBufferRef *fb = avfilter_get_video_buffer_ref_from_arrays(
1712                                         frame->data, frame->linesize,
1713                                         AV_PERM_READ | AV_PERM_PRESERVE,
1714                                         frame->width, frame->height,
1715                                         frame->format);
1716
1717             avfilter_copy_frame_props(fb, frame);
1718             fb->buf->priv           = buf;
1719             fb->buf->free           = filter_release_buffer;
1720
1721             buf->refcount++;
1722             av_buffersrc_add_ref(filt_in, fb, AV_BUFFERSRC_FLAG_NO_COPY);
1723
1724         } else
1725             av_buffersrc_write_frame(filt_in, frame);
1726
1727         av_free_packet(&pkt);
1728
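             /* drain every frame the filter graph can currently produce; each one is
              * converted to a pts in seconds and handed to queue_picture() */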
1729         while (ret >= 0) {
1730             is->frame_last_returned_time = av_gettime() / 1000000.0;
1731
1732             ret = av_buffersink_get_buffer_ref(filt_out, &picref, 0);
1733             if (ret < 0) {
1734                 ret = 0;
1735                 break;
1736             }
1737
1738             is->frame_last_filter_delay = av_gettime() / 1000000.0 - is->frame_last_returned_time;
1739             if (fabs(is->frame_last_filter_delay) > AV_NOSYNC_THRESHOLD / 10.0)
1740                 is->frame_last_filter_delay = 0;
1741
1742             avfilter_copy_buf_props(frame, picref);
1743
1744             pts_int = picref->pts;
1745             tb      = filt_out->inputs[0]->time_base;
1746             pos     = picref->pos;
1747             frame->opaque = picref;
1748
1749             if (av_cmp_q(tb, is->video_st->time_base)) {
1750                 av_unused int64_t pts1 = pts_int;
1751                 pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
1752                 av_dlog(NULL, "video_thread(): "
1753                         "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
1754                         tb.num, tb.den, pts1,
1755                         is->video_st->time_base.num, is->video_st->time_base.den, pts_int);
1756             }
1757             pts = pts_int * av_q2d(is->video_st->time_base);
1758             ret = queue_picture(is, frame, pts, pos);
1759         }
1760 #else
1761         pts = pts_int * av_q2d(is->video_st->time_base);
1762         ret = queue_picture(is, frame, pts, pkt.pos);
1763 #endif
1764
1765         if (ret < 0)
1766             goto the_end;
1767
1768         if (is->step)
1769             stream_toggle_pause(is);
1770     }
1771  the_end:
1772     avcodec_flush_buffers(is->video_st->codec);
1773 #if CONFIG_AVFILTER
1774     av_freep(&vfilters);
1775     avfilter_graph_free(&graph);
1776 #endif
1777     av_free_packet(&pkt);
1778     av_free(frame);
1779     return 0;
1780 }
1781
1782 static int subtitle_thread(void *arg)
1783 {
1784     VideoState *is = arg;
1785     SubPicture *sp;
1786     AVPacket pkt1, *pkt = &pkt1;
1787     int got_subtitle;
1788     double pts;
1789     int i, j;
1790     int r, g, b, y, u, v, a;
1791
1792     for (;;) {
1793         while (is->paused && !is->subtitleq.abort_request) {
1794             SDL_Delay(10);
1795         }
1796         if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
1797             break;
1798
1799         if (pkt->data == flush_pkt.data) {
1800             avcodec_flush_buffers(is->subtitle_st->codec);
1801             continue;
1802         }
1803         SDL_LockMutex(is->subpq_mutex);
1804         while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
1805                !is->subtitleq.abort_request) {
1806             SDL_CondWait(is->subpq_cond, is->subpq_mutex);
1807         }
1808         SDL_UnlockMutex(is->subpq_mutex);
1809
1810         if (is->subtitleq.abort_request)
1811             return 0;
1812
1813         sp = &is->subpq[is->subpq_windex];
1814
1815         /* NOTE: pts is the PTS of the _first_ picture beginning in
1816            this packet, if any */
1817         pts = 0;
1818         if (pkt->pts != AV_NOPTS_VALUE)
1819             pts = av_q2d(is->subtitle_st->time_base) * pkt->pts;
1820
1821         avcodec_decode_subtitle2(is->subtitle_st->codec, &sp->sub,
1822                                  &got_subtitle, pkt);
1823
1824         if (got_subtitle && sp->sub.format == 0) {
1825             sp->pts = pts;
1826
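                 /* convert the palette entries of the subtitle rectangles from RGBA to
                  * YUVA (CCIR coefficients) so they can later be blended onto the YUV
                  * overlay */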
1827             for (i = 0; i < sp->sub.num_rects; i++)
1828             {
1829                 for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
1830                 {
1831                     RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
1832                     y = RGB_TO_Y_CCIR(r, g, b);
1833                     u = RGB_TO_U_CCIR(r, g, b, 0);
1834                     v = RGB_TO_V_CCIR(r, g, b, 0);
1835                     YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
1836                 }
1837             }
1838
1839             /* now we can update the picture count */
1840             if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
1841                 is->subpq_windex = 0;
1842             SDL_LockMutex(is->subpq_mutex);
1843             is->subpq_size++;
1844             SDL_UnlockMutex(is->subpq_mutex);
1845         }
1846         av_free_packet(pkt);
1847     }
1848     return 0;
1849 }
1850
1851 /* copy samples for viewing in the audio display window */
1852 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1853 {
1854     int size, len;
1855
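         /* append the samples to the circular buffer read by the waves/RDFT
          * visualization, wrapping the write index at SAMPLE_ARRAY_SIZE */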
1856     size = samples_size / sizeof(short);
1857     while (size > 0) {
1858         len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1859         if (len > size)
1860             len = size;
1861         memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1862         samples += len;
1863         is->sample_array_index += len;
1864         if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1865             is->sample_array_index = 0;
1866         size -= len;
1867     }
1868 }
1869
1870 /* return the wanted number of samples to get better sync if sync_type is video
1871  * or external master clock */
1872 static int synchronize_audio(VideoState *is, int nb_samples)
1873 {
1874     int wanted_nb_samples = nb_samples;
1875
1876     /* if not master, then we try to remove or add samples to correct the clock */
1877     if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1878          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1879         double diff, avg_diff;
1880         int min_nb_samples, max_nb_samples;
1881
1882         diff = get_audio_clock(is) - get_master_clock(is);
1883
1884         if (diff < AV_NOSYNC_THRESHOLD) {
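                 /* keep an exponentially weighted running sum of the A-V differences;
                  * multiplying by (1 - coef) below turns it into a weighted average,
                  * since the geometric weights sum to 1 / (1 - coef) */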
1885             is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1886             if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1887                 /* not enough measurements yet for a reliable estimate */
1888                 is->audio_diff_avg_count++;
1889             } else {
1890                 /* estimate the A-V difference */
1891                 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1892
1893                 if (fabs(avg_diff) >= is->audio_diff_threshold) {
1894                     wanted_nb_samples = nb_samples + (int)(diff * is->audio_src.freq);
1895                     min_nb_samples = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX) / 100));
1896                     max_nb_samples = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX) / 100));
1897                     wanted_nb_samples = FFMIN(FFMAX(wanted_nb_samples, min_nb_samples), max_nb_samples);
1898                 }
1899                 av_dlog(NULL, "diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
1900                         diff, avg_diff, wanted_nb_samples - nb_samples,
1901                         is->audio_clock, is->video_clock, is->audio_diff_threshold);
1902             }
1903         } else {
1904             /* difference is too big: probably initial PTS errors, so
1905                reset the A-V filter */
1906             is->audio_diff_avg_count = 0;
1907             is->audio_diff_cum       = 0;
1908         }
1909     }
1910
1911     return wanted_nb_samples;
1912 }
1913
1914 /* decode one audio frame and return its uncompressed size */
1915 static int audio_decode_frame(VideoState *is, double *pts_ptr)
1916 {
1917     AVPacket *pkt_temp = &is->audio_pkt_temp;
1918     AVPacket *pkt = &is->audio_pkt;
1919     AVCodecContext *dec = is->audio_st->codec;
1920     int len1, len2, data_size, resampled_data_size;
1921     int64_t dec_channel_layout;
1922     int got_frame;
1923     double pts;
1924     int new_packet = 0;
1925     int flush_complete = 0;
1926     int wanted_nb_samples;
1927
1928     for (;;) {
1929         /* NOTE: the audio packet can contain several frames */
1930         while (pkt_temp->size > 0 || (!pkt_temp->data && new_packet)) {
1931             if (!is->frame) {
1932                 if (!(is->frame = avcodec_alloc_frame()))
1933                     return AVERROR(ENOMEM);
1934             } else
1935                 avcodec_get_frame_defaults(is->frame);
1936
1937             if (is->paused)
1938                 return -1;
1939
1940             if (flush_complete)
1941                 break;
1942             new_packet = 0;
1943             len1 = avcodec_decode_audio4(dec, is->frame, &got_frame, pkt_temp);
1944             if (len1 < 0) {
1945                 /* if error, we skip the frame */
1946                 pkt_temp->size = 0;
1947                 break;
1948             }
1949
1950             pkt_temp->data += len1;
1951             pkt_temp->size -= len1;
1952
1953             if (!got_frame) {
1954                 /* stop sending empty packets if the decoder is finished */
1955                 if (!pkt_temp->data && dec->codec->capabilities & CODEC_CAP_DELAY)
1956                     flush_complete = 1;
1957                 continue;
1958             }
1959             data_size = av_samples_get_buffer_size(NULL, dec->channels,
1960                                                    is->frame->nb_samples,
1961                                                    dec->sample_fmt, 1);
1962
1963             dec_channel_layout =
1964                 (dec->channel_layout && dec->channels == av_get_channel_layout_nb_channels(dec->channel_layout)) ?
1965                 dec->channel_layout : av_get_default_channel_layout(dec->channels);
1966             wanted_nb_samples = synchronize_audio(is, is->frame->nb_samples);
1967
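                 /* (re)create the resampler whenever the decoded audio no longer matches
                  * the source parameters it was set up for, or when sample compensation
                  * is requested and no resampler exists yet */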
1968             if (dec->sample_fmt    != is->audio_src.fmt            ||
1969                 dec_channel_layout != is->audio_src.channel_layout ||
1970                 dec->sample_rate   != is->audio_src.freq           ||
1971                 (wanted_nb_samples != is->frame->nb_samples && !is->swr_ctx)) {
1972                 swr_free(&is->swr_ctx);
1973                 is->swr_ctx = swr_alloc_set_opts(NULL,
1974                                                  is->audio_tgt.channel_layout, is->audio_tgt.fmt, is->audio_tgt.freq,
1975                                                  dec_channel_layout,           dec->sample_fmt,   dec->sample_rate,
1976                                                  0, NULL);
1977                 if (!is->swr_ctx || swr_init(is->swr_ctx) < 0) {
1978                     fprintf(stderr, "Cannot create sample rate converter for conversion of %d Hz %s %d channels to %d Hz %s %d channels!\n",
1979                         dec->sample_rate,   av_get_sample_fmt_name(dec->sample_fmt),   dec->channels,
1980                         is->audio_tgt.freq, av_get_sample_fmt_name(is->audio_tgt.fmt), is->audio_tgt.channels);
1981                     break;
1982                 }
1983                 is->audio_src.channel_layout = dec_channel_layout;
1984                 is->audio_src.channels = dec->channels;
1985                 is->audio_src.freq = dec->sample_rate;
1986                 is->audio_src.fmt = dec->sample_fmt;
1987             }
1988
1989             if (is->swr_ctx) {
1990                 const uint8_t **in = (const uint8_t **)is->frame->extended_data;
1991                 uint8_t *out[] = {is->audio_buf2};
1992                 int out_count = sizeof(is->audio_buf2) / is->audio_tgt.channels / av_get_bytes_per_sample(is->audio_tgt.fmt);
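                     /* apply the sample-count correction requested by synchronize_audio();
                      * delta and distance are rescaled from the decoder rate to the output rate */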
1993                 if (wanted_nb_samples != is->frame->nb_samples) {
1994                     if (swr_set_compensation(is->swr_ctx, (wanted_nb_samples - is->frame->nb_samples) * is->audio_tgt.freq / dec->sample_rate,
1995                                                 wanted_nb_samples * is->audio_tgt.freq / dec->sample_rate) < 0) {
1996                         fprintf(stderr, "swr_set_compensation() failed\n");
1997                         break;
1998                     }
1999                 }
2000                 len2 = swr_convert(is->swr_ctx, out, out_count, in, is->frame->nb_samples);
2001                 if (len2 < 0) {
2002                     fprintf(stderr, "swr_convert() failed\n");
2003                     break;
2004                 }
2005                 if (len2 == out_count) {
2006                     fprintf(stderr, "warning: audio buffer is probably too small\n");
2007                     swr_init(is->swr_ctx);
2008                 }
2009                 is->audio_buf = is->audio_buf2;
2010                 resampled_data_size = len2 * is->audio_tgt.channels * av_get_bytes_per_sample(is->audio_tgt.fmt);
2011             } else {
2012                 is->audio_buf = is->frame->data[0];
2013                 resampled_data_size = data_size;
2014             }
2015
2016             /* use the running audio clock as the pts, then advance it by the decoded duration */
2017             pts = is->audio_clock;
2018             *pts_ptr = pts;
2019             is->audio_clock += (double)data_size /
2020                 (dec->channels * dec->sample_rate * av_get_bytes_per_sample(dec->sample_fmt));
2021 #ifdef DEBUG
2022             {
2023                 static double last_clock;
2024                 printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
2025                        is->audio_clock - last_clock,
2026                        is->audio_clock, pts);
2027                 last_clock = is->audio_clock;
2028             }
2029 #endif
2030             return resampled_data_size;
2031         }
2032
2033         /* free the current packet */
2034         if (pkt->data)
2035             av_free_packet(pkt);
2036         memset(pkt_temp, 0, sizeof(*pkt_temp));
2037
2038         if (is->paused || is->audioq.abort_request) {
2039             return -1;
2040         }
2041
2042         /* read next packet */
2043         if ((new_packet = packet_queue_get(&is->audioq, pkt, 1)) < 0)
2044             return -1;
2045
2046         if (pkt->data == flush_pkt.data) {
2047             avcodec_flush_buffers(dec);
2048             flush_complete = 0;
2049         }
2050
2051         *pkt_temp = *pkt;
2052
2053         /* update the audio clock with the packet pts, if available */
2054         if (pkt->pts != AV_NOPTS_VALUE) {
2055             is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
2056         }
2057     }
2058 }
2059
2060 /* prepare a new audio buffer */
2061 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2062 {
2063     VideoState *is = opaque;
2064     int audio_size, len1;
2065     int bytes_per_sec;
2066     int frame_size = av_samples_get_buffer_size(NULL, is->audio_tgt.channels, 1, is->audio_tgt.fmt, 1);
2067     double pts;
2068
2069     audio_callback_time = av_gettime();
2070
2071     while (len > 0) {
2072         if (is->audio_buf_index >= is->audio_buf_size) {
2073            audio_size = audio_decode_frame(is, &pts);
2074            if (audio_size < 0) {
2075                 /* if error, just output silence */
2076                is->audio_buf      = is->silence_buf;
2077                is->audio_buf_size = sizeof(is->silence_buf) / frame_size * frame_size;
2078            } else {
2079                if (is->show_mode != SHOW_MODE_VIDEO)
2080                    update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2081                is->audio_buf_size = audio_size;
2082            }
2083            is->audio_buf_index = 0;
2084         }
2085         len1 = is->audio_buf_size - is->audio_buf_index;
2086         if (len1 > len)
2087             len1 = len;
2088         memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2089         len -= len1;
2090         stream += len1;
2091         is->audio_buf_index += len1;
2092     }
2093     bytes_per_sec = is->audio_tgt.freq * is->audio_tgt.channels * av_get_bytes_per_sample(is->audio_tgt.fmt);
2094     is->audio_write_buf_size = is->audio_buf_size - is->audio_buf_index;
2095     /* Let's assume the audio driver that is used by SDL has two periods. */
2096     is->audio_current_pts = is->audio_clock - (double)(2 * is->audio_hw_buf_size + is->audio_write_buf_size) / bytes_per_sec;
2097     is->audio_current_pts_drift = is->audio_current_pts - audio_callback_time / 1000000.0;
2098 }
2099
2100 static int audio_open(void *opaque, int64_t wanted_channel_layout, int wanted_nb_channels, int wanted_sample_rate, struct AudioParams *audio_hw_params)
2101 {
2102     SDL_AudioSpec wanted_spec, spec;
2103     const char *env;
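         /* channel counts to try when SDL_OpenAudio() refuses a given count,
          * indexed by the failed count (clamped to 7); 0 means give up */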
2104     const int next_nb_channels[] = {0, 0, 1, 6, 2, 6, 4, 6};
2105
2106     env = SDL_getenv("SDL_AUDIO_CHANNELS");
2107     if (env) {
2108         wanted_nb_channels = atoi(env);
2109         wanted_channel_layout = av_get_default_channel_layout(wanted_nb_channels);
2110     }
2111     if (!wanted_channel_layout || wanted_nb_channels != av_get_channel_layout_nb_channels(wanted_channel_layout)) {
2112         wanted_channel_layout = av_get_default_channel_layout(wanted_nb_channels);
2113         wanted_channel_layout &= ~AV_CH_LAYOUT_STEREO_DOWNMIX;
2114     }
2115     wanted_spec.channels = av_get_channel_layout_nb_channels(wanted_channel_layout);
2116     wanted_spec.freq = wanted_sample_rate;
2117     if (wanted_spec.freq <= 0 || wanted_spec.channels <= 0) {
2118         fprintf(stderr, "Invalid sample rate or channel count!\n");
2119         return -1;
2120     }
2121     wanted_spec.format = AUDIO_S16SYS;
2122     wanted_spec.silence = 0;
2123     wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
2124     wanted_spec.callback = sdl_audio_callback;
2125     wanted_spec.userdata = opaque;
2126     while (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2127         fprintf(stderr, "SDL_OpenAudio (%d channels): %s\n", wanted_spec.channels, SDL_GetError());
2128         wanted_spec.channels = next_nb_channels[FFMIN(7, wanted_spec.channels)];
2129         if (!wanted_spec.channels) {
2130             fprintf(stderr, "No more channel combinations to try, audio open failed\n");
2131             return -1;
2132         }
2133         wanted_channel_layout = av_get_default_channel_layout(wanted_spec.channels);
2134     }
2135     if (spec.format != AUDIO_S16SYS) {
2136         fprintf(stderr, "SDL advised audio format %d is not supported!\n", spec.format);
2137         return -1;
2138     }
2139     if (spec.channels != wanted_spec.channels) {
2140         wanted_channel_layout = av_get_default_channel_layout(spec.channels);
2141         if (!wanted_channel_layout) {
2142             fprintf(stderr, "SDL advised channel count %d is not supported!\n", spec.channels);
2143             return -1;
2144         }
2145     }
2146
2147     audio_hw_params->fmt = AV_SAMPLE_FMT_S16;
2148     audio_hw_params->freq = spec.freq;
2149     audio_hw_params->channel_layout = wanted_channel_layout;
2150     audio_hw_params->channels =  spec.channels;
2151     return spec.size;
2152 }
2153
2154 /* open a given stream. Return 0 if OK */
2155 static int stream_component_open(VideoState *is, int stream_index)
2156 {
2157     AVFormatContext *ic = is->ic;
2158     AVCodecContext *avctx;
2159     AVCodec *codec;
2160     AVDictionary *opts;
2161     AVDictionaryEntry *t = NULL;
2162
2163     if (stream_index < 0 || stream_index >= ic->nb_streams)
2164         return -1;
2165     avctx = ic->streams[stream_index]->codec;
2166
2167     codec = avcodec_find_decoder(avctx->codec_id);
2168     opts = filter_codec_opts(codec_opts, avctx->codec_id, ic, ic->streams[stream_index], codec);
2169
2170     switch(avctx->codec_type){
2171         case AVMEDIA_TYPE_AUDIO   : is->last_audio_stream    = stream_index; if(audio_codec_name   ) codec= avcodec_find_decoder_by_name(   audio_codec_name); break;
2172         case AVMEDIA_TYPE_SUBTITLE: is->last_subtitle_stream = stream_index; if(subtitle_codec_name) codec= avcodec_find_decoder_by_name(subtitle_codec_name); break;
2173         case AVMEDIA_TYPE_VIDEO   : is->last_video_stream    = stream_index; if(video_codec_name   ) codec= avcodec_find_decoder_by_name(   video_codec_name); break;
2174     }
2175     if (!codec)
2176         return -1;
2177
2178     avctx->workaround_bugs   = workaround_bugs;
2179     avctx->lowres            = lowres;
2180     if(avctx->lowres > codec->max_lowres){
2181         av_log(avctx, AV_LOG_WARNING, "The maximum value for lowres supported by the decoder is %d\n",
2182                 codec->max_lowres);
2183         avctx->lowres= codec->max_lowres;
2184     }
2185     avctx->idct_algo         = idct;
2186     avctx->skip_frame        = skip_frame;
2187     avctx->skip_idct         = skip_idct;
2188     avctx->skip_loop_filter  = skip_loop_filter;
2189     avctx->error_concealment = error_concealment;
2190
2191     if(avctx->lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
2192     if (fast)   avctx->flags2 |= CODEC_FLAG2_FAST;
2193     if(codec->capabilities & CODEC_CAP_DR1)
2194         avctx->flags |= CODEC_FLAG_EMU_EDGE;
2195
2196     if (!av_dict_get(opts, "threads", NULL, 0))
2197         av_dict_set(&opts, "threads", "auto", 0);
2198     if (!codec ||
2199         avcodec_open2(avctx, codec, &opts) < 0)
2200         return -1;
2201     if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2202         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2203         return AVERROR_OPTION_NOT_FOUND;
2204     }
2205
2206     /* prepare audio output */
2207     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2208         int audio_hw_buf_size = audio_open(is, avctx->channel_layout, avctx->channels, avctx->sample_rate, &is->audio_src);
2209         if (audio_hw_buf_size < 0)
2210             return -1;
2211         is->audio_hw_buf_size = audio_hw_buf_size;
2212         is->audio_tgt = is->audio_src;
2213     }
2214
2215     ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2216     switch (avctx->codec_type) {
2217     case AVMEDIA_TYPE_AUDIO:
2218         is->audio_stream = stream_index;
2219         is->audio_st = ic->streams[stream_index];
2220         is->audio_buf_size  = 0;
2221         is->audio_buf_index = 0;
2222
2223         /* init averaging filter */
2224         is->audio_diff_avg_coef  = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2225         is->audio_diff_avg_count = 0;
2226         /* since we do not have a precise enough audio FIFO fullness,
2227            we correct audio sync only if the error is larger than this threshold */
2228         is->audio_diff_threshold = 2.0 * is->audio_hw_buf_size / av_samples_get_buffer_size(NULL, is->audio_tgt.channels, is->audio_tgt.freq, is->audio_tgt.fmt, 1);
2229
2230         memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
2231         memset(&is->audio_pkt_temp, 0, sizeof(is->audio_pkt_temp));
2232         packet_queue_start(&is->audioq);
2233         SDL_PauseAudio(0);
2234         break;
2235     case AVMEDIA_TYPE_VIDEO:
2236         is->video_stream = stream_index;
2237         is->video_st = ic->streams[stream_index];
2238
2239         packet_queue_start(&is->videoq);
2240         is->video_tid = SDL_CreateThread(video_thread, is);
2241         break;
2242     case AVMEDIA_TYPE_SUBTITLE:
2243         is->subtitle_stream = stream_index;
2244         is->subtitle_st = ic->streams[stream_index];
2245         packet_queue_start(&is->subtitleq);
2246
2247         is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
2248         break;
2249     default:
2250         break;
2251     }
2252     return 0;
2253 }
2254
2255 static void stream_component_close(VideoState *is, int stream_index)
2256 {
2257     AVFormatContext *ic = is->ic;
2258     AVCodecContext *avctx;
2259
2260     if (stream_index < 0 || stream_index >= ic->nb_streams)
2261         return;
2262     avctx = ic->streams[stream_index]->codec;
2263
2264     switch (avctx->codec_type) {
2265     case AVMEDIA_TYPE_AUDIO:
2266         packet_queue_abort(&is->audioq);
2267
2268         SDL_CloseAudio();
2269
2270         packet_queue_flush(&is->audioq);
2271         av_free_packet(&is->audio_pkt);
2272         swr_free(&is->swr_ctx);
2273         av_freep(&is->audio_buf1);
2274         is->audio_buf = NULL;
2275         av_freep(&is->frame);
2276
2277         if (is->rdft) {
2278             av_rdft_end(is->rdft);
2279             av_freep(&is->rdft_data);
2280             is->rdft = NULL;
2281             is->rdft_bits = 0;
2282         }
2283         break;
2284     case AVMEDIA_TYPE_VIDEO:
2285         packet_queue_abort(&is->videoq);
2286
2287         /* note: we also signal this mutex to make sure we unblock the
2288            video thread in all cases */
2289         SDL_LockMutex(is->pictq_mutex);
2290         SDL_CondSignal(is->pictq_cond);
2291         SDL_UnlockMutex(is->pictq_mutex);
2292
2293         SDL_WaitThread(is->video_tid, NULL);
2294
2295         packet_queue_flush(&is->videoq);
2296         break;
2297     case AVMEDIA_TYPE_SUBTITLE:
2298         packet_queue_abort(&is->subtitleq);
2299
2300         /* note: we also signal this mutex to make sure we unblock the
2301            subtitle thread in all cases */
2302         SDL_LockMutex(is->subpq_mutex);
2303         is->subtitle_stream_changed = 1;
2304
2305         SDL_CondSignal(is->subpq_cond);
2306         SDL_UnlockMutex(is->subpq_mutex);
2307
2308         SDL_WaitThread(is->subtitle_tid, NULL);
2309
2310         packet_queue_flush(&is->subtitleq);
2311         break;
2312     default:
2313         break;
2314     }
2315
2316     ic->streams[stream_index]->discard = AVDISCARD_ALL;
2317     avcodec_close(avctx);
2318 #if CONFIG_AVFILTER
2319     free_buffer_pool(&is->buffer_pool);
2320 #endif
2321     switch (avctx->codec_type) {
2322     case AVMEDIA_TYPE_AUDIO:
2323         is->audio_st = NULL;
2324         is->audio_stream = -1;
2325         break;
2326     case AVMEDIA_TYPE_VIDEO:
2327         is->video_st = NULL;
2328         is->video_stream = -1;
2329         break;
2330     case AVMEDIA_TYPE_SUBTITLE:
2331         is->subtitle_st = NULL;
2332         is->subtitle_stream = -1;
2333         break;
2334     default:
2335         break;
2336     }
2337 }
2338
2339 static int decode_interrupt_cb(void *ctx)
2340 {
2341     VideoState *is = ctx;
2342     return is->abort_request;
2343 }
2344
2345 /* this thread gets the stream from the disk or the network */
2346 static int read_thread(void *arg)
2347 {
2348     VideoState *is = arg;
2349     AVFormatContext *ic = NULL;
2350     int err, i, ret;
2351     int st_index[AVMEDIA_TYPE_NB];
2352     AVPacket pkt1, *pkt = &pkt1;
2353     int eof = 0;
2354     int pkt_in_play_range = 0;
2355     AVDictionaryEntry *t;
2356     AVDictionary **opts;
2357     int orig_nb_streams;
2358
2359     memset(st_index, -1, sizeof(st_index));
2360     is->last_video_stream = is->video_stream = -1;
2361     is->last_audio_stream = is->audio_stream = -1;
2362     is->last_subtitle_stream = is->subtitle_stream = -1;
2363
2364     ic = avformat_alloc_context();
2365     ic->interrupt_callback.callback = decode_interrupt_cb;
2366     ic->interrupt_callback.opaque = is;
2367     err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
2368     if (err < 0) {
2369         print_error(is->filename, err);
2370         ret = -1;
2371         goto fail;
2372     }
2373     if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2374         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2375         ret = AVERROR_OPTION_NOT_FOUND;
2376         goto fail;
2377     }
2378     is->ic = ic;
2379
2380     if (genpts)
2381         ic->flags |= AVFMT_FLAG_GENPTS;
2382
2383     opts = setup_find_stream_info_opts(ic, codec_opts);
2384     orig_nb_streams = ic->nb_streams;
2385
2386     err = avformat_find_stream_info(ic, opts);
2387     if (err < 0) {
2388         fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
2389         ret = -1;
2390         goto fail;
2391     }
2392     for (i = 0; i < orig_nb_streams; i++)
2393         av_dict_free(&opts[i]);
2394     av_freep(&opts);
2395
2396     if (ic->pb)
2397         ic->pb->eof_reached = 0; // FIXME hack, ffplay maybe should not use url_feof() to test for the end
2398
2399     if (seek_by_bytes < 0)
2400         seek_by_bytes = !!(ic->iformat->flags & AVFMT_TS_DISCONT);
2401
2402     /* if seeking requested, we execute it */
2403     if (start_time != AV_NOPTS_VALUE) {
2404         int64_t timestamp;
2405
2406         timestamp = start_time;
2407         /* add the stream start time */
2408         if (ic->start_time != AV_NOPTS_VALUE)
2409             timestamp += ic->start_time;
2410         ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2411         if (ret < 0) {
2412             fprintf(stderr, "%s: could not seek to position %0.3f\n",
2413                     is->filename, (double)timestamp / AV_TIME_BASE);
2414         }
2415     }
2416
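         /* discard all streams by default, then let av_find_best_stream() pick one
          * stream per media type according to the -ast/-vst/-sst preferences */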
2417     for (i = 0; i < ic->nb_streams; i++)
2418         ic->streams[i]->discard = AVDISCARD_ALL;
2419     if (!video_disable)
2420         st_index[AVMEDIA_TYPE_VIDEO] =
2421             av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2422                                 wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2423     if (!audio_disable)
2424         st_index[AVMEDIA_TYPE_AUDIO] =
2425             av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2426                                 wanted_stream[AVMEDIA_TYPE_AUDIO],
2427                                 st_index[AVMEDIA_TYPE_VIDEO],
2428                                 NULL, 0);
2429     if (!video_disable)
2430         st_index[AVMEDIA_TYPE_SUBTITLE] =
2431             av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2432                                 wanted_stream[AVMEDIA_TYPE_SUBTITLE],
2433                                 (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2434                                  st_index[AVMEDIA_TYPE_AUDIO] :
2435                                  st_index[AVMEDIA_TYPE_VIDEO]),
2436                                 NULL, 0);
2437     if (show_status) {
2438         av_dump_format(ic, 0, is->filename, 0);
2439     }
2440
2441     is->show_mode = show_mode;
2442
2443     /* open the streams */
2444     if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2445         stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2446     }
2447
2448     ret = -1;
2449     if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2450         ret = stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2451     }
2452     is->refresh_tid = SDL_CreateThread(refresh_thread, is);
2453     if (is->show_mode == SHOW_MODE_NONE)
2454         is->show_mode = ret >= 0 ? SHOW_MODE_VIDEO : SHOW_MODE_RDFT;
2455
2456     if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2457         stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2458     }
2459
2460     if (is->video_stream < 0 && is->audio_stream < 0) {
2461         fprintf(stderr, "%s: could not open codecs\n", is->filename);
2462         ret = -1;
2463         goto fail;
2464     }
2465
2466     for (;;) {
2467         if (is->abort_request)
2468             break;
2469         if (is->paused != is->last_paused) {
2470             is->last_paused = is->paused;
2471             if (is->paused)
2472                 is->read_pause_return = av_read_pause(ic);
2473             else
2474                 av_read_play(ic);
2475         }
2476 #if CONFIG_RTSP_DEMUXER || CONFIG_MMSH_PROTOCOL
2477         if (is->paused &&
2478                 (!strcmp(ic->iformat->name, "rtsp") ||
2479                  (ic->pb && !strncmp(input_filename, "mmsh:", 5)))) {
2480             /* wait 10 ms to avoid trying to get another packet */
2481             /* XXX: horrible */
2482             SDL_Delay(10);
2483             continue;
2484         }
2485 #endif
2486         if (is->seek_req) {
2487             int64_t seek_target = is->seek_pos;
2488             int64_t seek_min    = is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2489             int64_t seek_max    = is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2490 // FIXME the +-2 is due to rounding being not done in the correct direction in generation
2491 //      of the seek_pos/seek_rel variables
2492
2493             ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2494             if (ret < 0) {
2495                 fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
2496             } else {
2497                 if (is->audio_stream >= 0) {
2498                     packet_queue_flush(&is->audioq);
2499                     packet_queue_put(&is->audioq, &flush_pkt);
2500                 }
2501                 if (is->subtitle_stream >= 0) {
2502                     packet_queue_flush(&is->subtitleq);
2503                     packet_queue_put(&is->subtitleq, &flush_pkt);
2504                 }
2505                 if (is->video_stream >= 0) {
2506                     packet_queue_flush(&is->videoq);
2507                     packet_queue_put(&is->videoq, &flush_pkt);
2508                 }
2509             }
2510             is->seek_req = 0;
2511             eof = 0;
2512         }
2513         if (is->que_attachments_req) {
2514             avformat_queue_attached_pictures(ic);
2515             is->que_attachments_req = 0;
2516         }
2517
2518         /* if the queues are full, no need to read more */
2519         if (!infinite_buffer &&
2520               (is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2521             || (   (is->audioq   .nb_packets > MIN_FRAMES || is->audio_stream < 0 || is->audioq.abort_request)
2522                 && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream < 0 || is->videoq.abort_request)
2523                 && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream < 0 || is->subtitleq.abort_request)))) {
2524             /* wait 10 ms */
2525             SDL_Delay(10);
2526             continue;
2527         }
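             /* at end of stream: queue empty packets so decoders that buffer frames can
              * drain, then loop or exit (-loop / -autoexit) once the queues are empty */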
2528         if (eof) {
2529             if (is->video_stream >= 0) {
2530                 av_init_packet(pkt);
2531                 pkt->data = NULL;
2532                 pkt->size = 0;
2533                 pkt->stream_index = is->video_stream;
2534                 packet_queue_put(&is->videoq, pkt);
2535             }
2536             if (is->audio_stream >= 0 &&
2537                 is->audio_st->codec->codec->capabilities & CODEC_CAP_DELAY) {
2538                 av_init_packet(pkt);
2539                 pkt->data = NULL;
2540                 pkt->size = 0;
2541                 pkt->stream_index = is->audio_stream;
2542                 packet_queue_put(&is->audioq, pkt);
2543             }
2544             SDL_Delay(10);
2545             if (is->audioq.size + is->videoq.size + is->subtitleq.size == 0) {
2546                 if (loop != 1 && (!loop || --loop)) {
2547                     stream_seek(is, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
2548                 } else if (autoexit) {
2549                     ret = AVERROR_EOF;
2550                     goto fail;
2551                 }
2552             }
2553             eof=0;
2554             continue;
2555         }
2556         ret = av_read_frame(ic, pkt);
2557         if (ret < 0) {
2558             if (ret == AVERROR_EOF || url_feof(ic->pb))
2559                 eof = 1;
2560             if (ic->pb && ic->pb->error)
2561                 break;
2562             SDL_Delay(100); /* wait for user event */
2563             continue;
2564         }
2565         /* check if packet is in play range specified by user, then queue, otherwise discard */
2566         pkt_in_play_range = duration == AV_NOPTS_VALUE ||
2567                 (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
2568                 av_q2d(ic->streams[pkt->stream_index]->time_base) -
2569                 (double)(start_time != AV_NOPTS_VALUE ? start_time : 0) / 1000000
2570                 <= ((double)duration / 1000000);
2571         if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
2572             packet_queue_put(&is->audioq, pkt);
2573         } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
2574             packet_queue_put(&is->videoq, pkt);
2575         } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
2576             packet_queue_put(&is->subtitleq, pkt);
2577         } else {
2578             av_free_packet(pkt);
2579         }
2580     }
2581     /* wait until the end */
2582     while (!is->abort_request) {
2583         SDL_Delay(100);
2584     }
2585
2586     ret = 0;
2587  fail:
2588     /* close each stream */
2589     if (is->audio_stream >= 0)
2590         stream_component_close(is, is->audio_stream);
2591     if (is->video_stream >= 0)
2592         stream_component_close(is, is->video_stream);
2593     if (is->subtitle_stream >= 0)
2594         stream_component_close(is, is->subtitle_stream);
2595     if (is->ic) {
2596         avformat_close_input(&is->ic);
2597     }
2598
2599     if (ret != 0) {
2600         SDL_Event event;
2601
2602         event.type = FF_QUIT_EVENT;
2603         event.user.data1 = is;
2604         SDL_PushEvent(&event);
2605     }
2606     return 0;
2607 }
2608
2609 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2610 {
2611     VideoState *is;
2612
2613     is = av_mallocz(sizeof(VideoState));
2614     if (!is)
2615         return NULL;
2616     av_strlcpy(is->filename, filename, sizeof(is->filename));
2617     is->iformat = iformat;
2618     is->ytop    = 0;
2619     is->xleft   = 0;
2620
2621     /* start video display */
2622     is->pictq_mutex = SDL_CreateMutex();
2623     is->pictq_cond  = SDL_CreateCond();
2624
2625     is->subpq_mutex = SDL_CreateMutex();
2626     is->subpq_cond  = SDL_CreateCond();
2627
2628     packet_queue_init(&is->videoq);
2629     packet_queue_init(&is->audioq);
2630     packet_queue_init(&is->subtitleq);
2631
2632     is->av_sync_type = av_sync_type;
2633     is->read_tid     = SDL_CreateThread(read_thread, is);
2634     if (!is->read_tid) {
2635         av_free(is);
2636         return NULL;
2637     }
2638     return is;
2639 }
2640
2641 static void stream_cycle_channel(VideoState *is, int codec_type)
2642 {
2643     AVFormatContext *ic = is->ic;
2644     int start_index, stream_index;
2645     int old_index;
2646     AVStream *st;
2647
2648     if (codec_type == AVMEDIA_TYPE_VIDEO) {
2649         start_index = is->last_video_stream;
2650         old_index = is->video_stream;
2651     } else if (codec_type == AVMEDIA_TYPE_AUDIO) {
2652         start_index = is->last_audio_stream;
2653         old_index = is->audio_stream;
2654     } else {
2655         start_index = is->last_subtitle_stream;
2656         old_index = is->subtitle_stream;
2657     }
2658     stream_index = start_index;
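         /* search forward from the last used stream, wrapping around, for the next
          * stream of the requested type; for subtitles, wrapping past the end selects
          * "no subtitle stream" instead */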
2659     for (;;) {
2660         if (++stream_index >= is->ic->nb_streams)
2661         {
2662             if (codec_type == AVMEDIA_TYPE_SUBTITLE)
2663             {
2664                 stream_index = -1;
2665                 is->last_subtitle_stream = -1;
2666                 goto the_end;
2667             }
2668             if (start_index == -1)
2669                 return;
2670             stream_index = 0;
2671         }
2672         if (stream_index == start_index)
2673             return;
2674         st = ic->streams[stream_index];
2675         if (st->codec->codec_type == codec_type) {
2676             /* check that parameters are OK */
2677             switch (codec_type) {
2678             case AVMEDIA_TYPE_AUDIO:
2679                 if (st->codec->sample_rate != 0 &&
2680                     st->codec->channels != 0)
2681                     goto the_end;
2682                 break;
2683             case AVMEDIA_TYPE_VIDEO:
2684             case AVMEDIA_TYPE_SUBTITLE:
2685                 goto the_end;
2686             default:
2687                 break;
2688             }
2689         }
2690     }
2691  the_end:
2692     stream_component_close(is, old_index);
2693     stream_component_open(is, stream_index);
2694     if (codec_type == AVMEDIA_TYPE_VIDEO)
2695         is->que_attachments_req = 1;
2696 }
2697
2698
2699 static void toggle_full_screen(VideoState *is)
2700 {
2701 #if defined(__APPLE__) && SDL_VERSION_ATLEAST(1, 2, 14)
2702     /* OS X needs to reallocate the SDL overlays */
2703     int i;
2704     for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++)
2705         is->pictq[i].reallocate = 1;
2706 #endif
2707     is_full_screen = !is_full_screen;
2708     video_open(is, 1);
2709 }
2710
2711 static void toggle_pause(VideoState *is)
2712 {
2713     stream_toggle_pause(is);
2714     is->step = 0;
2715 }
2716
2717 static void step_to_next_frame(VideoState *is)
2718 {
2719     /* if the stream is paused, unpause it, then step */
2720     if (is->paused)
2721         stream_toggle_pause(is);
2722     is->step = 1;
2723 }
2724
2725 static void toggle_audio_display(VideoState *is)
2726 {
2727     int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2728     is->show_mode = (is->show_mode + 1) % SHOW_MODE_NB;
2729     fill_rectangle(screen,
2730                 is->xleft, is->ytop, is->width, is->height,
2731                 bgcolor);
2732     SDL_UpdateRect(screen, is->xleft, is->ytop, is->width, is->height);
2733 }
2734
2735 /* handle an event sent by the GUI */
2736 static void event_loop(VideoState *cur_stream)
2737 {
2738     SDL_Event event;
2739     double incr, pos, frac;
2740
2741     for (;;) {
2742         double x;
2743         SDL_WaitEvent(&event);
2744         switch (event.type) {
2745         case SDL_KEYDOWN:
2746             if (exit_on_keydown) {
2747                 do_exit(cur_stream);
2748                 break;
2749             }
2750             switch (event.key.keysym.sym) {
2751             case SDLK_ESCAPE:
2752             case SDLK_q:
2753                 do_exit(cur_stream);
2754                 break;
2755             case SDLK_f:
2756                 toggle_full_screen(cur_stream);
2757                 cur_stream->force_refresh = 1;
2758                 break;
2759             case SDLK_p:
2760             case SDLK_SPACE:
2761                 toggle_pause(cur_stream);
2762                 break;
2763             case SDLK_s: // S: Step to next frame
2764                 step_to_next_frame(cur_stream);
2765                 break;
2766             case SDLK_a:
2767                 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
2768                 break;
2769             case SDLK_v:
2770                 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
2771                 break;
2772             case SDLK_t:
2773                 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
2774                 break;
2775             case SDLK_w:
2776                 toggle_audio_display(cur_stream);
2777                 cur_stream->force_refresh = 1;
2778                 break;
2779             case SDLK_PAGEUP:
2780                 incr = 600.0;
2781                 goto do_seek;
2782             case SDLK_PAGEDOWN:
2783                 incr = -600.0;
2784                 goto do_seek;
2785             case SDLK_LEFT:
2786                 incr = -10.0;
2787                 goto do_seek;
2788             case SDLK_RIGHT:
2789                 incr = 10.0;
2790                 goto do_seek;
2791             case SDLK_UP:
2792                 incr = 60.0;
2793                 goto do_seek;
2794             case SDLK_DOWN:
2795                 incr = -60.0;
2796             do_seek:
2797                     if (seek_by_bytes) {
2798                         if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos >= 0) {
2799                             pos = cur_stream->video_current_pos;
2800                         } else if (cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos >= 0) {
2801                             pos = cur_stream->audio_pkt.pos;
2802                         } else
2803                             pos = avio_tell(cur_stream->ic->pb);
2804                         if (cur_stream->ic->bit_rate)
2805                             incr *= cur_stream->ic->bit_rate / 8.0;
2806                         else
2807                             incr *= 180000.0;
2808                         pos += incr;
2809                         stream_seek(cur_stream, pos, incr, 1);
2810                     } else {
2811                         pos = get_master_clock(cur_stream);
2812                         pos += incr;
2813                         stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
2814                     }
2815                 break;
2816             default:
2817                 break;
2818             }
2819             break;
2820         case SDL_VIDEOEXPOSE:
2821             cur_stream->force_refresh = 1;
2822             break;
2823         case SDL_MOUSEBUTTONDOWN:
2824             if (exit_on_mousedown) {
2825                 do_exit(cur_stream);
2826                 break;
2827             }
2828         case SDL_MOUSEMOTION:
2829             if (event.type == SDL_MOUSEBUTTONDOWN) {
2830                 x = event.button.x;
2831             } else {
2832                 if (event.motion.state != SDL_PRESSED)
2833                     break;
2834                 x = event.motion.x;
2835             }
2836                 if (seek_by_bytes || cur_stream->ic->duration <= 0) {
2837                     uint64_t size =  avio_size(cur_stream->ic->pb);
2838                     stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
2839                 } else {
2840                     int64_t ts;
2841                     int ns, hh, mm, ss;
2842                     int tns, thh, tmm, tss;
2843                     tns  = cur_stream->ic->duration / 1000000LL;
2844                     thh  = tns / 3600;
2845                     tmm  = (tns % 3600) / 60;
2846                     tss  = (tns % 60);
2847                     frac = x / cur_stream->width;
2848                     ns   = frac * tns;
2849                     hh   = ns / 3600;
2850                     mm   = (ns % 3600) / 60;
2851                     ss   = (ns % 60);
2852                     fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
2853                             hh, mm, ss, thh, tmm, tss);
2854                     ts = frac * cur_stream->ic->duration;
2855                     if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
2856                         ts += cur_stream->ic->start_time;
2857                     stream_seek(cur_stream, ts, 0, 0);
2858                 }
2859             break;
2860         case SDL_VIDEORESIZE:
2861             screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
2862                                       SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
2863             screen_width  = cur_stream->width  = event.resize.w;
2864             screen_height = cur_stream->height = event.resize.h;
2865             cur_stream->force_refresh = 1;
2866             break;
2867         case SDL_QUIT:
2868         case FF_QUIT_EVENT:
2869             do_exit(cur_stream);
2870             break;
2871         case FF_ALLOC_EVENT:
2872             alloc_picture(event.user.data1);
2873             break;
2874         case FF_REFRESH_EVENT:
2875             video_refresh(event.user.data1);
2876             cur_stream->refresh = 0;
2877             break;
2878         default:
2879             break;
2880         }
2881     }
2882 }
2883
2884 static int opt_frame_size(void *optctx, const char *opt, const char *arg)
2885 {
2886     av_log(NULL, AV_LOG_WARNING, "Option -s is deprecated, use -video_size.\n");
2887     return opt_default(NULL, "video_size", arg);
2888 }
2889
2890 static int opt_width(void *optctx, const char *opt, const char *arg)
2891 {
2892     screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2893     return 0;
2894 }
2895
2896 static int opt_height(void *optctx, const char *opt, const char *arg)
2897 {
2898     screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2899     return 0;
2900 }
2901
2902 static int opt_format(void *optctx, const char *opt, const char *arg)
2903 {
2904     file_iformat = av_find_input_format(arg);
2905     if (!file_iformat) {
2906         fprintf(stderr, "Unknown input format: %s\n", arg);
2907         return AVERROR(EINVAL);
2908     }
2909     return 0;
2910 }
2911
2912 static int opt_frame_pix_fmt(void *optctx, const char *opt, const char *arg)
2913 {
2914     av_log(NULL, AV_LOG_WARNING, "Option -pix_fmt is deprecated, use -pixel_format.\n");
2915     return opt_default(NULL, "pixel_format", arg);
2916 }
2917
2918 static int opt_sync(void *optctx, const char *opt, const char *arg)
2919 {
2920     if (!strcmp(arg, "audio"))
2921         av_sync_type = AV_SYNC_AUDIO_MASTER;
2922     else if (!strcmp(arg, "video"))
2923         av_sync_type = AV_SYNC_VIDEO_MASTER;
2924     else if (!strcmp(arg, "ext"))
2925         av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2926     else {
2927         fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
2928         exit(1);
2929     }
2930     return 0;
2931 }
2932
2933 static int opt_seek(void *optctx, const char *opt, const char *arg)
2934 {
2935     start_time = parse_time_or_die(opt, arg, 1);
2936     return 0;
2937 }
2938
2939 static int opt_duration(void *optctx, const char *opt, const char *arg)
2940 {
2941     duration = parse_time_or_die(opt, arg, 1);
2942     return 0;
2943 }
2944
2945 static int opt_show_mode(void *optctx, const char *opt, const char *arg)
2946 {
2947     show_mode = !strcmp(arg, "video") ? SHOW_MODE_VIDEO :
2948                 !strcmp(arg, "waves") ? SHOW_MODE_WAVES :
2949                 !strcmp(arg, "rdft" ) ? SHOW_MODE_RDFT  :
2950                 parse_number_or_die(opt, arg, OPT_INT, 0, SHOW_MODE_NB-1);
2951     return 0;
2952 }
2953
2954 static void opt_input_file(void *optctx, const char *filename)
2955 {
2956     if (input_filename) {
2957         fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
2958                 filename, input_filename);
2959         exit_program(1);
2960     }
2961     if (!strcmp(filename, "-"))
2962         filename = "pipe:";
2963     input_filename = filename;
2964 }
2965
2966 static int opt_codec(void *o, const char *opt, const char *arg)
2967 {
2968     switch(opt[strlen(opt)-1]){
2969     case 'a' :    audio_codec_name = arg; break;
2970     case 's' : subtitle_codec_name = arg; break;
2971     case 'v' :    video_codec_name = arg; break;
2972     }
2973     return 0;
2974 }
2975
2976 static int dummy;
2977
2978 static const OptionDef options[] = {
2979 #include "cmdutils_common_opts.h"
2980     { "x", HAS_ARG, { .func_arg = opt_width }, "force displayed width", "width" },
2981     { "y", HAS_ARG, { .func_arg = opt_height }, "force displayed height", "height" },
2982     { "s", HAS_ARG | OPT_VIDEO, { .func_arg = opt_frame_size }, "set frame size (WxH or abbreviation)", "size" },
2983     { "fs", OPT_BOOL, { &is_full_screen }, "force full screen" },
2984     { "an", OPT_BOOL, { &audio_disable }, "disable audio" },
2985     { "vn", OPT_BOOL, { &video_disable }, "disable video" },
2986     { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_AUDIO] }, "select desired audio stream", "stream_number" },
2987     { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_VIDEO] }, "select desired video stream", "stream_number" },
2988     { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_SUBTITLE] }, "select desired subtitle stream", "stream_number" },
2989     { "ss", HAS_ARG, { .func_arg = opt_seek }, "seek to a given position in seconds", "pos" },
2990     { "t", HAS_ARG, { .func_arg = opt_duration }, "play \"duration\" seconds of audio/video", "duration" },
2991     { "bytes", OPT_INT | HAS_ARG, { &seek_by_bytes }, "seek by bytes 0=off 1=on -1=auto", "val" },
2992     { "nodisp", OPT_BOOL, { &display_disable }, "disable graphical display" },
2993     { "f", HAS_ARG, { .func_arg = opt_format }, "force format", "fmt" },
2994     { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, { .func_arg = opt_frame_pix_fmt }, "set pixel format", "format" },
2995     { "stats", OPT_BOOL | OPT_EXPERT, { &show_status }, "show status", "" },
2996     { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, { &workaround_bugs }, "work around bugs", "" },
2997     { "fast", OPT_BOOL | OPT_EXPERT, { &fast }, "non spec compliant optimizations", "" },
2998     { "genpts", OPT_BOOL | OPT_EXPERT, { &genpts }, "generate pts", "" },
2999     { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, { &decoder_reorder_pts }, "let decoder reorder pts 0=off 1=on -1=auto", ""},
3000     { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, { &lowres }, "set low resolution decoding factor", "factor" },
3001     { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, { &skip_loop_filter }, "skip loop filtering for the selected frames", "val" },
3002     { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, { &skip_frame }, "skip decoding of the selected frames", "val" },
3003     { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, { &skip_idct }, "skip the IDCT for the selected frames", "val" },
3004     { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, { &idct }, "set idct algo",  "algo" },
3005     { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, { &error_concealment }, "set error concealment options",  "bit_mask" },
3006     { "sync", HAS_ARG | OPT_EXPERT, { .func_arg = opt_sync }, "set audio-video sync. type (type=audio/video/ext)", "type" },
3007     { "autoexit", OPT_BOOL | OPT_EXPERT, { &autoexit }, "exit at the end", "" },
3008     { "exitonkeydown", OPT_BOOL | OPT_EXPERT, { &exit_on_keydown }, "exit on key down", "" },
3009     { "exitonmousedown", OPT_BOOL | OPT_EXPERT, { &exit_on_mousedown }, "exit on mouse down", "" },
3010     { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, { &loop }, "set number of times the playback shall be looped", "loop count" },
3011     { "framedrop", OPT_BOOL | OPT_EXPERT, { &framedrop }, "drop frames when cpu is too slow", "" },
3012     { "infbuf", OPT_BOOL | OPT_EXPERT, { &infinite_buffer }, "don't limit the input buffer size (useful with realtime streams)", "" },
3013     { "window_title", OPT_STRING | HAS_ARG, { &window_title }, "set window title", "window title" },
3014 #if CONFIG_AVFILTER
3015     { "vf", OPT_STRING | HAS_ARG, { &vfilters }, "video filters", "filter list" },
3016 #endif
3017     { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, { &rdftspeed }, "rdft speed", "msecs" },
3018     { "showmode", HAS_ARG, { .func_arg = opt_show_mode }, "select show mode (0 = video, 1 = waves, 2 = RDFT)", "mode" },
3019     { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, { .func_arg = opt_default }, "generic catch all option", "" },
3020     { "i", OPT_BOOL, { &dummy }, "read specified file", "input_file" },
3021     { "codec", HAS_ARG, { .func_arg = opt_codec }, "force decoder", "decoder" },
3022     { NULL, },
3023 };
3024
3025 static void show_usage(void)
3026 {
3027     av_log(NULL, AV_LOG_INFO, "Simple media player\n");
3028     av_log(NULL, AV_LOG_INFO, "usage: %s [options] input_file\n", program_name);
3029     av_log(NULL, AV_LOG_INFO, "\n");
3030 }
3031
3032 void show_help_default(const char *opt, const char *arg)
3033 {
3034     av_log_set_callback(log_callback_help);
3035     show_usage();
3036     show_help_options(options, "Main options:", 0, OPT_EXPERT, 0);
3037     show_help_options(options, "Advanced options:", OPT_EXPERT, 0, 0);
3038     printf("\n");
3039     show_help_children(avcodec_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3040     show_help_children(avformat_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3041 #if !CONFIG_AVFILTER
3042     show_help_children(sws_get_class(), AV_OPT_FLAG_ENCODING_PARAM);
3043 #else
3044     show_help_children(avfilter_get_class(), AV_OPT_FLAG_FILTERING_PARAM);
3045 #endif
3046     printf("\nWhile playing:\n"
3047            "q, ESC              quit\n"
3048            "f                   toggle full screen\n"
3049            "p, SPC              pause\n"
3050            "a                   cycle audio channel\n"
3051            "v                   cycle video channel\n"
3052            "t                   cycle subtitle channel\n"
3053            "w                   show audio waves\n"
3054            "s                   activate frame-step mode\n"
3055            "left/right          seek backward/forward 10 seconds\n"
3056            "down/up             seek backward/forward 1 minute\n"
3057            "page down/page up   seek backward/forward 10 minutes\n"
3058            "mouse click         seek to percentage in file corresponding to fraction of width\n"
3059            );
3060 }
3061
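/* Editorial note: lock manager callback registered with av_lockmgr_register()
   in main(); it must return 0 on success and non-zero on failure, hence the
   "!!" around the SDL mutex calls. */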
3062 static int lockmgr(void **mtx, enum AVLockOp op)
3063 {
3064     switch (op) {
3065     case AV_LOCK_CREATE:
3066         *mtx = SDL_CreateMutex();
3067         if (!*mtx)
3068             return 1;
3069         return 0;
3070     case AV_LOCK_OBTAIN:
3071         return !!SDL_LockMutex(*mtx);
3072     case AV_LOCK_RELEASE:
3073         return !!SDL_UnlockMutex(*mtx);
3074     case AV_LOCK_DESTROY:
3075         SDL_DestroyMutex(*mtx);
3076         return 0;
3077     }
3078     return 1;
3079 }
3080
3081 /* program entry point */
3082 int main(int argc, char **argv)
3083 {
3084     int flags;
3085     VideoState *is;
3086     char dummy_videodriver[] = "SDL_VIDEODRIVER=dummy";
3087
3088     av_log_set_flags(AV_LOG_SKIP_REPEATED);
3089     parse_loglevel(argc, argv, options);
3090
3091     /* register all codecs, demuxers and protocols */
3092     avcodec_register_all();
3093 #if CONFIG_AVDEVICE
3094     avdevice_register_all();
3095 #endif
3096 #if CONFIG_AVFILTER
3097     avfilter_register_all();
3098 #endif
3099     av_register_all();
3100     avformat_network_init();
3101
3102     init_opts();
3103
3104     signal(SIGINT , sigterm_handler); /* Interrupt (ANSI).    */
3105     signal(SIGTERM, sigterm_handler); /* Termination (ANSI).  */
3106
3107     show_banner(argc, argv, options);
3108
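    /* non-option arguments (the input filename) are handed to opt_input_file() */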
3109     parse_options(NULL, argc, argv, options, opt_input_file);
3110
3111     if (!input_filename) {
3112         show_usage();
3113         fprintf(stderr, "An input file must be specified\n");
3114         fprintf(stderr, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
3115         exit(1);
3116     }
3117
3118     if (display_disable) {
3119         video_disable = 1;
3120     }
3121     flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3122     if (audio_disable)
3123         flags &= ~SDL_INIT_AUDIO;
3124     if (display_disable)
3125         SDL_putenv(dummy_videodriver); /* For the event queue, we always need a video driver. */
3126 #if !defined(__MINGW32__) && !defined(__APPLE__)
3127     flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
3128 #endif
3129     if (SDL_Init (flags)) {
3130         fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
3131         fprintf(stderr, "(Did you set the DISPLAY variable?)\n");
3132         exit(1);
3133     }
3134
3135     if (!display_disable) {
3136 #if HAVE_SDL_VIDEO_SIZE
3137         const SDL_VideoInfo *vi = SDL_GetVideoInfo();
3138         fs_screen_width = vi->current_w;
3139         fs_screen_height = vi->current_h;
3140 #endif
3141     }
3142
3143     SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
3144     SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3145     SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3146
3147     if (av_lockmgr_register(lockmgr)) {
3148         fprintf(stderr, "Could not initialize lock manager!\n");
3149         do_exit(NULL);
3150     }
3151
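    /* Editorial note: flush_pkt is a sentinel packet. It is queued on seeks,
       and the decoder threads elsewhere in this file compare a packet's data
       pointer against flush_pkt.data to know when to flush the codec buffers,
       so the "FLUSH" payload itself is never read. */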
3152     av_init_packet(&flush_pkt);
3153     flush_pkt.data = (uint8_t *)(intptr_t)"FLUSH";
3154
3155     is = stream_open(input_filename, file_iformat);
3156     if (!is) {
3157         fprintf(stderr, "Failed to initialize VideoState!\n");
3158         do_exit(NULL);
3159     }
3160
3161     event_loop(is);
3162
3163     /* never returns */
3164
3165     return 0;
3166 }