ffplay: always free buffersink_params in configure_video_filters
1 /*
2  * Copyright (c) 2003 Fabrice Bellard
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20
21 /**
22  * @file
23  * simple media player based on the FFmpeg libraries
24  */
25
26 #include "config.h"
27 #include <inttypes.h>
28 #include <math.h>
29 #include <limits.h>
30 #include <signal.h>
31 #include "libavutil/avstring.h"
32 #include "libavutil/colorspace.h"
33 #include "libavutil/mathematics.h"
34 #include "libavutil/pixdesc.h"
35 #include "libavutil/imgutils.h"
36 #include "libavutil/dict.h"
37 #include "libavutil/parseutils.h"
38 #include "libavutil/samplefmt.h"
39 #include "libavutil/avassert.h"
40 #include "libavutil/time.h"
41 #include "libavformat/avformat.h"
42 #include "libavdevice/avdevice.h"
43 #include "libswscale/swscale.h"
44 #include "libavutil/opt.h"
45 #include "libavcodec/avfft.h"
46 #include "libswresample/swresample.h"
47
48 #if CONFIG_AVFILTER
49 # include "libavfilter/avcodec.h"
50 # include "libavfilter/avfilter.h"
51 # include "libavfilter/avfiltergraph.h"
52 # include "libavfilter/buffersink.h"
53 # include "libavfilter/buffersrc.h"
54 #endif
55
56 #include <SDL.h>
57 #include <SDL_thread.h>
58
59 #include "cmdutils.h"
60
61 #include <assert.h>
62
63 const char program_name[] = "ffplay";
64 const int program_birth_year = 2003;
65
66 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
67 #define MIN_FRAMES 5
68
69 /* SDL audio buffer size, in samples. Should be small to have precise
70    A/V sync as SDL does not have hardware buffer fullness info. */
71 #define SDL_AUDIO_BUFFER_SIZE 1024
72
73 /* no AV sync correction is done if below the AV sync threshold */
74 #define AV_SYNC_THRESHOLD 0.01
75 /* no AV correction is done if too big error */
76 #define AV_NOSYNC_THRESHOLD 10.0
77
78 /* maximum audio speed change to get correct sync */
79 #define SAMPLE_CORRECTION_PERCENT_MAX 10
80
81 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
82 #define AUDIO_DIFF_AVG_NB   20
83
84 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
85 /* TODO: We assume that a decoded and resampled frame fits into this buffer */
86 #define SAMPLE_ARRAY_SIZE (8 * 65536)
87
88 static int sws_flags = SWS_BICUBIC;
89
90 typedef struct MyAVPacketList {
91     AVPacket pkt;
92     struct MyAVPacketList *next;
93     int serial;
94 } MyAVPacketList;
95
96 typedef struct PacketQueue {
97     MyAVPacketList *first_pkt, *last_pkt;
98     int nb_packets;
99     int size;
100     int abort_request;
101     int serial;
102     SDL_mutex *mutex;
103     SDL_cond *cond;
104 } PacketQueue;
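
/* Each queued packet carries the queue's current "serial"; the serial is
   bumped whenever the flush packet is queued (i.e. after a seek), so data
   decoded from packets with a stale serial can be detected and discarded. */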
105
106 #define VIDEO_PICTURE_QUEUE_SIZE 4
107 #define SUBPICTURE_QUEUE_SIZE 4
108
109 typedef struct VideoPicture {
110     double pts;             // presentation timestamp for this picture
111     int64_t pos;            // byte position in file
112     int skip;
113     SDL_Overlay *bmp;
114     int width, height; /* source height & width */
115     AVRational sample_aspect_ratio;
116     int allocated;
117     int reallocate;
118     int serial;
119
120 #if CONFIG_AVFILTER
121     AVFilterBufferRef *picref;
122 #endif
123 } VideoPicture;
124
125 typedef struct SubPicture {
126     double pts; /* presentation time stamp for this picture */
127     AVSubtitle sub;
128 } SubPicture;
129
130 typedef struct AudioParams {
131     int freq;
132     int channels;
133     int64_t channel_layout;
134     enum AVSampleFormat fmt;
135 } AudioParams;
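
/* AudioParams describes a raw PCM configuration; audio_src below holds the
   format produced by the decoder and audio_tgt the format requested from SDL,
   with swr_ctx converting between them whenever they differ. */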
136
137 enum {
138     AV_SYNC_AUDIO_MASTER, /* default choice */
139     AV_SYNC_VIDEO_MASTER,
140     AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
141 };
142
143 typedef struct VideoState {
144     SDL_Thread *read_tid;
145     SDL_Thread *video_tid;
146     SDL_Thread *refresh_tid;
147     AVInputFormat *iformat;
148     int no_background;
149     int abort_request;
150     int force_refresh;
151     int paused;
152     int last_paused;
153     int queue_attachments_req;
154     int seek_req;
155     int seek_flags;
156     int64_t seek_pos;
157     int64_t seek_rel;
158     int read_pause_return;
159     AVFormatContext *ic;
160
161     int audio_stream;
162
163     int av_sync_type;
164     double external_clock;                   ///< external clock base
165     double external_clock_drift;             ///< external clock base - time (av_gettime) at which we updated external_clock
166     int64_t external_clock_time;             ///< last reference time
167
168     double audio_clock;
169     double audio_diff_cum; /* used for AV difference average computation */
170     double audio_diff_avg_coef;
171     double audio_diff_threshold;
172     int audio_diff_avg_count;
173     AVStream *audio_st;
174     PacketQueue audioq;
175     int audio_hw_buf_size;
176     DECLARE_ALIGNED(16,uint8_t,audio_buf2)[AVCODEC_MAX_AUDIO_FRAME_SIZE * 4];
177     uint8_t silence_buf[SDL_AUDIO_BUFFER_SIZE];
178     uint8_t *audio_buf;
179     uint8_t *audio_buf1;
180     unsigned int audio_buf_size; /* in bytes */
181     int audio_buf_index; /* in bytes */
182     int audio_write_buf_size;
183     AVPacket audio_pkt_temp;
184     AVPacket audio_pkt;
185     int audio_pkt_temp_serial;
186     struct AudioParams audio_src;
187     struct AudioParams audio_tgt;
188     struct SwrContext *swr_ctx;
189     double audio_current_pts;
190     double audio_current_pts_drift;
191     int frame_drops_early;
192     int frame_drops_late;
193     AVFrame *frame;
194
195     enum ShowMode {
196         SHOW_MODE_NONE = -1, SHOW_MODE_VIDEO = 0, SHOW_MODE_WAVES, SHOW_MODE_RDFT, SHOW_MODE_NB
197     } show_mode;
198     int16_t sample_array[SAMPLE_ARRAY_SIZE];
199     int sample_array_index;
200     int last_i_start;
201     RDFTContext *rdft;
202     int rdft_bits;
203     FFTSample *rdft_data;
204     int xpos;
205
206     SDL_Thread *subtitle_tid;
207     int subtitle_stream;
208     int subtitle_stream_changed;
209     AVStream *subtitle_st;
210     PacketQueue subtitleq;
211     SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
212     int subpq_size, subpq_rindex, subpq_windex;
213     SDL_mutex *subpq_mutex;
214     SDL_cond *subpq_cond;
215
216     double frame_timer;
217     double frame_last_pts;
218     double frame_last_duration;
219     double frame_last_dropped_pts;
220     double frame_last_returned_time;
221     double frame_last_filter_delay;
222     int64_t frame_last_dropped_pos;
223     double video_clock;             // pts of last decoded frame / predicted pts of next decoded frame
224     int video_stream;
225     AVStream *video_st;
226     PacketQueue videoq;
227     double video_current_pts;       // current displayed pts (different from video_clock if frame fifos are used)
228     double video_current_pts_drift; // video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
229     int64_t video_current_pos;      // current displayed file pos
230     VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
231     int pictq_size, pictq_rindex, pictq_windex;
232     SDL_mutex *pictq_mutex;
233     SDL_cond *pictq_cond;
234 #if !CONFIG_AVFILTER
235     struct SwsContext *img_convert_ctx;
236 #endif
237     SDL_Rect last_display_rect;
238
239     char filename[1024];
240     int width, height, xleft, ytop;
241     int step;
242
243 #if CONFIG_AVFILTER
244     AVFilterContext *in_video_filter;   // the first filter in the video chain
245     AVFilterContext *out_video_filter;  // the last filter in the video chain
246     int use_dr1;
247     FrameBuffer *buffer_pool;
248 #endif
249
250     int refresh;
251     int last_video_stream, last_audio_stream, last_subtitle_stream;
252
253     SDL_cond *continue_read_thread;
254 } VideoState;
255
256 /* options specified by the user */
257 static AVInputFormat *file_iformat;
258 static const char *input_filename;
259 static const char *window_title;
260 static int fs_screen_width;
261 static int fs_screen_height;
262 static int screen_width  = 0;
263 static int screen_height = 0;
264 static int audio_disable;
265 static int video_disable;
266 static int wanted_stream[AVMEDIA_TYPE_NB] = {
267     [AVMEDIA_TYPE_AUDIO]    = -1,
268     [AVMEDIA_TYPE_VIDEO]    = -1,
269     [AVMEDIA_TYPE_SUBTITLE] = -1,
270 };
271 static int seek_by_bytes = -1;
272 static int display_disable;
273 static int show_status = 1;
274 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
275 static int64_t start_time = AV_NOPTS_VALUE;
276 static int64_t duration = AV_NOPTS_VALUE;
277 static int workaround_bugs = 1;
278 static int fast = 0;
279 static int genpts = 0;
280 static int lowres = 0;
281 static int idct = FF_IDCT_AUTO;
282 static enum AVDiscard skip_frame       = AVDISCARD_DEFAULT;
283 static enum AVDiscard skip_idct        = AVDISCARD_DEFAULT;
284 static enum AVDiscard skip_loop_filter = AVDISCARD_DEFAULT;
285 static int error_concealment = 3;
286 static int decoder_reorder_pts = -1;
287 static int autoexit;
288 static int exit_on_keydown;
289 static int exit_on_mousedown;
290 static int loop = 1;
291 static int framedrop = -1;
292 static int infinite_buffer = -1;
293 static enum ShowMode show_mode = SHOW_MODE_NONE;
294 static const char *audio_codec_name;
295 static const char *subtitle_codec_name;
296 static const char *video_codec_name;
297 static int rdftspeed = 20;
298 #if CONFIG_AVFILTER
299 static char *vfilters = NULL;
300 #endif
301
302 /* current context */
303 static int is_full_screen;
304 static int64_t audio_callback_time;
305
306 static AVPacket flush_pkt;
307
308 #define FF_ALLOC_EVENT   (SDL_USEREVENT)
309 #define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
310 #define FF_QUIT_EVENT    (SDL_USEREVENT + 2)
311
312 static SDL_Surface *screen;
313
314 static int packet_queue_put(PacketQueue *q, AVPacket *pkt);
315
316 static int packet_queue_put_private(PacketQueue *q, AVPacket *pkt)
317 {
318     MyAVPacketList *pkt1;
319
320     if (q->abort_request)
321        return -1;
322
323     pkt1 = av_malloc(sizeof(MyAVPacketList));
324     if (!pkt1)
325         return -1;
326     pkt1->pkt = *pkt;
327     pkt1->next = NULL;
328     if (pkt == &flush_pkt)
329         q->serial++;
330     pkt1->serial = q->serial;
331
332     if (!q->last_pkt)
333         q->first_pkt = pkt1;
334     else
335         q->last_pkt->next = pkt1;
336     q->last_pkt = pkt1;
337     q->nb_packets++;
338     q->size += pkt1->pkt.size + sizeof(*pkt1);
339     /* XXX: should duplicate packet data in DV case */
340     SDL_CondSignal(q->cond);
341     return 0;
342 }
343
344 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
345 {
346     int ret;
347
348     /* duplicate the packet */
349     if (pkt != &flush_pkt && av_dup_packet(pkt) < 0)
350         return -1;
351
352     SDL_LockMutex(q->mutex);
353     ret = packet_queue_put_private(q, pkt);
354     SDL_UnlockMutex(q->mutex);
355
356     if (pkt != &flush_pkt && ret < 0)
357         av_free_packet(pkt);
358
359     return ret;
360 }
361
362 /* packet queue handling */
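/* Note that a freshly initialized queue starts with abort_request set, so it
   rejects packets until packet_queue_start() clears the flag and queues the
   flush packet. */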
363 static void packet_queue_init(PacketQueue *q)
364 {
365     memset(q, 0, sizeof(PacketQueue));
366     q->mutex = SDL_CreateMutex();
367     q->cond = SDL_CreateCond();
368     q->abort_request = 1;
369 }
370
371 static void packet_queue_flush(PacketQueue *q)
372 {
373     MyAVPacketList *pkt, *pkt1;
374
375     SDL_LockMutex(q->mutex);
376     for (pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
377         pkt1 = pkt->next;
378         av_free_packet(&pkt->pkt);
379         av_freep(&pkt);
380     }
381     q->last_pkt = NULL;
382     q->first_pkt = NULL;
383     q->nb_packets = 0;
384     q->size = 0;
385     SDL_UnlockMutex(q->mutex);
386 }
387
388 static void packet_queue_destroy(PacketQueue *q)
389 {
390     packet_queue_flush(q);
391     SDL_DestroyMutex(q->mutex);
392     SDL_DestroyCond(q->cond);
393 }
394
395 static void packet_queue_abort(PacketQueue *q)
396 {
397     SDL_LockMutex(q->mutex);
398
399     q->abort_request = 1;
400
401     SDL_CondSignal(q->cond);
402
403     SDL_UnlockMutex(q->mutex);
404 }
405
406 static void packet_queue_start(PacketQueue *q)
407 {
408     SDL_LockMutex(q->mutex);
409     q->abort_request = 0;
410     packet_queue_put_private(q, &flush_pkt);
411     SDL_UnlockMutex(q->mutex);
412 }
413
414 /* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
415 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block, int *serial)
416 {
417     MyAVPacketList *pkt1;
418     int ret;
419
420     SDL_LockMutex(q->mutex);
421
422     for (;;) {
423         if (q->abort_request) {
424             ret = -1;
425             break;
426         }
427
428         pkt1 = q->first_pkt;
429         if (pkt1) {
430             q->first_pkt = pkt1->next;
431             if (!q->first_pkt)
432                 q->last_pkt = NULL;
433             q->nb_packets--;
434             q->size -= pkt1->pkt.size + sizeof(*pkt1);
435             *pkt = pkt1->pkt;
436             if (serial)
437                 *serial = pkt1->serial;
438             av_free(pkt1);
439             ret = 1;
440             break;
441         } else if (!block) {
442             ret = 0;
443             break;
444         } else {
445             SDL_CondWait(q->cond, q->mutex);
446         }
447     }
448     SDL_UnlockMutex(q->mutex);
449     return ret;
450 }
451
452 static inline void fill_rectangle(SDL_Surface *screen,
453                                   int x, int y, int w, int h, int color, int update)
454 {
455     SDL_Rect rect;
456     rect.x = x;
457     rect.y = y;
458     rect.w = w;
459     rect.h = h;
460     SDL_FillRect(screen, &rect, color);
461     if (update && w > 0 && h > 0)
462         SDL_UpdateRect(screen, x, y, w, h);
463 }
464
465 /* draw only the border of a rectangle */
466 static void fill_border(int xleft, int ytop, int width, int height, int x, int y, int w, int h, int color, int update)
467 {
468     int w1, w2, h1, h2;
469
470     /* fill the background */
471     w1 = x;
472     if (w1 < 0)
473         w1 = 0;
474     w2 = width - (x + w);
475     if (w2 < 0)
476         w2 = 0;
477     h1 = y;
478     if (h1 < 0)
479         h1 = 0;
480     h2 = height - (y + h);
481     if (h2 < 0)
482         h2 = 0;
483     fill_rectangle(screen,
484                    xleft, ytop,
485                    w1, height,
486                    color, update);
487     fill_rectangle(screen,
488                    xleft + width - w2, ytop,
489                    w2, height,
490                    color, update);
491     fill_rectangle(screen,
492                    xleft + w1, ytop,
493                    width - w1 - w2, h1,
494                    color, update);
495     fill_rectangle(screen,
496                    xleft + w1, ytop + height - h2,
497                    width - w1 - w2, h2,
498                    color, update);
499 }
500
501 #define ALPHA_BLEND(a, oldp, newp, s)\
502 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
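
/* Fixed-point alpha blend of newp over oldp with alpha a in [0, 255]; the
   shift s compensates for chroma/alpha values that were accumulated over
   2 (s=1) or 4 (s=2) source pixels before blending. */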
503
504 #define RGBA_IN(r, g, b, a, s)\
505 {\
506     unsigned int v = ((const uint32_t *)(s))[0];\
507     a = (v >> 24) & 0xff;\
508     r = (v >> 16) & 0xff;\
509     g = (v >> 8) & 0xff;\
510     b = v & 0xff;\
511 }
512
513 #define YUVA_IN(y, u, v, a, s, pal)\
514 {\
515     unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
516     a = (val >> 24) & 0xff;\
517     y = (val >> 16) & 0xff;\
518     u = (val >> 8) & 0xff;\
519     v = val & 0xff;\
520 }
521
522 #define YUVA_OUT(d, y, u, v, a)\
523 {\
524     ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
525 }
526
527
528 #define BPP 1
529
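/* Blend a palettized (PAL8) subtitle rectangle onto a YUV420P destination.
   The palette entries are YUVA (see the "Now in YCrCb!" note below); luma is
   blended per pixel while chroma is blended on the 2x2-subsampled grid, which
   is why the code walks the rectangle two rows and two columns at a time. */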
530 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
531 {
532     int wrap, wrap3, width2, skip2;
533     int y, u, v, a, u1, v1, a1, w, h;
534     uint8_t *lum, *cb, *cr;
535     const uint8_t *p;
536     const uint32_t *pal;
537     int dstx, dsty, dstw, dsth;
538
539     dstw = av_clip(rect->w, 0, imgw);
540     dsth = av_clip(rect->h, 0, imgh);
541     dstx = av_clip(rect->x, 0, imgw - dstw);
542     dsty = av_clip(rect->y, 0, imgh - dsth);
543     lum = dst->data[0] + dsty * dst->linesize[0];
544     cb  = dst->data[1] + (dsty >> 1) * dst->linesize[1];
545     cr  = dst->data[2] + (dsty >> 1) * dst->linesize[2];
546
547     width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
548     skip2 = dstx >> 1;
549     wrap = dst->linesize[0];
550     wrap3 = rect->pict.linesize[0];
551     p = rect->pict.data[0];
552     pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */
553
554     if (dsty & 1) {
555         lum += dstx;
556         cb += skip2;
557         cr += skip2;
558
559         if (dstx & 1) {
560             YUVA_IN(y, u, v, a, p, pal);
561             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
562             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
563             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
564             cb++;
565             cr++;
566             lum++;
567             p += BPP;
568         }
569         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
570             YUVA_IN(y, u, v, a, p, pal);
571             u1 = u;
572             v1 = v;
573             a1 = a;
574             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
575
576             YUVA_IN(y, u, v, a, p + BPP, pal);
577             u1 += u;
578             v1 += v;
579             a1 += a;
580             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
581             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
582             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
583             cb++;
584             cr++;
585             p += 2 * BPP;
586             lum += 2;
587         }
588         if (w) {
589             YUVA_IN(y, u, v, a, p, pal);
590             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
591             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
592             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
593             p++;
594             lum++;
595         }
596         p += wrap3 - dstw * BPP;
597         lum += wrap - dstw - dstx;
598         cb += dst->linesize[1] - width2 - skip2;
599         cr += dst->linesize[2] - width2 - skip2;
600     }
601     for (h = dsth - (dsty & 1); h >= 2; h -= 2) {
602         lum += dstx;
603         cb += skip2;
604         cr += skip2;
605
606         if (dstx & 1) {
607             YUVA_IN(y, u, v, a, p, pal);
608             u1 = u;
609             v1 = v;
610             a1 = a;
611             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
612             p += wrap3;
613             lum += wrap;
614             YUVA_IN(y, u, v, a, p, pal);
615             u1 += u;
616             v1 += v;
617             a1 += a;
618             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
619             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
620             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
621             cb++;
622             cr++;
623             p += -wrap3 + BPP;
624             lum += -wrap + 1;
625         }
626         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
627             YUVA_IN(y, u, v, a, p, pal);
628             u1 = u;
629             v1 = v;
630             a1 = a;
631             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
632
633             YUVA_IN(y, u, v, a, p + BPP, pal);
634             u1 += u;
635             v1 += v;
636             a1 += a;
637             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
638             p += wrap3;
639             lum += wrap;
640
641             YUVA_IN(y, u, v, a, p, pal);
642             u1 += u;
643             v1 += v;
644             a1 += a;
645             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
646
647             YUVA_IN(y, u, v, a, p + BPP, pal);
648             u1 += u;
649             v1 += v;
650             a1 += a;
651             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
652
653             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
654             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
655
656             cb++;
657             cr++;
658             p += -wrap3 + 2 * BPP;
659             lum += -wrap + 2;
660         }
661         if (w) {
662             YUVA_IN(y, u, v, a, p, pal);
663             u1 = u;
664             v1 = v;
665             a1 = a;
666             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
667             p += wrap3;
668             lum += wrap;
669             YUVA_IN(y, u, v, a, p, pal);
670             u1 += u;
671             v1 += v;
672             a1 += a;
673             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
674             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
675             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
676             cb++;
677             cr++;
678             p += -wrap3 + BPP;
679             lum += -wrap + 1;
680         }
681         p += wrap3 + (wrap3 - dstw * BPP);
682         lum += wrap + (wrap - dstw - dstx);
683         cb += dst->linesize[1] - width2 - skip2;
684         cr += dst->linesize[2] - width2 - skip2;
685     }
686     /* handle odd height */
687     if (h) {
688         lum += dstx;
689         cb += skip2;
690         cr += skip2;
691
692         if (dstx & 1) {
693             YUVA_IN(y, u, v, a, p, pal);
694             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
695             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
696             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
697             cb++;
698             cr++;
699             lum++;
700             p += BPP;
701         }
702         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
703             YUVA_IN(y, u, v, a, p, pal);
704             u1 = u;
705             v1 = v;
706             a1 = a;
707             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
708
709             YUVA_IN(y, u, v, a, p + BPP, pal);
710             u1 += u;
711             v1 += v;
712             a1 += a;
713             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
714             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
715             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
716             cb++;
717             cr++;
718             p += 2 * BPP;
719             lum += 2;
720         }
721         if (w) {
722             YUVA_IN(y, u, v, a, p, pal);
723             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
724             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
725             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
726         }
727     }
728 }
729
730 static void free_subpicture(SubPicture *sp)
731 {
732     avsubtitle_free(&sp->sub);
733 }
734
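/* Compute the largest even-sized rectangle that fits into the given screen
   area while preserving the picture's display aspect ratio
   (sample_aspect_ratio * width / height). */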
735 static void calculate_display_rect(SDL_Rect *rect, int scr_xleft, int scr_ytop, int scr_width, int scr_height, VideoPicture *vp)
736 {
737     float aspect_ratio;
738     int width, height, x, y;
739
740     if (vp->sample_aspect_ratio.num == 0)
741         aspect_ratio = 0;
742     else
743         aspect_ratio = av_q2d(vp->sample_aspect_ratio);
744
745     if (aspect_ratio <= 0.0)
746         aspect_ratio = 1.0;
747     aspect_ratio *= (float)vp->width / (float)vp->height;
748
749     /* XXX: we suppose the screen has a 1.0 pixel ratio */
750     height = scr_height;
751     width = ((int)rint(height * aspect_ratio)) & ~1;
752     if (width > scr_width) {
753         width = scr_width;
754         height = ((int)rint(width / aspect_ratio)) & ~1;
755     }
756     x = (scr_width - width) / 2;
757     y = (scr_height - height) / 2;
758     rect->x = scr_xleft + x;
759     rect->y = scr_ytop  + y;
760     rect->w = FFMAX(width,  1);
761     rect->h = FFMAX(height, 1);
762 }
763
764 static void video_image_display(VideoState *is)
765 {
766     VideoPicture *vp;
767     SubPicture *sp;
768     AVPicture pict;
769     SDL_Rect rect;
770     int i;
771
772     vp = &is->pictq[is->pictq_rindex];
773     if (vp->bmp) {
774         if (is->subtitle_st) {
775             if (is->subpq_size > 0) {
776                 sp = &is->subpq[is->subpq_rindex];
777
778                 if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) {
779                     SDL_LockYUVOverlay (vp->bmp);
780
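                    /* SDL's YV12 overlay stores its planes as Y, V, U, so the
                       U and V pointers/pitches are swapped relative to the
                       Y, U, V order used by AVPicture. */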
781                     pict.data[0] = vp->bmp->pixels[0];
782                     pict.data[1] = vp->bmp->pixels[2];
783                     pict.data[2] = vp->bmp->pixels[1];
784
785                     pict.linesize[0] = vp->bmp->pitches[0];
786                     pict.linesize[1] = vp->bmp->pitches[2];
787                     pict.linesize[2] = vp->bmp->pitches[1];
788
789                     for (i = 0; i < sp->sub.num_rects; i++)
790                         blend_subrect(&pict, sp->sub.rects[i],
791                                       vp->bmp->w, vp->bmp->h);
792
793                     SDL_UnlockYUVOverlay (vp->bmp);
794                 }
795             }
796         }
797
798         calculate_display_rect(&rect, is->xleft, is->ytop, is->width, is->height, vp);
799
800         SDL_DisplayYUVOverlay(vp->bmp, &rect);
801
802         if (rect.x != is->last_display_rect.x || rect.y != is->last_display_rect.y || rect.w != is->last_display_rect.w || rect.h != is->last_display_rect.h || is->force_refresh) {
803             int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
804             fill_border(is->xleft, is->ytop, is->width, is->height, rect.x, rect.y, rect.w, rect.h, bgcolor, 1);
805             is->last_display_rect = rect;
806         }
807     }
808 }
809
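/* Modulo that always returns a result in [0, b), even for negative a; used to
   index the circular sample_array. */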
810 static inline int compute_mod(int a, int b)
811 {
812     return a < 0 ? a%b + b : a%b;
813 }
814
815 static void video_audio_display(VideoState *s)
816 {
817     int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
818     int ch, channels, h, h2, bgcolor, fgcolor;
819     int64_t time_diff;
820     int rdft_bits, nb_freq;
821
822     for (rdft_bits = 1; (1 << rdft_bits) < 2 * s->height; rdft_bits++)
823         ;
824     nb_freq = 1 << (rdft_bits - 1);
825
826     /* compute display index : center on currently output samples */
827     channels = s->audio_tgt.channels;
828     nb_display_channels = channels;
829     if (!s->paused) {
830         int data_used= s->show_mode == SHOW_MODE_WAVES ? s->width : (2*nb_freq);
831         n = 2 * channels;
832         delay = s->audio_write_buf_size;
833         delay /= n;
834
835         /* to be more precise, we take into account the time spent since
836            the last buffer computation */
837         if (audio_callback_time) {
838             time_diff = av_gettime() - audio_callback_time;
839             delay -= (time_diff * s->audio_tgt.freq) / 1000000;
840         }
841
842         delay += 2 * data_used;
843         if (delay < data_used)
844             delay = data_used;
845
846         i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
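        /* For the waveform display, refine i_start by scanning the most
           recent samples for a zero crossing with the largest amplitude swing,
           which keeps the displayed wave roughly stable between refreshes. */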
847         if (s->show_mode == SHOW_MODE_WAVES) {
848             h = INT_MIN;
849             for (i = 0; i < 1000; i += channels) {
850                 int idx = (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
851                 int a = s->sample_array[idx];
852                 int b = s->sample_array[(idx + 4 * channels) % SAMPLE_ARRAY_SIZE];
853                 int c = s->sample_array[(idx + 5 * channels) % SAMPLE_ARRAY_SIZE];
854                 int d = s->sample_array[(idx + 9 * channels) % SAMPLE_ARRAY_SIZE];
855                 int score = a - d;
856                 if (h < score && (b ^ c) < 0) {
857                     h = score;
858                     i_start = idx;
859                 }
860             }
861         }
862
863         s->last_i_start = i_start;
864     } else {
865         i_start = s->last_i_start;
866     }
867
868     bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
869     if (s->show_mode == SHOW_MODE_WAVES) {
870         fill_rectangle(screen,
871                        s->xleft, s->ytop, s->width, s->height,
872                        bgcolor, 0);
873
874         fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
875
876         /* total height for one channel */
877         h = s->height / nb_display_channels;
878         /* graph height / 2 */
879         h2 = (h * 9) / 20;
880         for (ch = 0; ch < nb_display_channels; ch++) {
881             i = i_start + ch;
882             y1 = s->ytop + ch * h + (h / 2); /* position of center line */
883             for (x = 0; x < s->width; x++) {
884                 y = (s->sample_array[i] * h2) >> 15;
885                 if (y < 0) {
886                     y = -y;
887                     ys = y1 - y;
888                 } else {
889                     ys = y1;
890                 }
891                 fill_rectangle(screen,
892                                s->xleft + x, ys, 1, y,
893                                fgcolor, 0);
894                 i += channels;
895                 if (i >= SAMPLE_ARRAY_SIZE)
896                     i -= SAMPLE_ARRAY_SIZE;
897             }
898         }
899
900         fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
901
902         for (ch = 1; ch < nb_display_channels; ch++) {
903             y = s->ytop + ch * h;
904             fill_rectangle(screen,
905                            s->xleft, y, s->width, 1,
906                            fgcolor, 0);
907         }
908         SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
909     } else {
910         nb_display_channels= FFMIN(nb_display_channels, 2);
911         if (rdft_bits != s->rdft_bits) {
912             av_rdft_end(s->rdft);
913             av_free(s->rdft_data);
914             s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
915             s->rdft_bits = rdft_bits;
916             s->rdft_data = av_malloc(4 * nb_freq * sizeof(*s->rdft_data));
917         }
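        /* For the spectrum mode, each channel's recent samples are shaped by
           a parabolic (Welch) window, 1 - w*w, and run through a real FFT; the
           resulting magnitudes drive the pixel colors of one spectrogram
           column per refresh. */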
918         {
919             FFTSample *data[2];
920             for (ch = 0; ch < nb_display_channels; ch++) {
921                 data[ch] = s->rdft_data + 2 * nb_freq * ch;
922                 i = i_start + ch;
923                 for (x = 0; x < 2 * nb_freq; x++) {
924                     double w = (x-nb_freq) * (1.0 / nb_freq);
925                     data[ch][x] = s->sample_array[i] * (1.0 - w * w);
926                     i += channels;
927                     if (i >= SAMPLE_ARRAY_SIZE)
928                         i -= SAMPLE_ARRAY_SIZE;
929                 }
930                 av_rdft_calc(s->rdft, data[ch]);
931             }
932             // least efficient way to do this, we should of course directly access it, but it's more than fast enough
933             for (y = 0; y < s->height; y++) {
934                 double w = 1 / sqrt(nb_freq);
935                 int a = sqrt(w * sqrt(data[0][2 * y + 0] * data[0][2 * y + 0] + data[0][2 * y + 1] * data[0][2 * y + 1]));
936                 int b = (nb_display_channels == 2 ) ? sqrt(w * sqrt(data[1][2 * y + 0] * data[1][2 * y + 0]
937                        + data[1][2 * y + 1] * data[1][2 * y + 1])) : a;
938                 a = FFMIN(a, 255);
939                 b = FFMIN(b, 255);
940                 fgcolor = SDL_MapRGB(screen->format, a, b, (a + b) / 2);
941
942                 fill_rectangle(screen,
943                             s->xpos, s->height-y, 1, 1,
944                             fgcolor, 0);
945             }
946         }
947         SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
948         if (!s->paused)
949             s->xpos++;
950         if (s->xpos >= s->width)
951             s->xpos= s->xleft;
952     }
953 }
954
955 static void stream_close(VideoState *is)
956 {
957     VideoPicture *vp;
958     int i;
959     /* XXX: use a special url_shutdown call to abort parse cleanly */
960     is->abort_request = 1;
961     SDL_WaitThread(is->read_tid, NULL);
962     SDL_WaitThread(is->refresh_tid, NULL);
963     packet_queue_destroy(&is->videoq);
964     packet_queue_destroy(&is->audioq);
965     packet_queue_destroy(&is->subtitleq);
966
967     /* free all pictures */
968     for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
969         vp = &is->pictq[i];
970 #if CONFIG_AVFILTER
971         avfilter_unref_bufferp(&vp->picref);
972 #endif
973         if (vp->bmp) {
974             SDL_FreeYUVOverlay(vp->bmp);
975             vp->bmp = NULL;
976         }
977     }
978     SDL_DestroyMutex(is->pictq_mutex);
979     SDL_DestroyCond(is->pictq_cond);
980     SDL_DestroyMutex(is->subpq_mutex);
981     SDL_DestroyCond(is->subpq_cond);
982     SDL_DestroyCond(is->continue_read_thread);
983 #if !CONFIG_AVFILTER
984     if (is->img_convert_ctx)
985         sws_freeContext(is->img_convert_ctx);
986 #endif
987     av_free(is);
988 }
989
990 static void do_exit(VideoState *is)
991 {
992     if (is) {
993         stream_close(is);
994     }
995     av_lockmgr_register(NULL);
996     uninit_opts();
997 #if CONFIG_AVFILTER
998     avfilter_uninit();
999     av_freep(&vfilters);
1000 #endif
1001     avformat_network_deinit();
1002     if (show_status)
1003         printf("\n");
1004     SDL_Quit();
1005     av_log(NULL, AV_LOG_QUIET, "%s", "");
1006     exit(0);
1007 }
1008
1009 static void sigterm_handler(int sig)
1010 {
1011     exit(123);
1012 }
1013
1014 static int video_open(VideoState *is, int force_set_video_mode)
1015 {
1016     int flags = SDL_HWSURFACE | SDL_ASYNCBLIT | SDL_HWACCEL;
1017     int w,h;
1018     VideoPicture *vp = &is->pictq[is->pictq_rindex];
1019     SDL_Rect rect;
1020
1021     if (is_full_screen) flags |= SDL_FULLSCREEN;
1022     else                flags |= SDL_RESIZABLE;
1023
1024     if (is_full_screen && fs_screen_width) {
1025         w = fs_screen_width;
1026         h = fs_screen_height;
1027     } else if (!is_full_screen && screen_width) {
1028         w = screen_width;
1029         h = screen_height;
1030     } else if (vp->width) {
1031         calculate_display_rect(&rect, 0, 0, INT_MAX, vp->height, vp);
1032         w = rect.w;
1033         h = rect.h;
1034     } else {
1035         w = 640;
1036         h = 480;
1037     }
1038     if (screen && is->width == screen->w && screen->w == w
1039        && is->height== screen->h && screen->h == h && !force_set_video_mode)
1040         return 0;
1041     screen = SDL_SetVideoMode(w, h, 0, flags);
1042     if (!screen) {
1043         fprintf(stderr, "SDL: could not set video mode - exiting\n");
1044         do_exit(is);
1045     }
1046     if (!window_title)
1047         window_title = input_filename;
1048     SDL_WM_SetCaption(window_title, window_title);
1049
1050     is->width  = screen->w;
1051     is->height = screen->h;
1052
1053     return 0;
1054 }
1055
1056 /* display the current picture, if any */
1057 static void video_display(VideoState *is)
1058 {
1059     if (!screen)
1060         video_open(is, 0);
1061     if (is->audio_st && is->show_mode != SHOW_MODE_VIDEO)
1062         video_audio_display(is);
1063     else if (is->video_st)
1064         video_image_display(is);
1065 }
1066
1067 static int refresh_thread(void *opaque)
1068 {
1069     VideoState *is= opaque;
1070     while (!is->abort_request) {
1071         SDL_Event event;
1072         event.type = FF_REFRESH_EVENT;
1073         event.user.data1 = opaque;
1074         if (!is->refresh && (!is->paused || is->force_refresh)) {
1075             is->refresh = 1;
1076             SDL_PushEvent(&event);
1077         }
1078         //FIXME ideally we should wait for the correct time, but SDL's event passing is so slow it would be silly
1079         av_usleep(is->audio_st && is->show_mode != SHOW_MODE_VIDEO ? rdftspeed*1000 : 5000);
1080     }
1081     return 0;
1082 }
1083
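/* The audio/video/external clocks are stored as a "drift" against the system
   time of their last update: clock = drift + av_gettime()/1e6. This lets each
   clock advance in real time between updates without being touched. */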
1084 /* get the current audio clock value */
1085 static double get_audio_clock(VideoState *is)
1086 {
1087     if (is->paused) {
1088         return is->audio_current_pts;
1089     } else {
1090         return is->audio_current_pts_drift + av_gettime() / 1000000.0;
1091     }
1092 }
1093
1094 /* get the current video clock value */
1095 static double get_video_clock(VideoState *is)
1096 {
1097     if (is->paused) {
1098         return is->video_current_pts;
1099     } else {
1100         return is->video_current_pts_drift + av_gettime() / 1000000.0;
1101     }
1102 }
1103
1104 /* get the current external clock value */
1105 static double get_external_clock(VideoState *is)
1106 {
1107     if (is->paused) {
1108         return is->external_clock;
1109     } else {
1110         return is->external_clock_drift + av_gettime() / 1000000.0;
1111     }
1112 }
1113
1114 static int get_master_sync_type(VideoState *is) {
1115     if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1116         if (is->video_st)
1117             return AV_SYNC_VIDEO_MASTER;
1118         else
1119             return AV_SYNC_AUDIO_MASTER;
1120     } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1121         if (is->audio_st)
1122             return AV_SYNC_AUDIO_MASTER;
1123         else
1124             return AV_SYNC_EXTERNAL_CLOCK;
1125     } else {
1126         return AV_SYNC_EXTERNAL_CLOCK;
1127     }
1128 }
1129
1130 /* get the current master clock value */
1131 static double get_master_clock(VideoState *is)
1132 {
1133     double val;
1134
1135     switch (get_master_sync_type(is)) {
1136         case AV_SYNC_VIDEO_MASTER:
1137             val = get_video_clock(is);
1138             break;
1139         case AV_SYNC_AUDIO_MASTER:
1140             val = get_audio_clock(is);
1141             break;
1142         default:
1143             val = get_external_clock(is);
1144             break;
1145     }
1146     return val;
1147 }
1148
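/* The external clock simply follows wall-clock time from its last update;
   check_external_clock_sync() below snaps it back to the given pts whenever
   the two diverge by more than AV_NOSYNC_THRESHOLD. */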
1149 static void update_external_clock_pts(VideoState *is, double pts)
1150 {
1151    is->external_clock_time = av_gettime();
1152    is->external_clock = pts;
1153    is->external_clock_drift = pts - is->external_clock_time / 1000000.0;
1154 }
1155
1156 static void check_external_clock_sync(VideoState *is, double pts) {
1157     if (fabs(get_external_clock(is) - pts) > AV_NOSYNC_THRESHOLD) {
1158         update_external_clock_pts(is, pts);
1159     }
1160 }
1161
1162 /* seek in the stream */
1163 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1164 {
1165     if (!is->seek_req) {
1166         is->seek_pos = pos;
1167         is->seek_rel = rel;
1168         is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1169         if (seek_by_bytes)
1170             is->seek_flags |= AVSEEK_FLAG_BYTE;
1171         is->seek_req = 1;
1172     }
1173 }
1174
1175 /* pause or resume the video */
1176 static void stream_toggle_pause(VideoState *is)
1177 {
1178     if (is->paused) {
1179         is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
1180         if (is->read_pause_return != AVERROR(ENOSYS)) {
1181             is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
1182         }
1183         is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
1184     }
1185     update_external_clock_pts(is, get_external_clock(is));
1186     is->paused = !is->paused;
1187 }
1188
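/* Example: with a nominal frame delay of 40 ms, a video clock lagging the
   master by 100 ms zeroes the delay (the frame is shown immediately), while a
   video clock 100 ms ahead doubles the delay to 80 ms. */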
1189 static double compute_target_delay(double delay, VideoState *is)
1190 {
1191     double sync_threshold, diff;
1192
1193     /* update delay to follow master synchronisation source */
1194     if (get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER) {
1195         /* if video is slave, we try to correct big delays by
1196            duplicating or deleting a frame */
1197         diff = get_video_clock(is) - get_master_clock(is);
1198
1199         /* skip or repeat frame. We take into account the
1200            delay to compute the threshold. I still don't know
1201            if it is the best guess */
1202         sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1203         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1204             if (diff <= -sync_threshold)
1205                 delay = 0;
1206             else if (diff >= sync_threshold)
1207                 delay = 2 * delay;
1208         }
1209     }
1210
1211     av_dlog(NULL, "video: delay=%0.3f A-V=%f\n",
1212             delay, -diff);
1213
1214     return delay;
1215 }
1216
1217 static void pictq_next_picture(VideoState *is) {
1218     /* update queue size and signal for next picture */
1219     if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1220         is->pictq_rindex = 0;
1221
1222     SDL_LockMutex(is->pictq_mutex);
1223     is->pictq_size--;
1224     SDL_CondSignal(is->pictq_cond);
1225     SDL_UnlockMutex(is->pictq_mutex);
1226 }
1227
1228 static void pictq_prev_picture(VideoState *is) {
1229     VideoPicture *prevvp;
1230     /* update queue size and signal for the previous picture */
1231     prevvp = &is->pictq[(is->pictq_rindex + VIDEO_PICTURE_QUEUE_SIZE - 1) % VIDEO_PICTURE_QUEUE_SIZE];
1232     if (prevvp->allocated && !prevvp->skip) {
1233         SDL_LockMutex(is->pictq_mutex);
1234         if (is->pictq_size < VIDEO_PICTURE_QUEUE_SIZE - 1) {
1235             if (--is->pictq_rindex == -1)
1236                 is->pictq_rindex = VIDEO_PICTURE_QUEUE_SIZE - 1;
1237             is->pictq_size++;
1238         }
1239         SDL_CondSignal(is->pictq_cond);
1240         SDL_UnlockMutex(is->pictq_mutex);
1241     }
1242 }
1243
1244 static void update_video_pts(VideoState *is, double pts, int64_t pos, int serial) {
1245     double time = av_gettime() / 1000000.0;
1246     /* update current video pts */
1247     is->video_current_pts = pts;
1248     is->video_current_pts_drift = is->video_current_pts - time;
1249     is->video_current_pos = pos;
1250     is->frame_last_pts = pts;
1251     if (is->videoq.serial == serial)
1252         check_external_clock_sync(is, is->video_current_pts);
1253 }
1254
1255 /* called to display each frame */
1256 static void video_refresh(void *opaque)
1257 {
1258     VideoState *is = opaque;
1259     VideoPicture *vp;
1260     double time;
1261
1262     SubPicture *sp, *sp2;
1263
1264     if (is->video_st) {
1265         if (is->force_refresh)
1266             pictq_prev_picture(is);
1267 retry:
1268         if (is->pictq_size == 0) {
1269             SDL_LockMutex(is->pictq_mutex);
1270             if (is->frame_last_dropped_pts != AV_NOPTS_VALUE && is->frame_last_dropped_pts > is->frame_last_pts) {
1271                 update_video_pts(is, is->frame_last_dropped_pts, is->frame_last_dropped_pos, 0);
1272                 is->frame_last_dropped_pts = AV_NOPTS_VALUE;
1273             }
1274             SDL_UnlockMutex(is->pictq_mutex);
1275             // nothing to do, no picture to display in the queue
1276         } else {
1277             double last_duration, duration, delay;
1278             /* dequeue the picture */
1279             vp = &is->pictq[is->pictq_rindex];
1280
1281             if (vp->skip) {
1282                 pictq_next_picture(is);
1283                 goto retry;
1284             }
1285
1286             if (is->paused)
1287                 goto display;
1288
1289             /* compute nominal last_duration */
1290             last_duration = vp->pts - is->frame_last_pts;
1291             if (last_duration > 0 && last_duration < 10.0) {
1292                 /* if duration of the last frame was sane, update last_duration in video state */
1293                 is->frame_last_duration = last_duration;
1294             }
1295             delay = compute_target_delay(is->frame_last_duration, is);
1296
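            /* frame_timer accumulates the scheduled display times. If the
               current time is still before frame_timer + delay, keep showing
               the current picture and try again on the next refresh event;
               otherwise advance frame_timer by whole multiples of delay so it
               never falls hopelessly behind real time. */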
1297             time= av_gettime()/1000000.0;
1298             if (time < is->frame_timer + delay)
1299                 return;
1300
1301             if (delay > 0)
1302                 is->frame_timer += delay * FFMAX(1, floor((time-is->frame_timer) / delay));
1303
1304             SDL_LockMutex(is->pictq_mutex);
1305             update_video_pts(is, vp->pts, vp->pos, vp->serial);
1306             SDL_UnlockMutex(is->pictq_mutex);
1307
1308             if (is->pictq_size > 1) {
1309                 VideoPicture *nextvp = &is->pictq[(is->pictq_rindex + 1) % VIDEO_PICTURE_QUEUE_SIZE];
1310                 duration = nextvp->pts - vp->pts;
1311                 if((framedrop>0 || (framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) && time > is->frame_timer + duration){
1312                     is->frame_drops_late++;
1313                     pictq_next_picture(is);
1314                     goto retry;
1315                 }
1316             }
1317
1318             if (is->subtitle_st) {
1319                 if (is->subtitle_stream_changed) {
1320                     SDL_LockMutex(is->subpq_mutex);
1321
1322                     while (is->subpq_size) {
1323                         free_subpicture(&is->subpq[is->subpq_rindex]);
1324
1325                         /* update queue size and signal for next picture */
1326                         if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1327                             is->subpq_rindex = 0;
1328
1329                         is->subpq_size--;
1330                     }
1331                     is->subtitle_stream_changed = 0;
1332
1333                     SDL_CondSignal(is->subpq_cond);
1334                     SDL_UnlockMutex(is->subpq_mutex);
1335                 } else {
1336                     if (is->subpq_size > 0) {
1337                         sp = &is->subpq[is->subpq_rindex];
1338
1339                         if (is->subpq_size > 1)
1340                             sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1341                         else
1342                             sp2 = NULL;
1343
1344                         if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1345                                 || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1346                         {
1347                             free_subpicture(sp);
1348
1349                             /* update queue size and signal for next picture */
1350                             if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1351                                 is->subpq_rindex = 0;
1352
1353                             SDL_LockMutex(is->subpq_mutex);
1354                             is->subpq_size--;
1355                             SDL_CondSignal(is->subpq_cond);
1356                             SDL_UnlockMutex(is->subpq_mutex);
1357                         }
1358                     }
1359                 }
1360             }
1361
1362 display:
1363             /* display picture */
1364             if (!display_disable)
1365                 video_display(is);
1366
1367             pictq_next_picture(is);
1368         }
1369     } else if (is->audio_st) {
1370         /* draw the next audio frame */
1371
1372         /* if only audio stream, then display the audio bars (better
1373            than nothing, just to test the implementation) */
1374
1375         /* display picture */
1376         if (!display_disable)
1377             video_display(is);
1378     }
1379     is->force_refresh = 0;
1380     if (show_status) {
1381         static int64_t last_time;
1382         int64_t cur_time;
1383         int aqsize, vqsize, sqsize;
1384         double av_diff;
1385
1386         cur_time = av_gettime();
1387         if (!last_time || (cur_time - last_time) >= 30000) {
1388             aqsize = 0;
1389             vqsize = 0;
1390             sqsize = 0;
1391             if (is->audio_st)
1392                 aqsize = is->audioq.size;
1393             if (is->video_st)
1394                 vqsize = is->videoq.size;
1395             if (is->subtitle_st)
1396                 sqsize = is->subtitleq.size;
1397             av_diff = 0;
1398             if (is->audio_st && is->video_st)
1399                 av_diff = get_audio_clock(is) - get_video_clock(is);
1400             printf("%7.2f A-V:%7.3f fd=%4d aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
1401                    get_master_clock(is),
1402                    av_diff,
1403                    is->frame_drops_early + is->frame_drops_late,
1404                    aqsize / 1024,
1405                    vqsize / 1024,
1406                    sqsize,
1407                    is->video_st ? is->video_st->codec->pts_correction_num_faulty_dts : 0,
1408                    is->video_st ? is->video_st->codec->pts_correction_num_faulty_pts : 0);
1409             fflush(stdout);
1410             last_time = cur_time;
1411         }
1412     }
1413 }
1414
1415 /* allocate a picture (this needs to be done in the main thread to avoid
1416    potential locking problems) */
1417 static void alloc_picture(VideoState *is)
1418 {
1419     VideoPicture *vp;
1420
1421     vp = &is->pictq[is->pictq_windex];
1422
1423     if (vp->bmp)
1424         SDL_FreeYUVOverlay(vp->bmp);
1425
1426 #if CONFIG_AVFILTER
1427     avfilter_unref_bufferp(&vp->picref);
1428 #endif
1429
1430     video_open(is, 0);
1431
1432     vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
1433                                    SDL_YV12_OVERLAY,
1434                                    screen);
1435     if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
1436         /* SDL allocates a buffer smaller than requested if the video
1437          * overlay hardware is unable to support the requested size. */
1438         fprintf(stderr, "Error: the video system does not support an image\n"
1439                         "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n"
1440                         "to reduce the image size.\n", vp->width, vp->height );
1441         do_exit(is);
1442     }
1443
1444     SDL_LockMutex(is->pictq_mutex);
1445     vp->allocated = 1;
1446     SDL_CondSignal(is->pictq_cond);
1447     SDL_UnlockMutex(is->pictq_mutex);
1448 }
1449
1450 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos, int serial)
1451 {
1452     VideoPicture *vp;
1453     double frame_delay, pts = pts1;
1454
1455     /* compute the exact PTS for the picture if it is omitted in the stream
1456      * pts1 is the dts of the pkt / pts of the frame */
1457     if (pts != 0) {
1458         /* update video clock with pts, if present */
1459         is->video_clock = pts;
1460     } else {
1461         pts = is->video_clock;
1462     }
1463     /* update video clock for next frame */
1464     frame_delay = av_q2d(is->video_st->codec->time_base);
1465     /* for MPEG2, the frame can be repeated, so we update the
1466        clock accordingly */
1467     frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1468     is->video_clock += frame_delay;
1469
1470 #if defined(DEBUG_SYNC) && 0
1471     printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
1472            av_get_picture_type_char(src_frame->pict_type), pts, pts1);
1473 #endif
1474
1475     /* wait until we have space to put a new picture */
1476     SDL_LockMutex(is->pictq_mutex);
1477
1478     /* keep the last already displayed picture in the queue */
1479     while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE - 2 &&
1480            !is->videoq.abort_request) {
1481         SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1482     }
1483     SDL_UnlockMutex(is->pictq_mutex);
1484
1485     if (is->videoq.abort_request)
1486         return -1;
1487
1488     vp = &is->pictq[is->pictq_windex];
1489
1490 #if CONFIG_AVFILTER
1491     vp->sample_aspect_ratio = ((AVFilterBufferRef *)src_frame->opaque)->video->sample_aspect_ratio;
1492 #else
1493     vp->sample_aspect_ratio = av_guess_sample_aspect_ratio(is->ic, is->video_st, src_frame);
1494 #endif
1495
1496     /* alloc or resize hardware picture buffer */
1497     if (!vp->bmp || vp->reallocate || !vp->allocated ||
1498         vp->width  != src_frame->width ||
1499         vp->height != src_frame->height) {
1500         SDL_Event event;
1501
1502         vp->allocated  = 0;
1503         vp->reallocate = 0;
1504         vp->width = src_frame->width;
1505         vp->height = src_frame->height;
1506
1507         /* the allocation must be done in the main thread to avoid
1508            locking problems. */
1509         event.type = FF_ALLOC_EVENT;
1510         event.user.data1 = is;
1511         SDL_PushEvent(&event);
1512
1513         /* wait until the picture is allocated */
1514         SDL_LockMutex(is->pictq_mutex);
1515         while (!vp->allocated && !is->videoq.abort_request) {
1516             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1517         }
1518         /* if the queue is aborted, we have to pop the pending ALLOC event or wait for the allocation to complete */
1519         if (is->videoq.abort_request && SDL_PeepEvents(&event, 1, SDL_GETEVENT, SDL_EVENTMASK(FF_ALLOC_EVENT)) != 1) {
1520             while (!vp->allocated) {
1521                 SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1522             }
1523         }
1524         SDL_UnlockMutex(is->pictq_mutex);
1525
1526         if (is->videoq.abort_request)
1527             return -1;
1528     }
1529
1530     /* if the frame is not skipped, then display it */
1531     if (vp->bmp) {
1532         AVPicture pict = { { 0 } };
1533 #if CONFIG_AVFILTER
1534         avfilter_unref_bufferp(&vp->picref);
1535         vp->picref = src_frame->opaque;
1536 #endif
1537
1538         /* get a pointer on the bitmap */
1539         SDL_LockYUVOverlay (vp->bmp);
1540
1541         pict.data[0] = vp->bmp->pixels[0];
1542         pict.data[1] = vp->bmp->pixels[2];
1543         pict.data[2] = vp->bmp->pixels[1];
1544
1545         pict.linesize[0] = vp->bmp->pitches[0];
1546         pict.linesize[1] = vp->bmp->pitches[2];
1547         pict.linesize[2] = vp->bmp->pitches[1];
1548
1549 #if CONFIG_AVFILTER
1550         // FIXME use direct rendering
1551         av_picture_copy(&pict, (AVPicture *)src_frame,
1552                         src_frame->format, vp->width, vp->height);
1553 #else
1554         sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
1555         is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
1556             vp->width, vp->height, src_frame->format, vp->width, vp->height,
1557             AV_PIX_FMT_YUV420P, sws_flags, NULL, NULL, NULL);
1558         if (is->img_convert_ctx == NULL) {
1559             fprintf(stderr, "Cannot initialize the conversion context\n");
1560             exit(1);
1561         }
1562         sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1563                   0, vp->height, pict.data, pict.linesize);
1564 #endif
1565         /* update the bitmap content */
1566         SDL_UnlockYUVOverlay(vp->bmp);
1567
1568         vp->pts = pts;
1569         vp->pos = pos;
1570         vp->skip = 0;
1571         vp->serial = serial;
1572
1573         /* now we can update the picture count */
1574         if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1575             is->pictq_windex = 0;
1576         SDL_LockMutex(is->pictq_mutex);
1577         is->pictq_size++;
1578         SDL_UnlockMutex(is->pictq_mutex);
1579     }
1580     return 0;
1581 }
1582
1583 static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt, int *serial)
1584 {
1585     int got_picture, i;
1586
1587     if (packet_queue_get(&is->videoq, pkt, 1, serial) < 0)
1588         return -1;
1589
1590     if (pkt->data == flush_pkt.data) {
1591         avcodec_flush_buffers(is->video_st->codec);
1592
1593         SDL_LockMutex(is->pictq_mutex);
1594         // Make sure there are no long delay timers (ideally we should just flush the queue but that's harder)
1595         for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
1596             is->pictq[i].skip = 1;
1597         }
1598         while (is->pictq_size && !is->videoq.abort_request) {
1599             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1600         }
1601         is->video_current_pos = -1;
1602         is->frame_last_pts = AV_NOPTS_VALUE;
1603         is->frame_last_duration = 0;
1604         is->frame_timer = (double)av_gettime() / 1000000.0;
1605         is->frame_last_dropped_pts = AV_NOPTS_VALUE;
1606         SDL_UnlockMutex(is->pictq_mutex);
1607
1608         return 0;
1609     }
1610
1611     if(avcodec_decode_video2(is->video_st->codec, frame, &got_picture, pkt) < 0)
1612         return 0;
1613
1614     if (got_picture) {
1615         int ret = 1;
1616
1617         if (decoder_reorder_pts == -1) {
1618             *pts = av_frame_get_best_effort_timestamp(frame);
1619         } else if (decoder_reorder_pts) {
1620             *pts = frame->pkt_pts;
1621         } else {
1622             *pts = frame->pkt_dts;
1623         }
1624
1625         if (*pts == AV_NOPTS_VALUE) {
1626             *pts = 0;
1627         }
1628
1629         if (framedrop>0 || (framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) {
1630             SDL_LockMutex(is->pictq_mutex);
1631             if (is->frame_last_pts != AV_NOPTS_VALUE && *pts) {
1632                 double clockdiff = get_video_clock(is) - get_master_clock(is);
1633                 double dpts = av_q2d(is->video_st->time_base) * *pts;
1634                 double ptsdiff = dpts - is->frame_last_pts;
1635                 if (fabs(clockdiff) < AV_NOSYNC_THRESHOLD &&
1636                      ptsdiff > 0 && ptsdiff < AV_NOSYNC_THRESHOLD &&
1637                      clockdiff + ptsdiff - is->frame_last_filter_delay < 0) {
1638                     is->frame_last_dropped_pos = pkt->pos;
1639                     is->frame_last_dropped_pts = dpts;
1640                     is->frame_drops_early++;
1641                     ret = 0;
1642                 }
1643             }
1644             SDL_UnlockMutex(is->pictq_mutex);
1645         }
1646
1647         return ret;
1648     }
1649     return 0;
1650 }
1651
1652 #if CONFIG_AVFILTER
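     /* Parse the user supplied filter description, if any, and insert it between
      * source_ctx and sink_ctx; otherwise link the two directly. Then configure
      * the resulting graph. */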
1653 static int configure_filtergraph(AVFilterGraph *graph, const char *filtergraph,
1654                                  AVFilterContext *source_ctx, AVFilterContext *sink_ctx)
1655 {
1656     int ret;
1657     AVFilterInOut *outputs = NULL, *inputs = NULL;
1658
1659     if (filtergraph) {
1660         outputs = avfilter_inout_alloc();
1661         inputs  = avfilter_inout_alloc();
1662         if (!outputs || !inputs) {
1663             ret = AVERROR(ENOMEM);
1664             goto fail;
1665         }
1666
1667         outputs->name       = av_strdup("in");
1668         outputs->filter_ctx = source_ctx;
1669         outputs->pad_idx    = 0;
1670         outputs->next       = NULL;
1671
1672         inputs->name        = av_strdup("out");
1673         inputs->filter_ctx  = sink_ctx;
1674         inputs->pad_idx     = 0;
1675         inputs->next        = NULL;
1676
1677         if ((ret = avfilter_graph_parse(graph, filtergraph, &inputs, &outputs, NULL)) < 0)
1678             goto fail;
1679     } else {
1680         if ((ret = avfilter_link(source_ctx, 0, sink_ctx, 0)) < 0)
1681             goto fail;
1682     }
1683
1684     ret = avfilter_graph_config(graph, NULL);
1685 fail:
1686     avfilter_inout_free(&outputs);
1687     avfilter_inout_free(&inputs);
1688     return ret;
1689 }
1690
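     /* Build the video filter chain: a buffer source fed with decoded frames,
      * the optional user filters, a crop filter that rounds the picture down to
      * even dimensions for SDL, and a buffersink restricted to YUV420P. */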
1691 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters)
1692 {
1693     static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE };
1694     char sws_flags_str[128];
1695     char buffersrc_args[256];
1696     int ret;
1697     AVBufferSinkParams *buffersink_params = av_buffersink_params_alloc();
1698     AVFilterContext *filt_src = NULL, *filt_out = NULL, *filt_crop;
1699     AVCodecContext *codec = is->video_st->codec;
1700
1701     if (!buffersink_params)
1702         return AVERROR(ENOMEM);
1703
1704     snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%d", sws_flags);
1705     graph->scale_sws_opts = av_strdup(sws_flags_str);
1706
1707     snprintf(buffersrc_args, sizeof(buffersrc_args),
1708              "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
1709              codec->width, codec->height, codec->pix_fmt,
1710              is->video_st->time_base.num, is->video_st->time_base.den,
1711              codec->sample_aspect_ratio.num, FFMAX(codec->sample_aspect_ratio.den, 1));
1712
1713     if ((ret = avfilter_graph_create_filter(&filt_src,
1714                                             avfilter_get_by_name("buffer"),
1715                                             "ffplay_buffer", buffersrc_args, NULL,
1716                                             graph)) < 0)
1717         goto fail;
1718
1719     buffersink_params->pixel_fmts = pix_fmts;
1720     ret = avfilter_graph_create_filter(&filt_out,
1721                                        avfilter_get_by_name("ffbuffersink"),
1722                                        "ffplay_buffersink", NULL, buffersink_params, graph);
1723     if (ret < 0)
1724         goto fail;
1725
1726     /* The SDL YUV code does not handle odd width/height for some driver
1727      * combinations, therefore we crop the picture to an even width/height. */
1728     if ((ret = avfilter_graph_create_filter(&filt_crop,
1729                                             avfilter_get_by_name("crop"),
1730                                             "ffplay_crop", "floor(in_w/2)*2:floor(in_h/2)*2", NULL, graph)) < 0)
1731         goto fail;
1732     if ((ret = avfilter_link(filt_crop, 0, filt_out, 0)) < 0)
1733         goto fail;
1734
1735     if ((ret = configure_filtergraph(graph, vfilters, filt_src, filt_crop)) < 0)
1736         goto fail;
1737
1738     is->in_video_filter  = filt_src;
1739     is->out_video_filter = filt_out;
1740
1741 fail:
1742     av_freep(&buffersink_params);
1743     return ret;
1744 }
1745
1746 #endif  /* CONFIG_AVFILTER */
1747
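     /* Video decoding thread: fetches packets from the video queue, decodes
      * them (running the frames through the filter graph when avfilter is
      * enabled) and queues the resulting pictures for display. */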
1748 static int video_thread(void *arg)
1749 {
1750     AVPacket pkt = { 0 };
1751     VideoState *is = arg;
1752     AVFrame *frame = avcodec_alloc_frame();
1753     int64_t pts_int = AV_NOPTS_VALUE, pos = -1;
1754     double pts;
1755     int ret;
1756     int serial = 0;
1757
1758 #if CONFIG_AVFILTER
1759     AVCodecContext *codec = is->video_st->codec;
1760     AVFilterGraph *graph = avfilter_graph_alloc();
1761     AVFilterContext *filt_out = NULL, *filt_in = NULL;
1762     int last_w = 0;
1763     int last_h = 0;
1764     enum AVPixelFormat last_format = -2;
1765
1766     if (codec->codec->capabilities & CODEC_CAP_DR1) {
1767         is->use_dr1 = 1;
1768         codec->get_buffer     = codec_get_buffer;
1769         codec->release_buffer = codec_release_buffer;
1770         codec->opaque         = &is->buffer_pool;
1771     }
1772 #endif
1773
1774     for (;;) {
1775 #if CONFIG_AVFILTER
1776         AVFilterBufferRef *picref;
1777         AVRational tb;
1778 #endif
1779         while (is->paused && !is->videoq.abort_request)
1780             SDL_Delay(10);
1781
1782         avcodec_get_frame_defaults(frame);
1783         av_free_packet(&pkt);
1784
1785         ret = get_video_frame(is, frame, &pts_int, &pkt, &serial);
1786         if (ret < 0)
1787             goto the_end;
1788
1789         if (!ret)
1790             continue;
1791
1792 #if CONFIG_AVFILTER
1793         if (   last_w != is->video_st->codec->width
1794             || last_h != is->video_st->codec->height
1795             || last_format != is->video_st->codec->pix_fmt) {
1796             av_log(NULL, AV_LOG_INFO, "Frame changed from size:%dx%d format:%d to size:%dx%d format:%d\n",
1797                    last_w, last_h, last_format, is->video_st->codec->width, is->video_st->codec->height, is->video_st->codec->pix_fmt);
1798             avfilter_graph_free(&graph);
1799             graph = avfilter_graph_alloc();
1800             if ((ret = configure_video_filters(graph, is, vfilters)) < 0) {
1801                 SDL_Event event;
1802                 event.type = FF_QUIT_EVENT;
1803                 event.user.data1 = is;
1804                 SDL_PushEvent(&event);
1805                 av_free_packet(&pkt);
1806                 goto the_end;
1807             }
1808             filt_in  = is->in_video_filter;
1809             filt_out = is->out_video_filter;
1810             last_w = is->video_st->codec->width;
1811             last_h = is->video_st->codec->height;
1812             last_format = is->video_st->codec->pix_fmt;
1813         }
1814
1815         frame->pts = pts_int;
1816         frame->sample_aspect_ratio = av_guess_sample_aspect_ratio(is->ic, is->video_st, frame);
1817         if (is->use_dr1 && frame->opaque) {
1818             FrameBuffer      *buf = frame->opaque;
1819             AVFilterBufferRef *fb = avfilter_get_video_buffer_ref_from_arrays(
1820                                         frame->data, frame->linesize,
1821                                         AV_PERM_READ | AV_PERM_PRESERVE,
1822                                         frame->width, frame->height,
1823                                         frame->format);
1824
1825             avfilter_copy_frame_props(fb, frame);
1826             fb->buf->priv           = buf;
1827             fb->buf->free           = filter_release_buffer;
1828
1829             buf->refcount++;
1830             av_buffersrc_add_ref(filt_in, fb, AV_BUFFERSRC_FLAG_NO_COPY);
1831
1832         } else
1833             av_buffersrc_write_frame(filt_in, frame);
1834
1835         av_free_packet(&pkt);
1836
1837         while (ret >= 0) {
1838             is->frame_last_returned_time = av_gettime() / 1000000.0;
1839
1840             ret = av_buffersink_get_buffer_ref(filt_out, &picref, 0);
1841             if (ret < 0) {
1842                 ret = 0;
1843                 break;
1844             }
1845
1846             is->frame_last_filter_delay = av_gettime() / 1000000.0 - is->frame_last_returned_time;
1847             if (fabs(is->frame_last_filter_delay) > AV_NOSYNC_THRESHOLD / 10.0)
1848                 is->frame_last_filter_delay = 0;
1849
1850             avfilter_copy_buf_props(frame, picref);
1851
1852             pts_int = picref->pts;
1853             tb      = filt_out->inputs[0]->time_base;
1854             pos     = picref->pos;
1855             frame->opaque = picref;
1856
1857             if (av_cmp_q(tb, is->video_st->time_base)) {
1858                 av_unused int64_t pts1 = pts_int;
1859                 pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
1860                 av_dlog(NULL, "video_thread(): "
1861                         "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
1862                         tb.num, tb.den, pts1,
1863                         is->video_st->time_base.num, is->video_st->time_base.den, pts_int);
1864             }
1865             pts = pts_int * av_q2d(is->video_st->time_base);
1866             ret = queue_picture(is, frame, pts, pos, serial);
1867         }
1868 #else
1869         pts = pts_int * av_q2d(is->video_st->time_base);
1870         ret = queue_picture(is, frame, pts, pkt.pos, serial);
1871 #endif
1872
1873         if (ret < 0)
1874             goto the_end;
1875
1876         if (is->step)
1877             stream_toggle_pause(is);
1878     }
1879  the_end:
1880     avcodec_flush_buffers(is->video_st->codec);
1881 #if CONFIG_AVFILTER
1882     avfilter_graph_free(&graph);
1883 #endif
1884     av_free_packet(&pkt);
1885     avcodec_free_frame(&frame);
1886     return 0;
1887 }
1888
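     /* Subtitle decoding thread: decodes subtitle packets, converts the
      * bitmap palettes from RGBA to YUVA and queues them for display. */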
1889 static int subtitle_thread(void *arg)
1890 {
1891     VideoState *is = arg;
1892     SubPicture *sp;
1893     AVPacket pkt1, *pkt = &pkt1;
1894     int got_subtitle;
1895     double pts;
1896     int i, j;
1897     int r, g, b, y, u, v, a;
1898
1899     for (;;) {
1900         while (is->paused && !is->subtitleq.abort_request) {
1901             SDL_Delay(10);
1902         }
1903         if (packet_queue_get(&is->subtitleq, pkt, 1, NULL) < 0)
1904             break;
1905
1906         if (pkt->data == flush_pkt.data) {
1907             avcodec_flush_buffers(is->subtitle_st->codec);
1908             continue;
1909         }
1910         SDL_LockMutex(is->subpq_mutex);
1911         while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
1912                !is->subtitleq.abort_request) {
1913             SDL_CondWait(is->subpq_cond, is->subpq_mutex);
1914         }
1915         SDL_UnlockMutex(is->subpq_mutex);
1916
1917         if (is->subtitleq.abort_request)
1918             return 0;
1919
1920         sp = &is->subpq[is->subpq_windex];
1921
1922         /* NOTE: pts is the PTS of the _first_ subtitle beginning in
1923            this packet, if any */
1924         pts = 0;
1925         if (pkt->pts != AV_NOPTS_VALUE)
1926             pts = av_q2d(is->subtitle_st->time_base) * pkt->pts;
1927
1928         avcodec_decode_subtitle2(is->subtitle_st->codec, &sp->sub,
1929                                  &got_subtitle, pkt);
1930         if (got_subtitle && sp->sub.format == 0) {
1931             if (sp->sub.pts != AV_NOPTS_VALUE)
1932                 pts = sp->sub.pts / (double)AV_TIME_BASE;
1933             sp->pts = pts;
1934
1935             for (i = 0; i < sp->sub.num_rects; i++)
1936             {
1937                 for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
1938                 {
1939                     RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
1940                     y = RGB_TO_Y_CCIR(r, g, b);
1941                     u = RGB_TO_U_CCIR(r, g, b, 0);
1942                     v = RGB_TO_V_CCIR(r, g, b, 0);
1943                     YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
1944                 }
1945             }
1946
1947             /* now we can update the picture count */
1948             if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
1949                 is->subpq_windex = 0;
1950             SDL_LockMutex(is->subpq_mutex);
1951             is->subpq_size++;
1952             SDL_UnlockMutex(is->subpq_mutex);
1953         }
1954         av_free_packet(pkt);
1955     }
1956     return 0;
1957 }
1958
1959 /* copy samples for viewing in the waveform/spectrum display */
1960 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1961 {
1962     int size, len;
1963
1964     size = samples_size / sizeof(short);
1965     while (size > 0) {
1966         len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1967         if (len > size)
1968             len = size;
1969         memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1970         samples += len;
1971         is->sample_array_index += len;
1972         if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1973             is->sample_array_index = 0;
1974         size -= len;
1975     }
1976 }
1977
1978 /* return the wanted number of samples to get better sync if sync_type is video
1979  * or external master clock */
1980 static int synchronize_audio(VideoState *is, int nb_samples)
1981 {
1982     int wanted_nb_samples = nb_samples;
1983
1984     /* if not master, then we try to remove or add samples to correct the clock */
1985     if (get_master_sync_type(is) != AV_SYNC_AUDIO_MASTER) {
1986         double diff, avg_diff;
1987         int min_nb_samples, max_nb_samples;
1988
1989         diff = get_audio_clock(is) - get_master_clock(is);
1990
1991         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1992             is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1993             if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1994                 /* not enough measurements to have a reliable estimate */
1995                 is->audio_diff_avg_count++;
1996             } else {
1997                 /* estimate the A-V difference */
1998                 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1999
2000                 if (fabs(avg_diff) >= is->audio_diff_threshold) {
2001                     wanted_nb_samples = nb_samples + (int)(diff * is->audio_src.freq);
2002                     min_nb_samples = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX) / 100));
2003                     max_nb_samples = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX) / 100));
2004                     wanted_nb_samples = FFMIN(FFMAX(wanted_nb_samples, min_nb_samples), max_nb_samples);
2005                 }
2006                 av_dlog(NULL, "diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
2007                         diff, avg_diff, wanted_nb_samples - nb_samples,
2008                         is->audio_clock, is->video_clock, is->audio_diff_threshold);
2009             }
2010         } else {
2011             /* the difference is too big: it may be due to initial PTS errors,
2012                so reset the A-V filter */
2013             is->audio_diff_avg_count = 0;
2014             is->audio_diff_cum       = 0;
2015         }
2016     }
2017
2018     return wanted_nb_samples;
2019 }
2020
2021 /* decode one audio frame and return its uncompressed size */
2022 static int audio_decode_frame(VideoState *is, double *pts_ptr)
2023 {
2024     AVPacket *pkt_temp = &is->audio_pkt_temp;
2025     AVPacket *pkt = &is->audio_pkt;
2026     AVCodecContext *dec = is->audio_st->codec;
2027     int len1, len2, data_size, resampled_data_size;
2028     int64_t dec_channel_layout;
2029     int got_frame;
2030     double pts;
2031     int new_packet = 0;
2032     int flush_complete = 0;
2033     int wanted_nb_samples;
2034
2035     for (;;) {
2036         /* NOTE: the audio packet can contain several frames */
2037         while (pkt_temp->size > 0 || (!pkt_temp->data && new_packet)) {
2038             if (!is->frame) {
2039                 if (!(is->frame = avcodec_alloc_frame()))
2040                     return AVERROR(ENOMEM);
2041             } else
2042                 avcodec_get_frame_defaults(is->frame);
2043
2044             if (is->paused)
2045                 return -1;
2046
2047             if (flush_complete)
2048                 break;
2049             new_packet = 0;
2050             len1 = avcodec_decode_audio4(dec, is->frame, &got_frame, pkt_temp);
2051             if (len1 < 0) {
2052                 /* if error, we skip the frame */
2053                 pkt_temp->size = 0;
2054                 break;
2055             }
2056
2057             pkt_temp->data += len1;
2058             pkt_temp->size -= len1;
2059
2060             if (!got_frame) {
2061                 /* stop sending empty packets if the decoder is finished */
2062                 if (!pkt_temp->data && dec->codec->capabilities & CODEC_CAP_DELAY)
2063                     flush_complete = 1;
2064                 continue;
2065             }
2066             data_size = av_samples_get_buffer_size(NULL, is->frame->channels,
2067                                                    is->frame->nb_samples,
2068                                                    is->frame->format, 1);
2069
2070             dec_channel_layout =
2071                 (is->frame->channel_layout && is->frame->channels == av_get_channel_layout_nb_channels(is->frame->channel_layout)) ?
2072                 is->frame->channel_layout : av_get_default_channel_layout(is->frame->channels);
2073             wanted_nb_samples = synchronize_audio(is, is->frame->nb_samples);
2074
2075             if (is->frame->format        != is->audio_src.fmt            ||
2076                 dec_channel_layout       != is->audio_src.channel_layout ||
2077                 is->frame->sample_rate   != is->audio_src.freq           ||
2078                 (wanted_nb_samples       != is->frame->nb_samples && !is->swr_ctx)) {
2079                 swr_free(&is->swr_ctx);
2080                 is->swr_ctx = swr_alloc_set_opts(NULL,
2081                                                  is->audio_tgt.channel_layout, is->audio_tgt.fmt, is->audio_tgt.freq,
2082                                                  dec_channel_layout,           is->frame->format, is->frame->sample_rate,
2083                                                  0, NULL);
2084                 if (!is->swr_ctx || swr_init(is->swr_ctx) < 0) {
2085                     fprintf(stderr, "Cannot create sample rate converter for conversion of %d Hz %s %d channels to %d Hz %s %d channels!\n",
2086                         is->frame->sample_rate,   av_get_sample_fmt_name(is->frame->format), (int)is->frame->channels,
2087                         is->audio_tgt.freq, av_get_sample_fmt_name(is->audio_tgt.fmt), is->audio_tgt.channels);
2088                     break;
2089                 }
2090                 is->audio_src.channel_layout = dec_channel_layout;
2091                 is->audio_src.channels = is->frame->channels;
2092                 is->audio_src.freq = is->frame->sample_rate;
2093                 is->audio_src.fmt = is->frame->format;
2094             }
2095
2096             if (is->swr_ctx) {
2097                 const uint8_t **in = (const uint8_t **)is->frame->extended_data;
2098                 uint8_t *out[] = {is->audio_buf2};
2099                 int out_count = sizeof(is->audio_buf2) / is->audio_tgt.channels / av_get_bytes_per_sample(is->audio_tgt.fmt);
2100                 if (wanted_nb_samples != is->frame->nb_samples) {
2101                     if (swr_set_compensation(is->swr_ctx, (wanted_nb_samples - is->frame->nb_samples) * is->audio_tgt.freq / is->frame->sample_rate,
2102                                                 wanted_nb_samples * is->audio_tgt.freq / is->frame->sample_rate) < 0) {
2103                         fprintf(stderr, "swr_set_compensation() failed\n");
2104                         break;
2105                     }
2106                 }
2107                 len2 = swr_convert(is->swr_ctx, out, out_count, in, is->frame->nb_samples);
2108                 if (len2 < 0) {
2109                     fprintf(stderr, "swr_convert() failed\n");
2110                     break;
2111                 }
2112                 if (len2 == out_count) {
2113                     fprintf(stderr, "warning: audio buffer is probably too small\n");
2114                     swr_init(is->swr_ctx);
2115                 }
2116                 is->audio_buf = is->audio_buf2;
2117                 resampled_data_size = len2 * is->audio_tgt.channels * av_get_bytes_per_sample(is->audio_tgt.fmt);
2118             } else {
2119                 is->audio_buf = is->frame->data[0];
2120                 resampled_data_size = data_size;
2121             }
2122
2123             /* the pts is derived from the running audio clock */
2124             pts = is->audio_clock;
2125             *pts_ptr = pts;
2126             is->audio_clock += (double)data_size /
2127                 (is->frame->channels * is->frame->sample_rate * av_get_bytes_per_sample(is->frame->format));
2128 #ifdef DEBUG
2129             {
2130                 static double last_clock;
2131                 printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
2132                        is->audio_clock - last_clock,
2133                        is->audio_clock, pts);
2134                 last_clock = is->audio_clock;
2135             }
2136 #endif
2137             return resampled_data_size;
2138         }
2139
2140         /* free the current packet */
2141         if (pkt->data)
2142             av_free_packet(pkt);
2143         memset(pkt_temp, 0, sizeof(*pkt_temp));
2144
2145         if (is->paused || is->audioq.abort_request) {
2146             return -1;
2147         }
2148
2149         if (is->audioq.nb_packets == 0)
2150             SDL_CondSignal(is->continue_read_thread);
2151
2152         /* read next packet */
2153         if ((new_packet = packet_queue_get(&is->audioq, pkt, 1, &is->audio_pkt_temp_serial)) < 0)
2154             return -1;
2155
2156         if (pkt->data == flush_pkt.data) {
2157             avcodec_flush_buffers(dec);
2158             flush_complete = 0;
2159         }
2160
2161         *pkt_temp = *pkt;
2162
2163         /* update the audio clock with the packet pts, if available */
2164         if (pkt->pts != AV_NOPTS_VALUE) {
2165             is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
2166         }
2167     }
2168 }
2169
2170 /* prepare a new audio buffer */
2171 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2172 {
2173     VideoState *is = opaque;
2174     int audio_size, len1;
2175     int bytes_per_sec;
2176     int frame_size = av_samples_get_buffer_size(NULL, is->audio_tgt.channels, 1, is->audio_tgt.fmt, 1);
2177     double pts;
2178
2179     audio_callback_time = av_gettime();
2180
2181     while (len > 0) {
2182         if (is->audio_buf_index >= is->audio_buf_size) {
2183            audio_size = audio_decode_frame(is, &pts);
2184            if (audio_size < 0) {
2185                 /* if error, just output silence */
2186                is->audio_buf      = is->silence_buf;
2187                is->audio_buf_size = sizeof(is->silence_buf) / frame_size * frame_size;
2188            } else {
2189                if (is->show_mode != SHOW_MODE_VIDEO)
2190                    update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2191                is->audio_buf_size = audio_size;
2192            }
2193            is->audio_buf_index = 0;
2194         }
2195         len1 = is->audio_buf_size - is->audio_buf_index;
2196         if (len1 > len)
2197             len1 = len;
2198         memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2199         len -= len1;
2200         stream += len1;
2201         is->audio_buf_index += len1;
2202     }
2203     bytes_per_sec = is->audio_tgt.freq * is->audio_tgt.channels * av_get_bytes_per_sample(is->audio_tgt.fmt);
2204     is->audio_write_buf_size = is->audio_buf_size - is->audio_buf_index;
2205     /* Let's assume the audio driver that is used by SDL has two periods. */
2206     is->audio_current_pts = is->audio_clock - (double)(2 * is->audio_hw_buf_size + is->audio_write_buf_size) / bytes_per_sec;
2207     is->audio_current_pts_drift = is->audio_current_pts - audio_callback_time / 1000000.0;
2208     if (is->audioq.serial == is->audio_pkt_temp_serial)
2209         check_external_clock_sync(is, is->audio_current_pts);
2210 }
2211
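     /* Open the SDL audio device with the requested channel layout and sample
      * rate, falling back to other channel counts if SDL refuses. On success,
      * fill audio_hw_params and return the hardware buffer size in bytes;
      * return a negative value on failure. */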
2212 static int audio_open(void *opaque, int64_t wanted_channel_layout, int wanted_nb_channels, int wanted_sample_rate, struct AudioParams *audio_hw_params)
2213 {
2214     SDL_AudioSpec wanted_spec, spec;
2215     const char *env;
2216     const int next_nb_channels[] = {0, 0, 1, 6, 2, 6, 4, 6};
2217
2218     env = SDL_getenv("SDL_AUDIO_CHANNELS");
2219     if (env) {
2220         wanted_nb_channels = atoi(env);
2221         wanted_channel_layout = av_get_default_channel_layout(wanted_nb_channels);
2222     }
2223     if (!wanted_channel_layout || wanted_nb_channels != av_get_channel_layout_nb_channels(wanted_channel_layout)) {
2224         wanted_channel_layout = av_get_default_channel_layout(wanted_nb_channels);
2225         wanted_channel_layout &= ~AV_CH_LAYOUT_STEREO_DOWNMIX;
2226     }
2227     wanted_spec.channels = av_get_channel_layout_nb_channels(wanted_channel_layout);
2228     wanted_spec.freq = wanted_sample_rate;
2229     if (wanted_spec.freq <= 0 || wanted_spec.channels <= 0) {
2230         fprintf(stderr, "Invalid sample rate or channel count!\n");
2231         return -1;
2232     }
2233     wanted_spec.format = AUDIO_S16SYS;
2234     wanted_spec.silence = 0;
2235     wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
2236     wanted_spec.callback = sdl_audio_callback;
2237     wanted_spec.userdata = opaque;
2238     while (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2239         fprintf(stderr, "SDL_OpenAudio (%d channels): %s\n", wanted_spec.channels, SDL_GetError());
2240         wanted_spec.channels = next_nb_channels[FFMIN(7, wanted_spec.channels)];
2241         if (!wanted_spec.channels) {
2242             fprintf(stderr, "No more channel combinations to try, audio open failed\n");
2243             return -1;
2244         }
2245         wanted_channel_layout = av_get_default_channel_layout(wanted_spec.channels);
2246     }
2247     if (spec.format != AUDIO_S16SYS) {
2248         fprintf(stderr, "SDL advised audio format %d is not supported!\n", spec.format);
2249         return -1;
2250     }
2251     if (spec.channels != wanted_spec.channels) {
2252         wanted_channel_layout = av_get_default_channel_layout(spec.channels);
2253         if (!wanted_channel_layout) {
2254             fprintf(stderr, "SDL advised channel count %d is not supported!\n", spec.channels);
2255             return -1;
2256         }
2257     }
2258
2259     audio_hw_params->fmt = AV_SAMPLE_FMT_S16;
2260     audio_hw_params->freq = spec.freq;
2261     audio_hw_params->channel_layout = wanted_channel_layout;
2262     audio_hw_params->channels =  spec.channels;
2263     return spec.size;
2264 }
2265
2266 /* open a given stream. Return 0 if OK */
2267 static int stream_component_open(VideoState *is, int stream_index)
2268 {
2269     AVFormatContext *ic = is->ic;
2270     AVCodecContext *avctx;
2271     AVCodec *codec;
2272     AVDictionary *opts;
2273     AVDictionaryEntry *t = NULL;
2274
2275     if (stream_index < 0 || stream_index >= ic->nb_streams)
2276         return -1;
2277     avctx = ic->streams[stream_index]->codec;
2278
2279     codec = avcodec_find_decoder(avctx->codec_id);
2280     opts = filter_codec_opts(codec_opts, avctx->codec_id, ic, ic->streams[stream_index], codec);
2281
2282     switch(avctx->codec_type){
2283         case AVMEDIA_TYPE_AUDIO   : is->last_audio_stream    = stream_index; if(audio_codec_name   ) codec= avcodec_find_decoder_by_name(   audio_codec_name); break;
2284         case AVMEDIA_TYPE_SUBTITLE: is->last_subtitle_stream = stream_index; if(subtitle_codec_name) codec= avcodec_find_decoder_by_name(subtitle_codec_name); break;
2285         case AVMEDIA_TYPE_VIDEO   : is->last_video_stream    = stream_index; if(video_codec_name   ) codec= avcodec_find_decoder_by_name(   video_codec_name); break;
2286     }
2287     if (!codec)
2288         return -1;
2289
2290     avctx->workaround_bugs   = workaround_bugs;
2291     avctx->lowres            = lowres;
2292     if(avctx->lowres > codec->max_lowres){
2293         av_log(avctx, AV_LOG_WARNING, "The maximum value for lowres supported by the decoder is %d\n",
2294                 codec->max_lowres);
2295         avctx->lowres= codec->max_lowres;
2296     }
2297     avctx->idct_algo         = idct;
2298     avctx->skip_frame        = skip_frame;
2299     avctx->skip_idct         = skip_idct;
2300     avctx->skip_loop_filter  = skip_loop_filter;
2301     avctx->error_concealment = error_concealment;
2302
2303     if(avctx->lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
2304     if (fast)   avctx->flags2 |= CODEC_FLAG2_FAST;
2305     if(codec->capabilities & CODEC_CAP_DR1)
2306         avctx->flags |= CODEC_FLAG_EMU_EDGE;
2307
2308     if (!av_dict_get(opts, "threads", NULL, 0))
2309         av_dict_set(&opts, "threads", "auto", 0);
2310     if (!codec ||
2311         avcodec_open2(avctx, codec, &opts) < 0)
2312         return -1;
2313     if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2314         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2315         return AVERROR_OPTION_NOT_FOUND;
2316     }
2317
2318     /* prepare audio output */
2319     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2320         int audio_hw_buf_size = audio_open(is, avctx->channel_layout, avctx->channels, avctx->sample_rate, &is->audio_src);
2321         if (audio_hw_buf_size < 0)
2322             return -1;
2323         is->audio_hw_buf_size = audio_hw_buf_size;
2324         is->audio_tgt = is->audio_src;
2325     }
2326
2327     ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2328     switch (avctx->codec_type) {
2329     case AVMEDIA_TYPE_AUDIO:
2330         is->audio_stream = stream_index;
2331         is->audio_st = ic->streams[stream_index];
2332         is->audio_buf_size  = 0;
2333         is->audio_buf_index = 0;
2334
2335         /* init averaging filter */
2336         is->audio_diff_avg_coef  = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2337         is->audio_diff_avg_count = 0;
2338         /* since we do not have a precise enough measure of the audio FIFO fullness,
2339            we correct audio sync only if the error is larger than this threshold */
2340         is->audio_diff_threshold = 2.0 * is->audio_hw_buf_size / av_samples_get_buffer_size(NULL, is->audio_tgt.channels, is->audio_tgt.freq, is->audio_tgt.fmt, 1);
2341
2342         memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
2343         memset(&is->audio_pkt_temp, 0, sizeof(is->audio_pkt_temp));
2344         packet_queue_start(&is->audioq);
2345         SDL_PauseAudio(0);
2346         break;
2347     case AVMEDIA_TYPE_VIDEO:
2348         is->video_stream = stream_index;
2349         is->video_st = ic->streams[stream_index];
2350
2351         packet_queue_start(&is->videoq);
2352         is->video_tid = SDL_CreateThread(video_thread, is);
2353         break;
2354     case AVMEDIA_TYPE_SUBTITLE:
2355         is->subtitle_stream = stream_index;
2356         is->subtitle_st = ic->streams[stream_index];
2357         packet_queue_start(&is->subtitleq);
2358
2359         is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
2360         break;
2361     default:
2362         break;
2363     }
2364     return 0;
2365 }
2366
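     /* Close a stream component: abort and flush its packet queue, stop the
      * associated decoder thread or the audio device, and free the per-stream
      * resources. */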
2367 static void stream_component_close(VideoState *is, int stream_index)
2368 {
2369     AVFormatContext *ic = is->ic;
2370     AVCodecContext *avctx;
2371
2372     if (stream_index < 0 || stream_index >= ic->nb_streams)
2373         return;
2374     avctx = ic->streams[stream_index]->codec;
2375
2376     switch (avctx->codec_type) {
2377     case AVMEDIA_TYPE_AUDIO:
2378         packet_queue_abort(&is->audioq);
2379
2380         SDL_CloseAudio();
2381
2382         packet_queue_flush(&is->audioq);
2383         av_free_packet(&is->audio_pkt);
2384         swr_free(&is->swr_ctx);
2385         av_freep(&is->audio_buf1);
2386         is->audio_buf = NULL;
2387         avcodec_free_frame(&is->frame);
2388
2389         if (is->rdft) {
2390             av_rdft_end(is->rdft);
2391             av_freep(&is->rdft_data);
2392             is->rdft = NULL;
2393             is->rdft_bits = 0;
2394         }
2395         break;
2396     case AVMEDIA_TYPE_VIDEO:
2397         packet_queue_abort(&is->videoq);
2398
2399         /* note: we also signal the condition variable to make sure we unblock the
2400            video thread in all cases */
2401         SDL_LockMutex(is->pictq_mutex);
2402         SDL_CondSignal(is->pictq_cond);
2403         SDL_UnlockMutex(is->pictq_mutex);
2404
2405         SDL_WaitThread(is->video_tid, NULL);
2406
2407         packet_queue_flush(&is->videoq);
2408         break;
2409     case AVMEDIA_TYPE_SUBTITLE:
2410         packet_queue_abort(&is->subtitleq);
2411
2412         /* note: we also signal the condition variable to make sure we unblock the
2413            subtitle thread in all cases */
2414         SDL_LockMutex(is->subpq_mutex);
2415         is->subtitle_stream_changed = 1;
2416
2417         SDL_CondSignal(is->subpq_cond);
2418         SDL_UnlockMutex(is->subpq_mutex);
2419
2420         SDL_WaitThread(is->subtitle_tid, NULL);
2421
2422         packet_queue_flush(&is->subtitleq);
2423         break;
2424     default:
2425         break;
2426     }
2427
2428     ic->streams[stream_index]->discard = AVDISCARD_ALL;
2429     avcodec_close(avctx);
2430 #if CONFIG_AVFILTER
2431     free_buffer_pool(&is->buffer_pool);
2432 #endif
2433     switch (avctx->codec_type) {
2434     case AVMEDIA_TYPE_AUDIO:
2435         is->audio_st = NULL;
2436         is->audio_stream = -1;
2437         break;
2438     case AVMEDIA_TYPE_VIDEO:
2439         is->video_st = NULL;
2440         is->video_stream = -1;
2441         break;
2442     case AVMEDIA_TYPE_SUBTITLE:
2443         is->subtitle_st = NULL;
2444         is->subtitle_stream = -1;
2445         break;
2446     default:
2447         break;
2448     }
2449 }
2450
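     /* AVFormatContext interrupt callback: returning non-zero aborts blocking
      * I/O operations when the player is shutting down. */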
2451 static int decode_interrupt_cb(void *ctx)
2452 {
2453     VideoState *is = ctx;
2454     return is->abort_request;
2455 }
2456
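     /* Heuristic used to enable the infinite buffer by default: treat
      * RTP/RTSP/SDP inputs and rtp:/udp: URLs as realtime streams. */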
2457 static int is_realtime(AVFormatContext *s)
2458 {
2459     if(   !strcmp(s->iformat->name, "rtp")
2460        || !strcmp(s->iformat->name, "rtsp")
2461        || !strcmp(s->iformat->name, "sdp")
2462     )
2463         return 1;
2464
2465     if(s->pb && (   !strncmp(s->filename, "rtp:", 4)
2466                  || !strncmp(s->filename, "udp:", 4)
2467                 )
2468     )
2469         return 1;
2470     return 0;
2471 }
2472
2473 /* this thread gets the stream from the disk or the network */
2474 static int read_thread(void *arg)
2475 {
2476     VideoState *is = arg;
2477     AVFormatContext *ic = NULL;
2478     int err, i, ret;
2479     int st_index[AVMEDIA_TYPE_NB];
2480     AVPacket pkt1, *pkt = &pkt1;
2481     int eof = 0;
2482     int pkt_in_play_range = 0;
2483     AVDictionaryEntry *t;
2484     AVDictionary **opts;
2485     int orig_nb_streams;
2486     SDL_mutex *wait_mutex = SDL_CreateMutex();
2487
2488     memset(st_index, -1, sizeof(st_index));
2489     is->last_video_stream = is->video_stream = -1;
2490     is->last_audio_stream = is->audio_stream = -1;
2491     is->last_subtitle_stream = is->subtitle_stream = -1;
2492
2493     ic = avformat_alloc_context();
2494     ic->interrupt_callback.callback = decode_interrupt_cb;
2495     ic->interrupt_callback.opaque = is;
2496     err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
2497     if (err < 0) {
2498         print_error(is->filename, err);
2499         ret = -1;
2500         goto fail;
2501     }
2502     if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2503         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2504         ret = AVERROR_OPTION_NOT_FOUND;
2505         goto fail;
2506     }
2507     is->ic = ic;
2508
2509     if (genpts)
2510         ic->flags |= AVFMT_FLAG_GENPTS;
2511
2512     opts = setup_find_stream_info_opts(ic, codec_opts);
2513     orig_nb_streams = ic->nb_streams;
2514
2515     err = avformat_find_stream_info(ic, opts);
2516     if (err < 0) {
2517         fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
2518         ret = -1;
2519         goto fail;
2520     }
2521     for (i = 0; i < orig_nb_streams; i++)
2522         av_dict_free(&opts[i]);
2523     av_freep(&opts);
2524
2525     if (ic->pb)
2526         ic->pb->eof_reached = 0; // FIXME hack, ffplay should probably not use url_feof() to test for the end
2527
2528     if (seek_by_bytes < 0)
2529         seek_by_bytes = !!(ic->iformat->flags & AVFMT_TS_DISCONT);
2530
2531     /* if seeking was requested, execute it */
2532     if (start_time != AV_NOPTS_VALUE) {
2533         int64_t timestamp;
2534
2535         timestamp = start_time;
2536         /* add the stream start time */
2537         if (ic->start_time != AV_NOPTS_VALUE)
2538             timestamp += ic->start_time;
2539         ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2540         if (ret < 0) {
2541             fprintf(stderr, "%s: could not seek to position %0.3f\n",
2542                     is->filename, (double)timestamp / AV_TIME_BASE);
2543         }
2544     }
2545
2546     for (i = 0; i < ic->nb_streams; i++)
2547         ic->streams[i]->discard = AVDISCARD_ALL;
2548     if (!video_disable)
2549         st_index[AVMEDIA_TYPE_VIDEO] =
2550             av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2551                                 wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2552     if (!audio_disable)
2553         st_index[AVMEDIA_TYPE_AUDIO] =
2554             av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2555                                 wanted_stream[AVMEDIA_TYPE_AUDIO],
2556                                 st_index[AVMEDIA_TYPE_VIDEO],
2557                                 NULL, 0);
2558     if (!video_disable)
2559         st_index[AVMEDIA_TYPE_SUBTITLE] =
2560             av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2561                                 wanted_stream[AVMEDIA_TYPE_SUBTITLE],
2562                                 (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2563                                  st_index[AVMEDIA_TYPE_AUDIO] :
2564                                  st_index[AVMEDIA_TYPE_VIDEO]),
2565                                 NULL, 0);
2566     if (show_status) {
2567         av_dump_format(ic, 0, is->filename, 0);
2568     }
2569
2570     is->show_mode = show_mode;
2571
2572     /* open the streams */
2573     if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2574         stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2575     }
2576
2577     ret = -1;
2578     if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2579         ret = stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2580     }
2581     is->refresh_tid = SDL_CreateThread(refresh_thread, is);
2582     if (is->show_mode == SHOW_MODE_NONE)
2583         is->show_mode = ret >= 0 ? SHOW_MODE_VIDEO : SHOW_MODE_RDFT;
2584
2585     if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2586         stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2587     }
2588
2589     if (is->video_stream < 0 && is->audio_stream < 0) {
2590         fprintf(stderr, "%s: could not open codecs\n", is->filename);
2591         ret = -1;
2592         goto fail;
2593     }
2594
2595     if (infinite_buffer < 0 && is_realtime(ic))
2596         infinite_buffer = 1;
2597
2598     for (;;) {
2599         if (is->abort_request)
2600             break;
2601         if (is->paused != is->last_paused) {
2602             is->last_paused = is->paused;
2603             if (is->paused)
2604                 is->read_pause_return = av_read_pause(ic);
2605             else
2606                 av_read_play(ic);
2607         }
2608 #if CONFIG_RTSP_DEMUXER || CONFIG_MMSH_PROTOCOL
2609         if (is->paused &&
2610                 (!strcmp(ic->iformat->name, "rtsp") ||
2611                  (ic->pb && !strncmp(input_filename, "mmsh:", 5)))) {
2612             /* wait 10 ms to avoid trying to get another packet */
2613             /* XXX: horrible */
2614             SDL_Delay(10);
2615             continue;
2616         }
2617 #endif
2618         if (is->seek_req) {
2619             int64_t seek_target = is->seek_pos;
2620             int64_t seek_min    = is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2621             int64_t seek_max    = is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2622 // FIXME the +-2 is there because the rounding is not done in the correct direction
2623 //      when the seek_pos/seek_rel variables are generated
2624
2625             ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2626             if (ret < 0) {
2627                 fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
2628             } else {
2629                 if (is->audio_stream >= 0) {
2630                     packet_queue_flush(&is->audioq);
2631                     packet_queue_put(&is->audioq, &flush_pkt);
2632                 }
2633                 if (is->subtitle_stream >= 0) {
2634                     packet_queue_flush(&is->subtitleq);
2635                     packet_queue_put(&is->subtitleq, &flush_pkt);
2636                 }
2637                 if (is->video_stream >= 0) {
2638                     packet_queue_flush(&is->videoq);
2639                     packet_queue_put(&is->videoq, &flush_pkt);
2640                 }
2641             }
2642             update_external_clock_pts(is, (seek_target + ic->start_time) / (double)AV_TIME_BASE);
2643             is->seek_req = 0;
2644             eof = 0;
2645         }
2646         if (is->que_attachments_req) {
2647             avformat_queue_attached_pictures(ic);
2648             is->que_attachments_req = 0;
2649         }
2650
2651         /* if the queues are full, no need to read more */
2652         if (infinite_buffer<1 &&
2653               (is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2654             || (   (is->audioq   .nb_packets > MIN_FRAMES || is->audio_stream < 0 || is->audioq.abort_request)
2655                 && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream < 0 || is->videoq.abort_request)
2656                 && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream < 0 || is->subtitleq.abort_request)))) {
2657             /* wait 10 ms */
2658             SDL_LockMutex(wait_mutex);
2659             SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
2660             SDL_UnlockMutex(wait_mutex);
2661             continue;
2662         }
2663         if (eof) {
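                 /* queue empty packets to flush any delayed decoder frames, then
                    loop or exit once all packet queues have drained */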
2664             if (is->video_stream >= 0) {
2665                 av_init_packet(pkt);
2666                 pkt->data = NULL;
2667                 pkt->size = 0;
2668                 pkt->stream_index = is->video_stream;
2669                 packet_queue_put(&is->videoq, pkt);
2670             }
2671             if (is->audio_stream >= 0 &&
2672                 is->audio_st->codec->codec->capabilities & CODEC_CAP_DELAY) {
2673                 av_init_packet(pkt);
2674                 pkt->data = NULL;
2675                 pkt->size = 0;
2676                 pkt->stream_index = is->audio_stream;
2677                 packet_queue_put(&is->audioq, pkt);
2678             }
2679             SDL_Delay(10);
2680             if (is->audioq.size + is->videoq.size + is->subtitleq.size == 0) {
2681                 if (loop != 1 && (!loop || --loop)) {
2682                     stream_seek(is, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
2683                 } else if (autoexit) {
2684                     ret = AVERROR_EOF;
2685                     goto fail;
2686                 }
2687             }
2688             eof=0;
2689             continue;
2690         }
2691         ret = av_read_frame(ic, pkt);
2692         if (ret < 0) {
2693             if (ret == AVERROR_EOF || url_feof(ic->pb))
2694                 eof = 1;
2695             if (ic->pb && ic->pb->error)
2696                 break;
2697             SDL_LockMutex(wait_mutex);
2698             SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
2699             SDL_UnlockMutex(wait_mutex);
2700             continue;
2701         }
2702         /* check if packet is in play range specified by user, then queue, otherwise discard */
2703         pkt_in_play_range = duration == AV_NOPTS_VALUE ||
2704                 (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
2705                 av_q2d(ic->streams[pkt->stream_index]->time_base) -
2706                 (double)(start_time != AV_NOPTS_VALUE ? start_time : 0) / 1000000
2707                 <= ((double)duration / 1000000);
2708         if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
2709             packet_queue_put(&is->audioq, pkt);
2710         } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
2711             packet_queue_put(&is->videoq, pkt);
2712         } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
2713             packet_queue_put(&is->subtitleq, pkt);
2714         } else {
2715             av_free_packet(pkt);
2716         }
2717     }
2718     /* wait until the end */
2719     while (!is->abort_request) {
2720         SDL_Delay(100);
2721     }
2722
2723     ret = 0;
2724  fail:
2725     /* close each stream */
2726     if (is->audio_stream >= 0)
2727         stream_component_close(is, is->audio_stream);
2728     if (is->video_stream >= 0)
2729         stream_component_close(is, is->video_stream);
2730     if (is->subtitle_stream >= 0)
2731         stream_component_close(is, is->subtitle_stream);
2732     if (is->ic) {
2733         avformat_close_input(&is->ic);
2734     }
2735
2736     if (ret != 0) {
2737         SDL_Event event;
2738
2739         event.type = FF_QUIT_EVENT;
2740         event.user.data1 = is;
2741         SDL_PushEvent(&event);
2742     }
2743     SDL_DestroyMutex(wait_mutex);
2744     return 0;
2745 }
2746
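     /* Allocate and initialize a VideoState for the given input: set up the
      * picture and subtitle queues, mutexes and clocks, then start the read
      * thread. */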
2747 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2748 {
2749     VideoState *is;
2750
2751     is = av_mallocz(sizeof(VideoState));
2752     if (!is)
2753         return NULL;
2754     av_strlcpy(is->filename, filename, sizeof(is->filename));
2755     is->iformat = iformat;
2756     is->ytop    = 0;
2757     is->xleft   = 0;
2758
2759     /* start video display */
2760     is->pictq_mutex = SDL_CreateMutex();
2761     is->pictq_cond  = SDL_CreateCond();
2762
2763     is->subpq_mutex = SDL_CreateMutex();
2764     is->subpq_cond  = SDL_CreateCond();
2765
2766     packet_queue_init(&is->videoq);
2767     packet_queue_init(&is->audioq);
2768     packet_queue_init(&is->subtitleq);
2769
2770     is->continue_read_thread = SDL_CreateCond();
2771
2772     update_external_clock_pts(is, 0.0);
2773     is->audio_current_pts_drift = -av_gettime() / 1000000.0;
2774     is->video_current_pts_drift = is->audio_current_pts_drift;
2775     is->av_sync_type = av_sync_type;
2776     is->read_tid     = SDL_CreateThread(read_thread, is);
2777     if (!is->read_tid) {
2778         av_free(is);
2779         return NULL;
2780     }
2781     return is;
2782 }
2783
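     /* Switch to the next stream of the given type, wrapping around at the end
      * of the stream list and skipping streams with unusable parameters. */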
2784 static void stream_cycle_channel(VideoState *is, int codec_type)
2785 {
2786     AVFormatContext *ic = is->ic;
2787     int start_index, stream_index;
2788     int old_index;
2789     AVStream *st;
2790
2791     if (codec_type == AVMEDIA_TYPE_VIDEO) {
2792         start_index = is->last_video_stream;
2793         old_index = is->video_stream;
2794     } else if (codec_type == AVMEDIA_TYPE_AUDIO) {
2795         start_index = is->last_audio_stream;
2796         old_index = is->audio_stream;
2797     } else {
2798         start_index = is->last_subtitle_stream;
2799         old_index = is->subtitle_stream;
2800     }
2801     stream_index = start_index;
2802     for (;;) {
2803         if (++stream_index >= is->ic->nb_streams)
2804         {
2805             if (codec_type == AVMEDIA_TYPE_SUBTITLE)
2806             {
2807                 stream_index = -1;
2808                 is->last_subtitle_stream = -1;
2809                 goto the_end;
2810             }
2811             if (start_index == -1)
2812                 return;
2813             stream_index = 0;
2814         }
2815         if (stream_index == start_index)
2816             return;
2817         st = ic->streams[stream_index];
2818         if (st->codec->codec_type == codec_type) {
2819             /* check that parameters are OK */
2820             switch (codec_type) {
2821             case AVMEDIA_TYPE_AUDIO:
2822                 if (st->codec->sample_rate != 0 &&
2823                     st->codec->channels != 0)
2824                     goto the_end;
2825                 break;
2826             case AVMEDIA_TYPE_VIDEO:
2827             case AVMEDIA_TYPE_SUBTITLE:
2828                 goto the_end;
2829             default:
2830                 break;
2831             }
2832         }
2833     }
2834  the_end:
2835     stream_component_close(is, old_index);
2836     stream_component_open(is, stream_index);
2837     if (codec_type == AVMEDIA_TYPE_VIDEO)
2838         is->que_attachments_req = 1;
2839 }
2840
2841
2842 static void toggle_full_screen(VideoState *is)
2843 {
2844 #if defined(__APPLE__) && SDL_VERSION_ATLEAST(1, 2, 14)
2845     /* OS X needs to reallocate the SDL overlays */
2846     int i;
2847     for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++)
2848         is->pictq[i].reallocate = 1;
2849 #endif
2850     is_full_screen = !is_full_screen;
2851     video_open(is, 1);
2852 }
2853
2854 static void toggle_pause(VideoState *is)
2855 {
2856     stream_toggle_pause(is);
2857     is->step = 0;
2858 }
2859
2860 static void step_to_next_frame(VideoState *is)
2861 {
2862     /* if the stream is paused, unpause it, then step */
2863     if (is->paused)
2864         stream_toggle_pause(is);
2865     is->step = 1;
2866 }
2867
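     /* Cycle through the display modes (video, waveform, spectrum) and clear
      * the window so the new mode starts from a black background. */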
2868 static void toggle_audio_display(VideoState *is)
2869 {
2870     int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2871     is->show_mode = (is->show_mode + 1) % SHOW_MODE_NB;
2872     fill_rectangle(screen,
2873                 is->xleft, is->ytop, is->width, is->height,
2874                 bgcolor, 1);
2875 }
2876
2877 /* handle an event sent by the GUI */
2878 static void event_loop(VideoState *cur_stream)
2879 {
2880     SDL_Event event;
2881     double incr, pos, frac;
2882
2883     for (;;) {
2884         double x;
2885         SDL_WaitEvent(&event);
2886         switch (event.type) {
2887         case SDL_KEYDOWN:
2888             if (exit_on_keydown) {
2889                 do_exit(cur_stream);
2890                 break;
2891             }
2892             switch (event.key.keysym.sym) {
2893             case SDLK_ESCAPE:
2894             case SDLK_q:
2895                 do_exit(cur_stream);
2896                 break;
2897             case SDLK_f:
2898                 toggle_full_screen(cur_stream);
2899                 cur_stream->force_refresh = 1;
2900                 break;
2901             case SDLK_p:
2902             case SDLK_SPACE:
2903                 toggle_pause(cur_stream);
2904                 break;
2905             case SDLK_s: // S: Step to next frame
2906                 step_to_next_frame(cur_stream);
2907                 break;
2908             case SDLK_a:
2909                 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
2910                 break;
2911             case SDLK_v:
2912                 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
2913                 break;
2914             case SDLK_t:
2915                 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
2916                 break;
2917             case SDLK_w:
2918                 toggle_audio_display(cur_stream);
2919                 cur_stream->force_refresh = 1;
2920                 break;
2921             case SDLK_PAGEUP:
2922                 incr = 600.0;
2923                 goto do_seek;
2924             case SDLK_PAGEDOWN:
2925                 incr = -600.0;
2926                 goto do_seek;
2927             case SDLK_LEFT:
2928                 incr = -10.0;
2929                 goto do_seek;
2930             case SDLK_RIGHT:
2931                 incr = 10.0;
2932                 goto do_seek;
2933             case SDLK_UP:
2934                 incr = 60.0;
2935                 goto do_seek;
2936             case SDLK_DOWN:
2937                 incr = -60.0;
2938             do_seek:
2939                     if (seek_by_bytes) {
2940                         if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos >= 0) {
2941                             pos = cur_stream->video_current_pos;
2942                         } else if (cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos >= 0) {
2943                             pos = cur_stream->audio_pkt.pos;
2944                         } else
2945                             pos = avio_tell(cur_stream->ic->pb);
2946                         if (cur_stream->ic->bit_rate)
2947                             incr *= cur_stream->ic->bit_rate / 8.0;
2948                         else
2949                             incr *= 180000.0;
2950                         pos += incr;
2951                         stream_seek(cur_stream, pos, incr, 1);
2952                     } else {
2953                         pos = get_master_clock(cur_stream);
2954                         pos += incr;
2955                         stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
2956                     }
2957                 break;
2958             default:
2959                 break;
2960             }
2961             break;
2962         case SDL_VIDEOEXPOSE:
2963             cur_stream->force_refresh = 1;
2964             break;
2965         case SDL_MOUSEBUTTONDOWN:
2966             if (exit_on_mousedown) {
2967                 do_exit(cur_stream);
2968                 break;
2969             }
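                 /* fall through: a mouse click is handled as a seek below */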
2970         case SDL_MOUSEMOTION:
2971             if (event.type == SDL_MOUSEBUTTONDOWN) {
2972                 x = event.button.x;
2973             } else {
2974                 if (event.motion.state != SDL_PRESSED)
2975                     break;
2976                 x = event.motion.x;
2977             }
2978             if (seek_by_bytes || cur_stream->ic->duration <= 0) {
2979                 uint64_t size = avio_size(cur_stream->ic->pb);
2980                 stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
2981             } else {
2982                 int64_t ts;
2983                 int ns, hh, mm, ss;
2984                 int tns, thh, tmm, tss;
2985                 tns  = cur_stream->ic->duration / 1000000LL;
2986                 thh  = tns / 3600;
2987                 tmm  = (tns % 3600) / 60;
2988                 tss  = (tns % 60);
2989                 frac = x / cur_stream->width;
2990                 ns   = frac * tns;
2991                 hh   = ns / 3600;
2992                 mm   = (ns % 3600) / 60;
2993                 ss   = (ns % 60);
2994                 fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
2995                         hh, mm, ss, thh, tmm, tss);
2996                 ts = frac * cur_stream->ic->duration;
2997                 if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
2998                     ts += cur_stream->ic->start_time;
2999                 stream_seek(cur_stream, ts, 0, 0);
3000             }
3001             break;
3002         case SDL_VIDEORESIZE:
3003             screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
3004                                       SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
3005             screen_width  = cur_stream->width  = event.resize.w;
3006             screen_height = cur_stream->height = event.resize.h;
3007             cur_stream->force_refresh = 1;
3008             break;
3009         case SDL_QUIT:
3010         case FF_QUIT_EVENT:
3011             do_exit(cur_stream);
3012             break;
3013         case FF_ALLOC_EVENT:
3014             alloc_picture(event.user.data1);
3015             break;
3016         case FF_REFRESH_EVENT:
3017             video_refresh(event.user.data1);
3018             cur_stream->refresh = 0;
3019             break;
3020         default:
3021             break;
3022         }
3023     }
3024 }
3025
3026 static int opt_frame_size(void *optctx, const char *opt, const char *arg)
3027 {
3028     av_log(NULL, AV_LOG_WARNING, "Option -s is deprecated, use -video_size.\n");
3029     return opt_default(NULL, "video_size", arg);
3030 }
3031
3032 static int opt_width(void *optctx, const char *opt, const char *arg)
3033 {
3034     screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
3035     return 0;
3036 }
3037
3038 static int opt_height(void *optctx, const char *opt, const char *arg)
3039 {
3040     screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
3041     return 0;
3042 }
3043
3044 static int opt_format(void *optctx, const char *opt, const char *arg)
3045 {
3046     file_iformat = av_find_input_format(arg);
3047     if (!file_iformat) {
3048         fprintf(stderr, "Unknown input format: %s\n", arg);
3049         return AVERROR(EINVAL);
3050     }
3051     return 0;
3052 }
3053
3054 static int opt_frame_pix_fmt(void *optctx, const char *opt, const char *arg)
3055 {
3056     av_log(NULL, AV_LOG_WARNING, "Option -pix_fmt is deprecated, use -pixel_format.\n");
3057     return opt_default(NULL, "pixel_format", arg);
3058 }
3059
3060 static int opt_sync(void *optctx, const char *opt, const char *arg)
3061 {
3062     if (!strcmp(arg, "audio"))
3063         av_sync_type = AV_SYNC_AUDIO_MASTER;
3064     else if (!strcmp(arg, "video"))
3065         av_sync_type = AV_SYNC_VIDEO_MASTER;
3066     else if (!strcmp(arg, "ext"))
3067         av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
3068     else {
3069         fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
3070         exit(1);
3071     }
3072     return 0;
3073 }
3074
3075 static int opt_seek(void *optctx, const char *opt, const char *arg)
3076 {
3077     start_time = parse_time_or_die(opt, arg, 1);
3078     return 0;
3079 }
3080
3081 static int opt_duration(void *optctx, const char *opt, const char *arg)
3082 {
3083     duration = parse_time_or_die(opt, arg, 1);
3084     return 0;
3085 }
3086
3087 static int opt_show_mode(void *optctx, const char *opt, const char *arg)
3088 {
3089     show_mode = !strcmp(arg, "video") ? SHOW_MODE_VIDEO :
3090                 !strcmp(arg, "waves") ? SHOW_MODE_WAVES :
3091                 !strcmp(arg, "rdft" ) ? SHOW_MODE_RDFT  :
3092                 parse_number_or_die(opt, arg, OPT_INT, 0, SHOW_MODE_NB-1);
3093     return 0;
3094 }
3095
3096 static void opt_input_file(void *optctx, const char *filename)
3097 {
3098     if (input_filename) {
3099         fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
3100                 filename, input_filename);
3101         exit(1);
3102     }
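    /* "-" conventionally means standard input; map it to the pipe: protocol
     * so libavformat reads from stdin. */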
3103     if (!strcmp(filename, "-"))
3104         filename = "pipe:";
3105     input_filename = filename;
3106 }
3107
3108 static int opt_codec(void *o, const char *opt, const char *arg)
3109 {
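    /* The stream type is taken from the last character of the option as typed,
     * which is expected to carry a stream specifier (e.g. -codec:a, -codec:v,
     * -codec:s); any other suffix is silently ignored. */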
3110     switch (opt[strlen(opt) - 1]) {
3111     case 'a' :    audio_codec_name = arg; break;
3112     case 's' : subtitle_codec_name = arg; break;
3113     case 'v' :    video_codec_name = arg; break;
3114     }
3115     return 0;
3116 }
3117
3118 static int dummy;
3119
3120 static const OptionDef options[] = {
3121 #include "cmdutils_common_opts.h"
3122     { "x", HAS_ARG, { .func_arg = opt_width }, "force displayed width", "width" },
3123     { "y", HAS_ARG, { .func_arg = opt_height }, "force displayed height", "height" },
3124     { "s", HAS_ARG | OPT_VIDEO, { .func_arg = opt_frame_size }, "set frame size (WxH or abbreviation)", "size" },
3125     { "fs", OPT_BOOL, { &is_full_screen }, "force full screen" },
3126     { "an", OPT_BOOL, { &audio_disable }, "disable audio" },
3127     { "vn", OPT_BOOL, { &video_disable }, "disable video" },
3128     { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_AUDIO] }, "select desired audio stream", "stream_number" },
3129     { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_VIDEO] }, "select desired video stream", "stream_number" },
3130     { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_SUBTITLE] }, "select desired subtitle stream", "stream_number" },
3131     { "ss", HAS_ARG, { .func_arg = opt_seek }, "seek to a given position in seconds", "pos" },
3132     { "t", HAS_ARG, { .func_arg = opt_duration }, "play \"duration\" seconds of audio/video", "duration" },
3133     { "bytes", OPT_INT | HAS_ARG, { &seek_by_bytes }, "seek by bytes 0=off 1=on -1=auto", "val" },
3134     { "nodisp", OPT_BOOL, { &display_disable }, "disable graphical display" },
3135     { "f", HAS_ARG, { .func_arg = opt_format }, "force format", "fmt" },
3136     { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, { .func_arg = opt_frame_pix_fmt }, "set pixel format", "format" },
3137     { "stats", OPT_BOOL | OPT_EXPERT, { &show_status }, "show status", "" },
3138     { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, { &workaround_bugs }, "workaround bugs", "" },
3139     { "fast", OPT_BOOL | OPT_EXPERT, { &fast }, "non spec compliant optimizations", "" },
3140     { "genpts", OPT_BOOL | OPT_EXPERT, { &genpts }, "generate pts", "" },
3141     { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, { &decoder_reorder_pts }, "let decoder reorder pts 0=off 1=on -1=auto", ""},
3142     { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, { &lowres }, "decode at reduced resolution (1=1/2, 2=1/4, ...)", "factor" },
3143     { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, { &skip_loop_filter }, "set loop filter skipping mode", "mode" },
3144     { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, { &skip_frame }, "set frame skipping mode", "mode" },
3145     { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, { &skip_idct }, "set IDCT skipping mode", "mode" },
3146     { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, { &idct }, "set idct algo",  "algo" },
3147     { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, { &error_concealment }, "set error concealment options",  "bit_mask" },
3148     { "sync", HAS_ARG | OPT_EXPERT, { .func_arg = opt_sync }, "set audio-video sync. type (type=audio/video/ext)", "type" },
3149     { "autoexit", OPT_BOOL | OPT_EXPERT, { &autoexit }, "exit at the end", "" },
3150     { "exitonkeydown", OPT_BOOL | OPT_EXPERT, { &exit_on_keydown }, "exit on key down", "" },
3151     { "exitonmousedown", OPT_BOOL | OPT_EXPERT, { &exit_on_mousedown }, "exit on mouse down", "" },
3152     { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, { &loop }, "set number of times the playback shall be looped", "loop count" },
3153     { "framedrop", OPT_BOOL | OPT_EXPERT, { &framedrop }, "drop frames when cpu is too slow", "" },
3154     { "infbuf", OPT_BOOL | OPT_EXPERT, { &infinite_buffer }, "don't limit the input buffer size (useful with realtime streams)", "" },
3155     { "window_title", OPT_STRING | HAS_ARG, { &window_title }, "set window title", "window title" },
3156 #if CONFIG_AVFILTER
3157     { "vf", OPT_STRING | HAS_ARG, { &vfilters }, "video filters", "filter list" },
3158 #endif
3159     { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, { &rdftspeed }, "rdft speed", "msecs" },
3160     { "showmode", HAS_ARG, { .func_arg = opt_show_mode }, "select show mode (0 = video, 1 = waves, 2 = RDFT)", "mode" },
3161     { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, { .func_arg = opt_default }, "generic catch all option", "" },
3162     { "i", OPT_BOOL, { &dummy }, "read specified file", "input_file" },
3163     { "codec", HAS_ARG, { .func_arg = opt_codec }, "force decoder", "decoder" },
3164     { NULL, },
3165 };
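
/* Illustrative command lines for some of the options above (not exhaustive):
 *   ffplay -autoexit -nodisp input.mp3       audio-only playback, exit at the end
 *   ffplay -ss 00:01:30 -t 10 input.mkv      play 10 seconds starting at 1:30
 *   ffplay -sync video -vf hflip input.mp4   video master clock; -vf needs CONFIG_AVFILTER
 */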
3166
3167 static void show_usage(void)
3168 {
3169     av_log(NULL, AV_LOG_INFO, "Simple media player\n");
3170     av_log(NULL, AV_LOG_INFO, "usage: %s [options] input_file\n", program_name);
3171     av_log(NULL, AV_LOG_INFO, "\n");
3172 }
3173
3174 void show_help_default(const char *opt, const char *arg)
3175 {
3176     av_log_set_callback(log_callback_help);
3177     show_usage();
3178     show_help_options(options, "Main options:", 0, OPT_EXPERT, 0);
3179     show_help_options(options, "Advanced options:", OPT_EXPERT, 0, 0);
3180     printf("\n");
3181     show_help_children(avcodec_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3182     show_help_children(avformat_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3183 #if !CONFIG_AVFILTER
3184     show_help_children(sws_get_class(), AV_OPT_FLAG_ENCODING_PARAM);
3185 #else
3186     show_help_children(avfilter_get_class(), AV_OPT_FLAG_FILTERING_PARAM);
3187 #endif
3188     printf("\nWhile playing:\n"
3189            "q, ESC              quit\n"
3190            "f                   toggle full screen\n"
3191            "p, SPC              pause\n"
3192            "a                   cycle audio channel\n"
3193            "v                   cycle video channel\n"
3194            "t                   cycle subtitle channel\n"
3195            "w                   show audio waves\n"
3196            "s                   activate frame-step mode\n"
3197            "left/right          seek backward/forward 10 seconds\n"
3198            "down/up             seek backward/forward 1 minute\n"
3199            "page down/page up   seek backward/forward 10 minutes\n"
3200            "mouse click         seek to percentage in file corresponding to fraction of width\n"
3201            );
3202 }
3203
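/* libavcodec lock manager callback backed by SDL mutexes; a non-zero return
 * value tells FFmpeg that the requested lock operation failed. */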
3204 static int lockmgr(void **mtx, enum AVLockOp op)
3205 {
3206     switch (op) {
3207     case AV_LOCK_CREATE:
3208         *mtx = SDL_CreateMutex();
3209         if (!*mtx)
3210             return 1;
3211         return 0;
3212     case AV_LOCK_OBTAIN:
3213         return !!SDL_LockMutex(*mtx);
3214     case AV_LOCK_RELEASE:
3215         return !!SDL_UnlockMutex(*mtx);
3216     case AV_LOCK_DESTROY:
3217         SDL_DestroyMutex(*mtx);
3218         return 0;
3219     }
3220     return 1;
3221 }
3222
3223 /* Program entry point */
3224 int main(int argc, char **argv)
3225 {
3226     int flags;
3227     VideoState *is;
3228     char dummy_videodriver[] = "SDL_VIDEODRIVER=dummy";
3229
3230     av_log_set_flags(AV_LOG_SKIP_REPEATED);
3231     parse_loglevel(argc, argv, options);
3232
3233     /* register all codecs, demuxers and protocols */
3234     avcodec_register_all();
3235 #if CONFIG_AVDEVICE
3236     avdevice_register_all();
3237 #endif
3238 #if CONFIG_AVFILTER
3239     avfilter_register_all();
3240 #endif
3241     av_register_all();
3242     avformat_network_init();
3243
3244     init_opts();
3245
3246     signal(SIGINT , sigterm_handler); /* Interrupt (ANSI).    */
3247     signal(SIGTERM, sigterm_handler); /* Termination (ANSI).  */
3248
3249     show_banner(argc, argv, options);
3250
3251     parse_options(NULL, argc, argv, options, opt_input_file);
3252
3253     if (!input_filename) {
3254         show_usage();
3255         fprintf(stderr, "An input file must be specified\n");
3256         fprintf(stderr, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
3257         exit(1);
3258     }
3259
3260     if (display_disable) {
3261         video_disable = 1;
3262     }
3263     flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3264     if (audio_disable)
3265         flags &= ~SDL_INIT_AUDIO;
3266     if (display_disable)
3267         SDL_putenv(dummy_videodriver); /* For the event queue, we always need a video driver. */
3268 #if !defined(__MINGW32__) && !defined(__APPLE__)
3269     flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
3270 #endif
3271     if (SDL_Init(flags)) {
3272         fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
3273         fprintf(stderr, "(Did you set the DISPLAY variable?)\n");
3274         exit(1);
3275     }
3276
3277     if (!display_disable) {
3278 #if HAVE_SDL_VIDEO_SIZE
3279         const SDL_VideoInfo *vi = SDL_GetVideoInfo();
3280         fs_screen_width = vi->current_w;
3281         fs_screen_height = vi->current_h;
3282 #endif
3283     }
3284
3285     SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
3286     SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3287     SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3288
3289     if (av_lockmgr_register(lockmgr)) {
3290         fprintf(stderr, "Could not initialize lock manager!\n");
3291         do_exit(NULL);
3292     }
3293
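    /* flush_pkt is a sentinel packet that is queued on seeks; the decoder
     * threads recognise it by its data pointer and flush their codec buffers. */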
3294     av_init_packet(&flush_pkt);
3295     flush_pkt.data = (uint8_t *)(intptr_t)"FLUSH";
3296
3297     is = stream_open(input_filename, file_iformat);
3298     if (!is) {
3299         fprintf(stderr, "Failed to initialize VideoState!\n");
3300         do_exit(NULL);
3301     }
3302
3303     event_loop(is);
3304
3305     /* event_loop() never returns, so this point is not reached */
3306
3307     return 0;
3308 }