ffplay: fill the unused part of the window with black
1 /*
2  * Copyright (c) 2003 Fabrice Bellard
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20
21 /**
22  * @file
23  * simple media player based on the FFmpeg libraries
24  */
25
26 #include "config.h"
27 #include <inttypes.h>
28 #include <math.h>
29 #include <limits.h>
30 #include <signal.h>
31 #include "libavutil/avstring.h"
32 #include "libavutil/colorspace.h"
33 #include "libavutil/mathematics.h"
34 #include "libavutil/pixdesc.h"
35 #include "libavutil/imgutils.h"
36 #include "libavutil/dict.h"
37 #include "libavutil/parseutils.h"
38 #include "libavutil/samplefmt.h"
39 #include "libavutil/avassert.h"
40 #include "libavutil/time.h"
41 #include "libavformat/avformat.h"
42 #include "libavdevice/avdevice.h"
43 #include "libswscale/swscale.h"
44 #include "libavutil/opt.h"
45 #include "libavcodec/avfft.h"
46 #include "libswresample/swresample.h"
47
48 #if CONFIG_AVFILTER
49 # include "libavfilter/avcodec.h"
50 # include "libavfilter/avfilter.h"
51 # include "libavfilter/avfiltergraph.h"
52 # include "libavfilter/buffersink.h"
53 # include "libavfilter/buffersrc.h"
54 #endif
55
56 #include <SDL.h>
57 #include <SDL_thread.h>
58
59 #include "cmdutils.h"
60
61 #include <assert.h>
62
63 const char program_name[] = "ffplay";
64 const int program_birth_year = 2003;
65
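/* limits for the demuxed packet queues: maximum total byte size and minimum number of queued packets */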
66 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
67 #define MIN_FRAMES 5
68
69 /* SDL audio buffer size, in samples. Should be small to have precise
70    A/V sync as SDL does not have hardware buffer fullness info. */
71 #define SDL_AUDIO_BUFFER_SIZE 1024
72
73 /* no AV sync correction is done if below the AV sync threshold */
74 #define AV_SYNC_THRESHOLD 0.01
75 /* no AV correction is done if too big error */
76 #define AV_NOSYNC_THRESHOLD 10.0
77
78 /* maximum audio speed change to get correct sync */
79 #define SAMPLE_CORRECTION_PERCENT_MAX 10
80
81 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
82 #define AUDIO_DIFF_AVG_NB   20
83
84 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
85 /* TODO: We assume that a decoded and resampled frame fits into this buffer */
86 #define SAMPLE_ARRAY_SIZE (8 * 65536)
87
88 static int sws_flags = SWS_BICUBIC;
89
90 typedef struct MyAVPacketList {
91     AVPacket pkt;
92     struct MyAVPacketList *next;
93     int serial;
94 } MyAVPacketList;
95
96 typedef struct PacketQueue {
97     MyAVPacketList *first_pkt, *last_pkt;
98     int nb_packets;
99     int size;
100     int abort_request;
101     int serial;
102     SDL_mutex *mutex;
103     SDL_cond *cond;
104 } PacketQueue;
105
106 #define VIDEO_PICTURE_QUEUE_SIZE 4
107 #define SUBPICTURE_QUEUE_SIZE 4
108
109 typedef struct VideoPicture {
110     double pts;             // presentation timestamp for this picture
111     int64_t pos;            // byte position in file
112     int skip;
113     SDL_Overlay *bmp;
114     int width, height; /* source width & height */
115     AVRational sample_aspect_ratio;
116     int allocated;
117     int reallocate;
118     int serial;
119
120 #if CONFIG_AVFILTER
121     AVFilterBufferRef *picref;
122 #endif
123 } VideoPicture;
124
125 typedef struct SubPicture {
126     double pts; /* presentation time stamp for this picture */
127     AVSubtitle sub;
128 } SubPicture;
129
130 typedef struct AudioParams {
131     int freq;
132     int channels;
133     int channel_layout;
134     enum AVSampleFormat fmt;
135 } AudioParams;
136
137 enum {
138     AV_SYNC_AUDIO_MASTER, /* default choice */
139     AV_SYNC_VIDEO_MASTER,
140     AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
141 };
142
143 typedef struct VideoState {
144     SDL_Thread *read_tid;
145     SDL_Thread *video_tid;
146     SDL_Thread *refresh_tid;
147     AVInputFormat *iformat;
148     int no_background;
149     int abort_request;
150     int force_refresh;
151     int paused;
152     int last_paused;
153     int que_attachments_req;
154     int seek_req;
155     int seek_flags;
156     int64_t seek_pos;
157     int64_t seek_rel;
158     int read_pause_return;
159     AVFormatContext *ic;
160
161     int audio_stream;
162
163     int av_sync_type;
164     double external_clock;                   ///< external clock base
165     double external_clock_drift;             ///< external clock base - time (av_gettime) at which we updated external_clock
166     int64_t external_clock_time;             ///< last reference time
167
168     double audio_clock;
169     double audio_diff_cum; /* used for AV difference average computation */
170     double audio_diff_avg_coef;
171     double audio_diff_threshold;
172     int audio_diff_avg_count;
173     AVStream *audio_st;
174     PacketQueue audioq;
175     int audio_hw_buf_size;
176     DECLARE_ALIGNED(16,uint8_t,audio_buf2)[AVCODEC_MAX_AUDIO_FRAME_SIZE * 4];
177     uint8_t silence_buf[SDL_AUDIO_BUFFER_SIZE];
178     uint8_t *audio_buf;
179     uint8_t *audio_buf1;
180     unsigned int audio_buf_size; /* in bytes */
181     int audio_buf_index; /* in bytes */
182     int audio_write_buf_size;
183     AVPacket audio_pkt_temp;
184     AVPacket audio_pkt;
185     int audio_pkt_temp_serial;
186     struct AudioParams audio_src;
187     struct AudioParams audio_tgt;
188     struct SwrContext *swr_ctx;
189     double audio_current_pts;
190     double audio_current_pts_drift;
191     int frame_drops_early;
192     int frame_drops_late;
193     AVFrame *frame;
194
195     enum ShowMode {
196         SHOW_MODE_NONE = -1, SHOW_MODE_VIDEO = 0, SHOW_MODE_WAVES, SHOW_MODE_RDFT, SHOW_MODE_NB
197     } show_mode;
198     int16_t sample_array[SAMPLE_ARRAY_SIZE];
199     int sample_array_index;
200     int last_i_start;
201     RDFTContext *rdft;
202     int rdft_bits;
203     FFTSample *rdft_data;
204     int xpos;
205
206     SDL_Thread *subtitle_tid;
207     int subtitle_stream;
208     int subtitle_stream_changed;
209     AVStream *subtitle_st;
210     PacketQueue subtitleq;
211     SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
212     int subpq_size, subpq_rindex, subpq_windex;
213     SDL_mutex *subpq_mutex;
214     SDL_cond *subpq_cond;
215
216     double frame_timer;
217     double frame_last_pts;
218     double frame_last_duration;
219     double frame_last_dropped_pts;
220     double frame_last_returned_time;
221     double frame_last_filter_delay;
222     int64_t frame_last_dropped_pos;
223     double video_clock;             // pts of last decoded frame / predicted pts of next decoded frame
224     int video_stream;
225     AVStream *video_st;
226     PacketQueue videoq;
227     double video_current_pts;       // current displayed pts (different from video_clock if frame fifos are used)
228     double video_current_pts_drift; // video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
229     int64_t video_current_pos;      // current displayed file pos
230     VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
231     int pictq_size, pictq_rindex, pictq_windex;
232     SDL_mutex *pictq_mutex;
233     SDL_cond *pictq_cond;
234 #if !CONFIG_AVFILTER
235     struct SwsContext *img_convert_ctx;
236 #endif
237     SDL_Rect last_display_rect;
238
239     char filename[1024];
240     int width, height, xleft, ytop;
241     int step;
242
243 #if CONFIG_AVFILTER
244     AVFilterContext *in_video_filter;   // the first filter in the video chain
245     AVFilterContext *out_video_filter;  // the last filter in the video chain
246     int use_dr1;
247     FrameBuffer *buffer_pool;
248 #endif
249
250     int refresh;
251     int last_video_stream, last_audio_stream, last_subtitle_stream;
252
253     SDL_cond *continue_read_thread;
254 } VideoState;
255
256 /* options specified by the user */
257 static AVInputFormat *file_iformat;
258 static const char *input_filename;
259 static const char *window_title;
260 static int fs_screen_width;
261 static int fs_screen_height;
262 static int screen_width  = 0;
263 static int screen_height = 0;
264 static int audio_disable;
265 static int video_disable;
266 static int wanted_stream[AVMEDIA_TYPE_NB] = {
267     [AVMEDIA_TYPE_AUDIO]    = -1,
268     [AVMEDIA_TYPE_VIDEO]    = -1,
269     [AVMEDIA_TYPE_SUBTITLE] = -1,
270 };
271 static int seek_by_bytes = -1;
272 static int display_disable;
273 static int show_status = 1;
274 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
275 static int64_t start_time = AV_NOPTS_VALUE;
276 static int64_t duration = AV_NOPTS_VALUE;
277 static int workaround_bugs = 1;
278 static int fast = 0;
279 static int genpts = 0;
280 static int lowres = 0;
281 static int idct = FF_IDCT_AUTO;
282 static enum AVDiscard skip_frame       = AVDISCARD_DEFAULT;
283 static enum AVDiscard skip_idct        = AVDISCARD_DEFAULT;
284 static enum AVDiscard skip_loop_filter = AVDISCARD_DEFAULT;
285 static int error_concealment = 3;
286 static int decoder_reorder_pts = -1;
287 static int autoexit;
288 static int exit_on_keydown;
289 static int exit_on_mousedown;
290 static int loop = 1;
291 static int framedrop = -1;
292 static int infinite_buffer = -1;
293 static enum ShowMode show_mode = SHOW_MODE_NONE;
294 static const char *audio_codec_name;
295 static const char *subtitle_codec_name;
296 static const char *video_codec_name;
297 static int rdftspeed = 20;
298 #if CONFIG_AVFILTER
299 static char *vfilters = NULL;
300 #endif
301
302 /* current context */
303 static int is_full_screen;
304 static int64_t audio_callback_time;
305
306 static AVPacket flush_pkt;
307
308 #define FF_ALLOC_EVENT   (SDL_USEREVENT)
309 #define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
310 #define FF_QUIT_EVENT    (SDL_USEREVENT + 2)
311
312 static SDL_Surface *screen;
313
314 static int packet_queue_put(PacketQueue *q, AVPacket *pkt);
315
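/* append a packet to the queue; the caller must hold q->mutex */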
316 static int packet_queue_put_private(PacketQueue *q, AVPacket *pkt)
317 {
318     MyAVPacketList *pkt1;
319
320     if (q->abort_request)
321        return -1;
322
323     pkt1 = av_malloc(sizeof(MyAVPacketList));
324     if (!pkt1)
325         return -1;
326     pkt1->pkt = *pkt;
327     pkt1->next = NULL;
328     if (pkt == &flush_pkt)
329         q->serial++;
330     pkt1->serial = q->serial;
331
332     if (!q->last_pkt)
333         q->first_pkt = pkt1;
334     else
335         q->last_pkt->next = pkt1;
336     q->last_pkt = pkt1;
337     q->nb_packets++;
338     q->size += pkt1->pkt.size + sizeof(*pkt1);
339     /* XXX: should duplicate packet data in DV case */
340     SDL_CondSignal(q->cond);
341     return 0;
342 }
343
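/* duplicate the packet if needed and append it to the queue (thread-safe wrapper) */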
344 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
345 {
346     int ret;
347
348     /* duplicate the packet */
349     if (pkt != &flush_pkt && av_dup_packet(pkt) < 0)
350         return -1;
351
352     SDL_LockMutex(q->mutex);
353     ret = packet_queue_put_private(q, pkt);
354     SDL_UnlockMutex(q->mutex);
355
356     if (pkt != &flush_pkt && ret < 0)
357         av_free_packet(pkt);
358
359     return ret;
360 }
361
362 /* packet queue handling */
363 static void packet_queue_init(PacketQueue *q)
364 {
365     memset(q, 0, sizeof(PacketQueue));
366     q->mutex = SDL_CreateMutex();
367     q->cond = SDL_CreateCond();
368     q->abort_request = 1;
369 }
370
371 static void packet_queue_flush(PacketQueue *q)
372 {
373     MyAVPacketList *pkt, *pkt1;
374
375     SDL_LockMutex(q->mutex);
376     for (pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
377         pkt1 = pkt->next;
378         av_free_packet(&pkt->pkt);
379         av_freep(&pkt);
380     }
381     q->last_pkt = NULL;
382     q->first_pkt = NULL;
383     q->nb_packets = 0;
384     q->size = 0;
385     SDL_UnlockMutex(q->mutex);
386 }
387
388 static void packet_queue_destroy(PacketQueue *q)
389 {
390     packet_queue_flush(q);
391     SDL_DestroyMutex(q->mutex);
392     SDL_DestroyCond(q->cond);
393 }
394
395 static void packet_queue_abort(PacketQueue *q)
396 {
397     SDL_LockMutex(q->mutex);
398
399     q->abort_request = 1;
400
401     SDL_CondSignal(q->cond);
402
403     SDL_UnlockMutex(q->mutex);
404 }
405
406 static void packet_queue_start(PacketQueue *q)
407 {
408     SDL_LockMutex(q->mutex);
409     q->abort_request = 0;
410     packet_queue_put_private(q, &flush_pkt);
411     SDL_UnlockMutex(q->mutex);
412 }
413
414 /* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
415 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block, int *serial)
416 {
417     MyAVPacketList *pkt1;
418     int ret;
419
420     SDL_LockMutex(q->mutex);
421
422     for (;;) {
423         if (q->abort_request) {
424             ret = -1;
425             break;
426         }
427
428         pkt1 = q->first_pkt;
429         if (pkt1) {
430             q->first_pkt = pkt1->next;
431             if (!q->first_pkt)
432                 q->last_pkt = NULL;
433             q->nb_packets--;
434             q->size -= pkt1->pkt.size + sizeof(*pkt1);
435             *pkt = pkt1->pkt;
436             if (serial)
437                 *serial = pkt1->serial;
438             av_free(pkt1);
439             ret = 1;
440             break;
441         } else if (!block) {
442             ret = 0;
443             break;
444         } else {
445             SDL_CondWait(q->cond, q->mutex);
446         }
447     }
448     SDL_UnlockMutex(q->mutex);
449     return ret;
450 }
451
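/* fill a rectangle of the screen with the given color and optionally update the display */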
452 static inline void fill_rectangle(SDL_Surface *screen,
453                                   int x, int y, int w, int h, int color, int update)
454 {
455     SDL_Rect rect;
456     rect.x = x;
457     rect.y = y;
458     rect.w = w;
459     rect.h = h;
460     SDL_FillRect(screen, &rect, color);
461     if (update && w > 0 && h > 0)
462         SDL_UpdateRect(screen, x, y, w, h);
463 }
464
465 /* draw only the border of a rectangle */
466 static void fill_border(int xleft, int ytop, int width, int height, int x, int y, int w, int h, int color, int update)
467 {
468     int w1, w2, h1, h2;
469
470     /* fill the background */
471     w1 = x;
472     if (w1 < 0)
473         w1 = 0;
474     w2 = width - (x + w);
475     if (w2 < 0)
476         w2 = 0;
477     h1 = y;
478     if (h1 < 0)
479         h1 = 0;
480     h2 = height - (y + h);
481     if (h2 < 0)
482         h2 = 0;
483     fill_rectangle(screen,
484                    xleft, ytop,
485                    w1, height,
486                    color, update);
487     fill_rectangle(screen,
488                    xleft + width - w2, ytop,
489                    w2, height,
490                    color, update);
491     fill_rectangle(screen,
492                    xleft + w1, ytop,
493                    width - w1 - w2, h1,
494                    color, update);
495     fill_rectangle(screen,
496                    xleft + w1, ytop + height - h2,
497                    width - w1 - w2, h2,
498                    color, update);
499 }
500
501 #define ALPHA_BLEND(a, oldp, newp, s)\
502 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
503
504 #define RGBA_IN(r, g, b, a, s)\
505 {\
506     unsigned int v = ((const uint32_t *)(s))[0];\
507     a = (v >> 24) & 0xff;\
508     r = (v >> 16) & 0xff;\
509     g = (v >> 8) & 0xff;\
510     b = v & 0xff;\
511 }
512
513 #define YUVA_IN(y, u, v, a, s, pal)\
514 {\
515     unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
516     a = (val >> 24) & 0xff;\
517     y = (val >> 16) & 0xff;\
518     u = (val >> 8) & 0xff;\
519     v = val & 0xff;\
520 }
521
522 #define YUVA_OUT(d, y, u, v, a)\
523 {\
524     ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
525 }
526
527
528 #define BPP 1
529
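/* alpha-blend a palettized subtitle rectangle onto a YUV420P destination picture */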
530 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
531 {
532     int wrap, wrap3, width2, skip2;
533     int y, u, v, a, u1, v1, a1, w, h;
534     uint8_t *lum, *cb, *cr;
535     const uint8_t *p;
536     const uint32_t *pal;
537     int dstx, dsty, dstw, dsth;
538
539     dstw = av_clip(rect->w, 0, imgw);
540     dsth = av_clip(rect->h, 0, imgh);
541     dstx = av_clip(rect->x, 0, imgw - dstw);
542     dsty = av_clip(rect->y, 0, imgh - dsth);
543     lum = dst->data[0] + dsty * dst->linesize[0];
544     cb  = dst->data[1] + (dsty >> 1) * dst->linesize[1];
545     cr  = dst->data[2] + (dsty >> 1) * dst->linesize[2];
546
547     width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
548     skip2 = dstx >> 1;
549     wrap = dst->linesize[0];
550     wrap3 = rect->pict.linesize[0];
551     p = rect->pict.data[0];
552     pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */
553
554     if (dsty & 1) {
555         lum += dstx;
556         cb += skip2;
557         cr += skip2;
558
559         if (dstx & 1) {
560             YUVA_IN(y, u, v, a, p, pal);
561             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
562             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
563             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
564             cb++;
565             cr++;
566             lum++;
567             p += BPP;
568         }
569         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
570             YUVA_IN(y, u, v, a, p, pal);
571             u1 = u;
572             v1 = v;
573             a1 = a;
574             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
575
576             YUVA_IN(y, u, v, a, p + BPP, pal);
577             u1 += u;
578             v1 += v;
579             a1 += a;
580             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
581             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
582             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
583             cb++;
584             cr++;
585             p += 2 * BPP;
586             lum += 2;
587         }
588         if (w) {
589             YUVA_IN(y, u, v, a, p, pal);
590             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
591             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
592             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
593             p++;
594             lum++;
595         }
596         p += wrap3 - dstw * BPP;
597         lum += wrap - dstw - dstx;
598         cb += dst->linesize[1] - width2 - skip2;
599         cr += dst->linesize[2] - width2 - skip2;
600     }
601     for (h = dsth - (dsty & 1); h >= 2; h -= 2) {
602         lum += dstx;
603         cb += skip2;
604         cr += skip2;
605
606         if (dstx & 1) {
607             YUVA_IN(y, u, v, a, p, pal);
608             u1 = u;
609             v1 = v;
610             a1 = a;
611             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
612             p += wrap3;
613             lum += wrap;
614             YUVA_IN(y, u, v, a, p, pal);
615             u1 += u;
616             v1 += v;
617             a1 += a;
618             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
619             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
620             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
621             cb++;
622             cr++;
623             p += -wrap3 + BPP;
624             lum += -wrap + 1;
625         }
626         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
627             YUVA_IN(y, u, v, a, p, pal);
628             u1 = u;
629             v1 = v;
630             a1 = a;
631             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
632
633             YUVA_IN(y, u, v, a, p + BPP, pal);
634             u1 += u;
635             v1 += v;
636             a1 += a;
637             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
638             p += wrap3;
639             lum += wrap;
640
641             YUVA_IN(y, u, v, a, p, pal);
642             u1 += u;
643             v1 += v;
644             a1 += a;
645             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
646
647             YUVA_IN(y, u, v, a, p + BPP, pal);
648             u1 += u;
649             v1 += v;
650             a1 += a;
651             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
652
653             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
654             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
655
656             cb++;
657             cr++;
658             p += -wrap3 + 2 * BPP;
659             lum += -wrap + 2;
660         }
661         if (w) {
662             YUVA_IN(y, u, v, a, p, pal);
663             u1 = u;
664             v1 = v;
665             a1 = a;
666             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
667             p += wrap3;
668             lum += wrap;
669             YUVA_IN(y, u, v, a, p, pal);
670             u1 += u;
671             v1 += v;
672             a1 += a;
673             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
674             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
675             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
676             cb++;
677             cr++;
678             p += -wrap3 + BPP;
679             lum += -wrap + 1;
680         }
681         p += wrap3 + (wrap3 - dstw * BPP);
682         lum += wrap + (wrap - dstw - dstx);
683         cb += dst->linesize[1] - width2 - skip2;
684         cr += dst->linesize[2] - width2 - skip2;
685     }
686     /* handle odd height */
687     if (h) {
688         lum += dstx;
689         cb += skip2;
690         cr += skip2;
691
692         if (dstx & 1) {
693             YUVA_IN(y, u, v, a, p, pal);
694             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
695             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
696             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
697             cb++;
698             cr++;
699             lum++;
700             p += BPP;
701         }
702         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
703             YUVA_IN(y, u, v, a, p, pal);
704             u1 = u;
705             v1 = v;
706             a1 = a;
707             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
708
709             YUVA_IN(y, u, v, a, p + BPP, pal);
710             u1 += u;
711             v1 += v;
712             a1 += a;
713             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
714             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
715             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
716             cb++;
717             cr++;
718             p += 2 * BPP;
719             lum += 2;
720         }
721         if (w) {
722             YUVA_IN(y, u, v, a, p, pal);
723             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
724             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
725             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
726         }
727     }
728 }
729
730 static void free_subpicture(SubPicture *sp)
731 {
732     avsubtitle_free(&sp->sub);
733 }
734
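/* compute the display rectangle: scale the picture to fit the screen area while keeping its aspect ratio and centering it */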
735 static void calculate_display_rect(SDL_Rect *rect, int scr_xleft, int scr_ytop, int scr_width, int scr_height, VideoPicture *vp)
736 {
737     float aspect_ratio;
738     int width, height, x, y;
739
740     if (vp->sample_aspect_ratio.num == 0)
741         aspect_ratio = 0;
742     else
743         aspect_ratio = av_q2d(vp->sample_aspect_ratio);
744
745     if (aspect_ratio <= 0.0)
746         aspect_ratio = 1.0;
747     aspect_ratio *= (float)vp->width / (float)vp->height;
748
749     /* XXX: we assume the screen has a 1:1 pixel aspect ratio */
750     height = scr_height;
751     width = ((int)rint(height * aspect_ratio)) & ~1;
752     if (width > scr_width) {
753         width = scr_width;
754         height = ((int)rint(width / aspect_ratio)) & ~1;
755     }
756     x = (scr_width - width) / 2;
757     y = (scr_height - height) / 2;
758     rect->x = scr_xleft + x;
759     rect->y = scr_ytop  + y;
760     rect->w = FFMAX(width,  1);
761     rect->h = FFMAX(height, 1);
762 }
763
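/* display the current picture, blend any active subtitle and fill the unused border with black */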
764 static void video_image_display(VideoState *is)
765 {
766     VideoPicture *vp;
767     SubPicture *sp;
768     AVPicture pict;
769     SDL_Rect rect;
770     int i;
771
772     vp = &is->pictq[is->pictq_rindex];
773     if (vp->bmp) {
774         if (is->subtitle_st) {
775             if (is->subpq_size > 0) {
776                 sp = &is->subpq[is->subpq_rindex];
777
778                 if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) {
779                     SDL_LockYUVOverlay (vp->bmp);
780
781                     pict.data[0] = vp->bmp->pixels[0];
782                     pict.data[1] = vp->bmp->pixels[2];
783                     pict.data[2] = vp->bmp->pixels[1];
784
785                     pict.linesize[0] = vp->bmp->pitches[0];
786                     pict.linesize[1] = vp->bmp->pitches[2];
787                     pict.linesize[2] = vp->bmp->pitches[1];
788
789                     for (i = 0; i < sp->sub.num_rects; i++)
790                         blend_subrect(&pict, sp->sub.rects[i],
791                                       vp->bmp->w, vp->bmp->h);
792
793                     SDL_UnlockYUVOverlay (vp->bmp);
794                 }
795             }
796         }
797
798         calculate_display_rect(&rect, is->xleft, is->ytop, is->width, is->height, vp);
799
800         SDL_DisplayYUVOverlay(vp->bmp, &rect);
801
802         if (rect.x != is->last_display_rect.x || rect.y != is->last_display_rect.y || rect.w != is->last_display_rect.w || rect.h != is->last_display_rect.h || is->force_refresh) {
803             int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
804             fill_border(is->xleft, is->ytop, is->width, is->height, rect.x, rect.y, rect.w, rect.h, bgcolor, 1);
805             is->last_display_rect = rect;
806         }
807     }
808 }
809
810 static inline int compute_mod(int a, int b)
811 {
812     return a < 0 ? a%b + b : a%b;
813 }
814
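/* draw the audio visualization: waveforms or an RDFT spectrum depending on the show mode */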
815 static void video_audio_display(VideoState *s)
816 {
817     int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
818     int ch, channels, h, h2, bgcolor, fgcolor;
819     int64_t time_diff;
820     int rdft_bits, nb_freq;
821
822     for (rdft_bits = 1; (1 << rdft_bits) < 2 * s->height; rdft_bits++)
823         ;
824     nb_freq = 1 << (rdft_bits - 1);
825
826     /* compute display index : center on currently output samples */
827     channels = s->audio_tgt.channels;
828     nb_display_channels = channels;
829     if (!s->paused) {
830         int data_used= s->show_mode == SHOW_MODE_WAVES ? s->width : (2*nb_freq);
831         n = 2 * channels;
832         delay = s->audio_write_buf_size;
833         delay /= n;
834
835         /* to be more precise, we take into account the time spent since
836            the last buffer computation */
837         if (audio_callback_time) {
838             time_diff = av_gettime() - audio_callback_time;
839             delay -= (time_diff * s->audio_tgt.freq) / 1000000;
840         }
841
842         delay += 2 * data_used;
843         if (delay < data_used)
844             delay = data_used;
845
846         i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
847         if (s->show_mode == SHOW_MODE_WAVES) {
848             h = INT_MIN;
849             for (i = 0; i < 1000; i += channels) {
850                 int idx = (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
851                 int a = s->sample_array[idx];
852                 int b = s->sample_array[(idx + 4 * channels) % SAMPLE_ARRAY_SIZE];
853                 int c = s->sample_array[(idx + 5 * channels) % SAMPLE_ARRAY_SIZE];
854                 int d = s->sample_array[(idx + 9 * channels) % SAMPLE_ARRAY_SIZE];
855                 int score = a - d;
856                 if (h < score && (b ^ c) < 0) {
857                     h = score;
858                     i_start = idx;
859                 }
860             }
861         }
862
863         s->last_i_start = i_start;
864     } else {
865         i_start = s->last_i_start;
866     }
867
868     bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
869     if (s->show_mode == SHOW_MODE_WAVES) {
870         fill_rectangle(screen,
871                        s->xleft, s->ytop, s->width, s->height,
872                        bgcolor, 0);
873
874         fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
875
876         /* total height for one channel */
877         h = s->height / nb_display_channels;
878         /* graph height / 2 */
879         h2 = (h * 9) / 20;
880         for (ch = 0; ch < nb_display_channels; ch++) {
881             i = i_start + ch;
882             y1 = s->ytop + ch * h + (h / 2); /* position of center line */
883             for (x = 0; x < s->width; x++) {
884                 y = (s->sample_array[i] * h2) >> 15;
885                 if (y < 0) {
886                     y = -y;
887                     ys = y1 - y;
888                 } else {
889                     ys = y1;
890                 }
891                 fill_rectangle(screen,
892                                s->xleft + x, ys, 1, y,
893                                fgcolor, 0);
894                 i += channels;
895                 if (i >= SAMPLE_ARRAY_SIZE)
896                     i -= SAMPLE_ARRAY_SIZE;
897             }
898         }
899
900         fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
901
902         for (ch = 1; ch < nb_display_channels; ch++) {
903             y = s->ytop + ch * h;
904             fill_rectangle(screen,
905                            s->xleft, y, s->width, 1,
906                            fgcolor, 0);
907         }
908         SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
909     } else {
910         nb_display_channels= FFMIN(nb_display_channels, 2);
911         if (rdft_bits != s->rdft_bits) {
912             av_rdft_end(s->rdft);
913             av_free(s->rdft_data);
914             s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
915             s->rdft_bits = rdft_bits;
916             s->rdft_data = av_malloc(4 * nb_freq * sizeof(*s->rdft_data));
917         }
918         {
919             FFTSample *data[2];
920             for (ch = 0; ch < nb_display_channels; ch++) {
921                 data[ch] = s->rdft_data + 2 * nb_freq * ch;
922                 i = i_start + ch;
923                 for (x = 0; x < 2 * nb_freq; x++) {
924                     double w = (x-nb_freq) * (1.0 / nb_freq);
925                     data[ch][x] = s->sample_array[i] * (1.0 - w * w);
926                     i += channels;
927                     if (i >= SAMPLE_ARRAY_SIZE)
928                         i -= SAMPLE_ARRAY_SIZE;
929                 }
930                 av_rdft_calc(s->rdft, data[ch]);
931             }
932             // Least efficient way to do this; we should access the data directly, but this is more than fast enough.
933             for (y = 0; y < s->height; y++) {
934                 double w = 1 / sqrt(nb_freq);
935                 int a = sqrt(w * sqrt(data[0][2 * y + 0] * data[0][2 * y + 0] + data[0][2 * y + 1] * data[0][2 * y + 1]));
936                 int b = (nb_display_channels == 2 ) ? sqrt(w * sqrt(data[1][2 * y + 0] * data[1][2 * y + 0]
937                        + data[1][2 * y + 1] * data[1][2 * y + 1])) : a;
938                 a = FFMIN(a, 255);
939                 b = FFMIN(b, 255);
940                 fgcolor = SDL_MapRGB(screen->format, a, b, (a + b) / 2);
941
942                 fill_rectangle(screen,
943                             s->xpos, s->height-y, 1, 1,
944                             fgcolor, 0);
945             }
946         }
947         SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
948         if (!s->paused)
949             s->xpos++;
950         if (s->xpos >= s->width)
951             s->xpos= s->xleft;
952     }
953 }
954
955 static void stream_close(VideoState *is)
956 {
957     VideoPicture *vp;
958     int i;
959     /* XXX: use a special url_shutdown call to abort parse cleanly */
960     is->abort_request = 1;
961     SDL_WaitThread(is->read_tid, NULL);
962     SDL_WaitThread(is->refresh_tid, NULL);
963     packet_queue_destroy(&is->videoq);
964     packet_queue_destroy(&is->audioq);
965     packet_queue_destroy(&is->subtitleq);
966
967     /* free all pictures */
968     for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
969         vp = &is->pictq[i];
970 #if CONFIG_AVFILTER
971         avfilter_unref_bufferp(&vp->picref);
972 #endif
973         if (vp->bmp) {
974             SDL_FreeYUVOverlay(vp->bmp);
975             vp->bmp = NULL;
976         }
977     }
978     SDL_DestroyMutex(is->pictq_mutex);
979     SDL_DestroyCond(is->pictq_cond);
980     SDL_DestroyMutex(is->subpq_mutex);
981     SDL_DestroyCond(is->subpq_cond);
982     SDL_DestroyCond(is->continue_read_thread);
983 #if !CONFIG_AVFILTER
984     if (is->img_convert_ctx)
985         sws_freeContext(is->img_convert_ctx);
986 #endif
987     av_free(is);
988 }
989
990 static void do_exit(VideoState *is)
991 {
992     if (is) {
993         stream_close(is);
994     }
995     av_lockmgr_register(NULL);
996     uninit_opts();
997 #if CONFIG_AVFILTER
998     avfilter_uninit();
999     av_freep(&vfilters);
1000 #endif
1001     avformat_network_deinit();
1002     if (show_status)
1003         printf("\n");
1004     SDL_Quit();
1005     av_log(NULL, AV_LOG_QUIET, "%s", "");
1006     exit(0);
1007 }
1008
1009 static void sigterm_handler(int sig)
1010 {
1011     exit(123);
1012 }
1013
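/* create or resize the SDL output surface, using the forced size, the full screen size or the picture size */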
1014 static int video_open(VideoState *is, int force_set_video_mode)
1015 {
1016     int flags = SDL_HWSURFACE | SDL_ASYNCBLIT | SDL_HWACCEL;
1017     int w,h;
1018     VideoPicture *vp = &is->pictq[is->pictq_rindex];
1019     SDL_Rect rect;
1020
1021     if (is_full_screen) flags |= SDL_FULLSCREEN;
1022     else                flags |= SDL_RESIZABLE;
1023
1024     if (is_full_screen && fs_screen_width) {
1025         w = fs_screen_width;
1026         h = fs_screen_height;
1027     } else if (!is_full_screen && screen_width) {
1028         w = screen_width;
1029         h = screen_height;
1030     } else if (vp->width) {
1031         calculate_display_rect(&rect, 0, 0, INT_MAX, vp->height, vp);
1032         w = rect.w;
1033         h = rect.h;
1034     } else {
1035         w = 640;
1036         h = 480;
1037     }
1038     if (screen && is->width == screen->w && screen->w == w
1039        && is->height== screen->h && screen->h == h && !force_set_video_mode)
1040         return 0;
1041     screen = SDL_SetVideoMode(w, h, 0, flags);
1042     if (!screen) {
1043         fprintf(stderr, "SDL: could not set video mode - exiting\n");
1044         do_exit(is);
1045     }
1046     if (!window_title)
1047         window_title = input_filename;
1048     SDL_WM_SetCaption(window_title, window_title);
1049
1050     is->width  = screen->w;
1051     is->height = screen->h;
1052
1053     return 0;
1054 }
1055
1056 /* display the current picture, if any */
1057 static void video_display(VideoState *is)
1058 {
1059     if (!screen)
1060         video_open(is, 0);
1061     if (is->audio_st && is->show_mode != SHOW_MODE_VIDEO)
1062         video_audio_display(is);
1063     else if (is->video_st)
1064         video_image_display(is);
1065 }
1066
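/* periodically push FF_REFRESH_EVENT events so the main loop refreshes the display */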
1067 static int refresh_thread(void *opaque)
1068 {
1069     VideoState *is= opaque;
1070     while (!is->abort_request) {
1071         SDL_Event event;
1072         event.type = FF_REFRESH_EVENT;
1073         event.user.data1 = opaque;
1074         if (!is->refresh && (!is->paused || is->force_refresh)) {
1075             is->refresh = 1;
1076             SDL_PushEvent(&event);
1077         }
1078         // FIXME: ideally we should wait for the correct time, but SDL's event passing is so slow it would be pointless
1079         av_usleep(is->audio_st && is->show_mode != SHOW_MODE_VIDEO ? rdftspeed*1000 : 5000);
1080     }
1081     return 0;
1082 }
1083
1084 /* get the current audio clock value */
1085 static double get_audio_clock(VideoState *is)
1086 {
1087     if (is->paused) {
1088         return is->audio_current_pts;
1089     } else {
1090         return is->audio_current_pts_drift + av_gettime() / 1000000.0;
1091     }
1092 }
1093
1094 /* get the current video clock value */
1095 static double get_video_clock(VideoState *is)
1096 {
1097     if (is->paused) {
1098         return is->video_current_pts;
1099     } else {
1100         return is->video_current_pts_drift + av_gettime() / 1000000.0;
1101     }
1102 }
1103
1104 /* get the current external clock value */
1105 static double get_external_clock(VideoState *is)
1106 {
1107     if (is->paused) {
1108         return is->external_clock;
1109     } else {
1110         return is->external_clock_drift + av_gettime() / 1000000.0;
1111     }
1112 }
1113
1114 static int get_master_sync_type(VideoState *is) {
1115     if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1116         if (is->video_st)
1117             return AV_SYNC_VIDEO_MASTER;
1118         else
1119             return AV_SYNC_AUDIO_MASTER;
1120     } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1121         if (is->audio_st)
1122             return AV_SYNC_AUDIO_MASTER;
1123         else
1124             return AV_SYNC_EXTERNAL_CLOCK;
1125     } else {
1126         return AV_SYNC_EXTERNAL_CLOCK;
1127     }
1128 }
1129
1130 /* get the current master clock value */
1131 static double get_master_clock(VideoState *is)
1132 {
1133     double val;
1134
1135     switch (get_master_sync_type(is)) {
1136         case AV_SYNC_VIDEO_MASTER:
1137             val = get_video_clock(is);
1138             break;
1139         case AV_SYNC_AUDIO_MASTER:
1140             val = get_audio_clock(is);
1141             break;
1142         default:
1143             val = get_external_clock(is);
1144             break;
1145     }
1146     return val;
1147 }
1148
1149 static void update_external_clock_pts(VideoState *is, double pts)
1150 {
1151    is->external_clock_time = av_gettime();
1152    is->external_clock = pts;
1153    is->external_clock_drift = pts - is->external_clock_time / 1000000.0;
1154 }
1155
1156 static void check_external_clock_sync(VideoState *is, double pts) {
1157     if (fabs(get_external_clock(is) - pts) > AV_NOSYNC_THRESHOLD) {
1158         update_external_clock_pts(is, pts);
1159     }
1160 }
1161
1162 /* seek in the stream */
1163 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1164 {
1165     if (!is->seek_req) {
1166         is->seek_pos = pos;
1167         is->seek_rel = rel;
1168         is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1169         if (seek_by_bytes)
1170             is->seek_flags |= AVSEEK_FLAG_BYTE;
1171         is->seek_req = 1;
1172     }
1173 }
1174
1175 /* pause or resume the video */
1176 static void stream_toggle_pause(VideoState *is)
1177 {
1178     if (is->paused) {
1179         is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
1180         if (is->read_pause_return != AVERROR(ENOSYS)) {
1181             is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
1182         }
1183         is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
1184     }
1185     update_external_clock_pts(is, get_external_clock(is));
1186     is->paused = !is->paused;
1187 }
1188
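/* compute the delay before showing the next picture, correcting for drift against the master clock */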
1189 static double compute_target_delay(double delay, VideoState *is)
1190 {
1191     double sync_threshold, diff;
1192
1193     /* update delay to follow master synchronisation source */
1194     if (get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER) {
1195         /* if video is slave, we try to correct big delays by
1196            duplicating or deleting a frame */
1197         diff = get_video_clock(is) - get_master_clock(is);
1198
1199         /* skip or repeat frame. We take into account the
1200            delay to compute the threshold. I still don't know
1201            if it is the best guess */
1202         sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1203         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1204             if (diff <= -sync_threshold)
1205                 delay = 0;
1206             else if (diff >= sync_threshold)
1207                 delay = 2 * delay;
1208         }
1209     }
1210
1211     av_dlog(NULL, "video: delay=%0.3f A-V=%f\n",
1212             delay, -diff);
1213
1214     return delay;
1215 }
1216
1217 static void pictq_next_picture(VideoState *is) {
1218     /* update queue size and signal for next picture */
1219     if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1220         is->pictq_rindex = 0;
1221
1222     SDL_LockMutex(is->pictq_mutex);
1223     is->pictq_size--;
1224     SDL_CondSignal(is->pictq_cond);
1225     SDL_UnlockMutex(is->pictq_mutex);
1226 }
1227
1228 static void pictq_prev_picture(VideoState *is) {
1229     VideoPicture *prevvp;
1230     /* update queue size and signal for the previous picture */
1231     prevvp = &is->pictq[(is->pictq_rindex + VIDEO_PICTURE_QUEUE_SIZE - 1) % VIDEO_PICTURE_QUEUE_SIZE];
1232     if (prevvp->allocated && !prevvp->skip) {
1233         SDL_LockMutex(is->pictq_mutex);
1234         if (is->pictq_size < VIDEO_PICTURE_QUEUE_SIZE - 1) {
1235             if (--is->pictq_rindex == -1)
1236                 is->pictq_rindex = VIDEO_PICTURE_QUEUE_SIZE - 1;
1237             is->pictq_size++;
1238         }
1239         SDL_CondSignal(is->pictq_cond);
1240         SDL_UnlockMutex(is->pictq_mutex);
1241     }
1242 }
1243
1244 static void update_video_pts(VideoState *is, double pts, int64_t pos, int serial) {
1245     double time = av_gettime() / 1000000.0;
1246     /* update current video pts */
1247     is->video_current_pts = pts;
1248     is->video_current_pts_drift = is->video_current_pts - time;
1249     is->video_current_pos = pos;
1250     is->frame_last_pts = pts;
1251     if (is->videoq.serial == serial)
1252         check_external_clock_sync(is, is->video_current_pts);
1253 }
1254
1255 /* called to display each frame */
1256 static void video_refresh(void *opaque)
1257 {
1258     VideoState *is = opaque;
1259     VideoPicture *vp;
1260     double time;
1261
1262     SubPicture *sp, *sp2;
1263
1264     if (is->video_st) {
1265         if (is->force_refresh)
1266             pictq_prev_picture(is);
1267 retry:
1268         if (is->pictq_size == 0) {
1269             SDL_LockMutex(is->pictq_mutex);
1270             if (is->frame_last_dropped_pts != AV_NOPTS_VALUE && is->frame_last_dropped_pts > is->frame_last_pts) {
1271                 update_video_pts(is, is->frame_last_dropped_pts, is->frame_last_dropped_pos, 0);
1272                 is->frame_last_dropped_pts = AV_NOPTS_VALUE;
1273             }
1274             SDL_UnlockMutex(is->pictq_mutex);
1275             // nothing to do, no picture to display in the queue
1276         } else {
1277             double last_duration, duration, delay;
1278             /* dequeue the picture */
1279             vp = &is->pictq[is->pictq_rindex];
1280
1281             if (vp->skip) {
1282                 pictq_next_picture(is);
1283                 goto retry;
1284             }
1285
1286             if (is->paused)
1287                 goto display;
1288
1289             /* compute nominal last_duration */
1290             last_duration = vp->pts - is->frame_last_pts;
1291             if (last_duration > 0 && last_duration < 10.0) {
1292                 /* if duration of the last frame was sane, update last_duration in video state */
1293                 is->frame_last_duration = last_duration;
1294             }
1295             delay = compute_target_delay(is->frame_last_duration, is);
1296
1297             time= av_gettime()/1000000.0;
1298             if (time < is->frame_timer + delay)
1299                 return;
1300
1301             if (delay > 0)
1302                 is->frame_timer += delay * FFMAX(1, floor((time-is->frame_timer) / delay));
1303
1304             SDL_LockMutex(is->pictq_mutex);
1305             update_video_pts(is, vp->pts, vp->pos, vp->serial);
1306             SDL_UnlockMutex(is->pictq_mutex);
1307
1308             if (is->pictq_size > 1) {
1309                 VideoPicture *nextvp = &is->pictq[(is->pictq_rindex + 1) % VIDEO_PICTURE_QUEUE_SIZE];
1310                 duration = nextvp->pts - vp->pts;
1311                 if((framedrop>0 || (framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) && time > is->frame_timer + duration){
1312                     is->frame_drops_late++;
1313                     pictq_next_picture(is);
1314                     goto retry;
1315                 }
1316             }
1317
1318             if (is->subtitle_st) {
1319                 if (is->subtitle_stream_changed) {
1320                     SDL_LockMutex(is->subpq_mutex);
1321
1322                     while (is->subpq_size) {
1323                         free_subpicture(&is->subpq[is->subpq_rindex]);
1324
1325                         /* update queue size and signal for next picture */
1326                         if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1327                             is->subpq_rindex = 0;
1328
1329                         is->subpq_size--;
1330                     }
1331                     is->subtitle_stream_changed = 0;
1332
1333                     SDL_CondSignal(is->subpq_cond);
1334                     SDL_UnlockMutex(is->subpq_mutex);
1335                 } else {
1336                     if (is->subpq_size > 0) {
1337                         sp = &is->subpq[is->subpq_rindex];
1338
1339                         if (is->subpq_size > 1)
1340                             sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1341                         else
1342                             sp2 = NULL;
1343
1344                         if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1345                                 || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1346                         {
1347                             free_subpicture(sp);
1348
1349                             /* update queue size and signal for next picture */
1350                             if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1351                                 is->subpq_rindex = 0;
1352
1353                             SDL_LockMutex(is->subpq_mutex);
1354                             is->subpq_size--;
1355                             SDL_CondSignal(is->subpq_cond);
1356                             SDL_UnlockMutex(is->subpq_mutex);
1357                         }
1358                     }
1359                 }
1360             }
1361
1362 display:
1363             /* display picture */
1364             if (!display_disable)
1365                 video_display(is);
1366
1367             pictq_next_picture(is);
1368         }
1369     } else if (is->audio_st) {
1370         /* draw the next audio frame */
1371
1372         /* if there is only an audio stream, then display the audio bars
1373            (better than nothing, just to test the implementation) */
1374
1375         /* display picture */
1376         if (!display_disable)
1377             video_display(is);
1378     }
1379     is->force_refresh = 0;
1380     if (show_status) {
1381         static int64_t last_time;
1382         int64_t cur_time;
1383         int aqsize, vqsize, sqsize;
1384         double av_diff;
1385
1386         cur_time = av_gettime();
1387         if (!last_time || (cur_time - last_time) >= 30000) {
1388             aqsize = 0;
1389             vqsize = 0;
1390             sqsize = 0;
1391             if (is->audio_st)
1392                 aqsize = is->audioq.size;
1393             if (is->video_st)
1394                 vqsize = is->videoq.size;
1395             if (is->subtitle_st)
1396                 sqsize = is->subtitleq.size;
1397             av_diff = 0;
1398             if (is->audio_st && is->video_st)
1399                 av_diff = get_audio_clock(is) - get_video_clock(is);
1400             printf("%7.2f A-V:%7.3f fd=%4d aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
1401                    get_master_clock(is),
1402                    av_diff,
1403                    is->frame_drops_early + is->frame_drops_late,
1404                    aqsize / 1024,
1405                    vqsize / 1024,
1406                    sqsize,
1407                    is->video_st ? is->video_st->codec->pts_correction_num_faulty_dts : 0,
1408                    is->video_st ? is->video_st->codec->pts_correction_num_faulty_pts : 0);
1409             fflush(stdout);
1410             last_time = cur_time;
1411         }
1412     }
1413 }
1414
1415 /* allocate a picture (this needs to be done in the main thread to avoid
1416    potential locking problems) */
1417 static void alloc_picture(VideoState *is)
1418 {
1419     VideoPicture *vp;
1420
1421     vp = &is->pictq[is->pictq_windex];
1422
1423     if (vp->bmp)
1424         SDL_FreeYUVOverlay(vp->bmp);
1425
1426 #if CONFIG_AVFILTER
1427     avfilter_unref_bufferp(&vp->picref);
1428 #endif
1429
1430     video_open(is, 0);
1431
1432     vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
1433                                    SDL_YV12_OVERLAY,
1434                                    screen);
1435     if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
1436         /* SDL allocates a buffer smaller than requested if the video
1437          * overlay hardware is unable to support the requested size. */
1438         fprintf(stderr, "Error: the video system does not support an image\n"
1439                         "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n"
1440                         "to reduce the image size.\n", vp->width, vp->height );
1441         do_exit(is);
1442     }
1443
1444     SDL_LockMutex(is->pictq_mutex);
1445     vp->allocated = 1;
1446     SDL_CondSignal(is->pictq_cond);
1447     SDL_UnlockMutex(is->pictq_mutex);
1448 }
1449
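/* copy a decoded frame into the next free picture slot, (re)allocating the SDL overlay if needed */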
1450 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos, int serial)
1451 {
1452     VideoPicture *vp;
1453     double frame_delay, pts = pts1;
1454
1455     /* compute the exact PTS for the picture if it is omitted in the stream
1456      * pts1 is the dts of the pkt / pts of the frame */
1457     if (pts != 0) {
1458         /* update video clock with pts, if present */
1459         is->video_clock = pts;
1460     } else {
1461         pts = is->video_clock;
1462     }
1463     /* update video clock for next frame */
1464     frame_delay = av_q2d(is->video_st->codec->time_base);
1465     /* for MPEG2, the frame can be repeated, so we update the
1466        clock accordingly */
1467     frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1468     is->video_clock += frame_delay;
1469
1470 #if defined(DEBUG_SYNC) && 0
1471     printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
1472            av_get_picture_type_char(src_frame->pict_type), pts, pts1);
1473 #endif
1474
1475     /* wait until we have space to put a new picture */
1476     SDL_LockMutex(is->pictq_mutex);
1477
1478     /* keep the last already displayed picture in the queue */
1479     while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE - 2 &&
1480            !is->videoq.abort_request) {
1481         SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1482     }
1483     SDL_UnlockMutex(is->pictq_mutex);
1484
1485     if (is->videoq.abort_request)
1486         return -1;
1487
1488     vp = &is->pictq[is->pictq_windex];
1489
1490 #if CONFIG_AVFILTER
1491     vp->sample_aspect_ratio = ((AVFilterBufferRef *)src_frame->opaque)->video->sample_aspect_ratio;
1492 #else
1493     vp->sample_aspect_ratio = av_guess_sample_aspect_ratio(is->ic, is->video_st, src_frame);
1494 #endif
1495
1496     /* alloc or resize hardware picture buffer */
1497     if (!vp->bmp || vp->reallocate || !vp->allocated ||
1498         vp->width  != src_frame->width ||
1499         vp->height != src_frame->height) {
1500         SDL_Event event;
1501
1502         vp->allocated  = 0;
1503         vp->reallocate = 0;
1504         vp->width = src_frame->width;
1505         vp->height = src_frame->height;
1506
1507         /* the allocation must be done in the main thread to avoid
1508            locking problems. */
1509         event.type = FF_ALLOC_EVENT;
1510         event.user.data1 = is;
1511         SDL_PushEvent(&event);
1512
1513         /* wait until the picture is allocated */
1514         SDL_LockMutex(is->pictq_mutex);
1515         while (!vp->allocated && !is->videoq.abort_request) {
1516             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1517         }
1518         /* if the queue is aborted, we have to pop the pending ALLOC event or wait for the allocation to complete */
1519         if (is->videoq.abort_request && SDL_PeepEvents(&event, 1, SDL_GETEVENT, SDL_EVENTMASK(FF_ALLOC_EVENT)) != 1) {
1520             while (!vp->allocated) {
1521                 SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1522             }
1523         }
1524         SDL_UnlockMutex(is->pictq_mutex);
1525
1526         if (is->videoq.abort_request)
1527             return -1;
1528     }
1529
1530     /* if the frame is not skipped, then display it */
1531     if (vp->bmp) {
1532         AVPicture pict = { { 0 } };
1533 #if CONFIG_AVFILTER
1534         avfilter_unref_bufferp(&vp->picref);
1535         vp->picref = src_frame->opaque;
1536 #endif
1537
1538         /* get a pointer on the bitmap */
1539         SDL_LockYUVOverlay (vp->bmp);
1540
1541         pict.data[0] = vp->bmp->pixels[0];
1542         pict.data[1] = vp->bmp->pixels[2];
1543         pict.data[2] = vp->bmp->pixels[1];
1544
1545         pict.linesize[0] = vp->bmp->pitches[0];
1546         pict.linesize[1] = vp->bmp->pitches[2];
1547         pict.linesize[2] = vp->bmp->pitches[1];
1548
1549 #if CONFIG_AVFILTER
1550         // FIXME use direct rendering
1551         av_picture_copy(&pict, (AVPicture *)src_frame,
1552                         src_frame->format, vp->width, vp->height);
1553 #else
1554         sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
1555         is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
1556             vp->width, vp->height, src_frame->format, vp->width, vp->height,
1557             AV_PIX_FMT_YUV420P, sws_flags, NULL, NULL, NULL);
1558         if (is->img_convert_ctx == NULL) {
1559             fprintf(stderr, "Cannot initialize the conversion context\n");
1560             exit(1);
1561         }
1562         sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1563                   0, vp->height, pict.data, pict.linesize);
1564 #endif
1565         /* update the bitmap content */
1566         SDL_UnlockYUVOverlay(vp->bmp);
1567
1568         vp->pts = pts;
1569         vp->pos = pos;
1570         vp->skip = 0;
1571         vp->serial = serial;
1572
1573         /* now we can update the picture count */
1574         if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1575             is->pictq_windex = 0;
1576         SDL_LockMutex(is->pictq_mutex);
1577         is->pictq_size++;
1578         SDL_UnlockMutex(is->pictq_mutex);
1579     }
1580     return 0;
1581 }
1582
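/* get the next decoded video frame: returns 1 if a frame is ready, 0 if it was dropped or not available, and a negative value on abort */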
1583 static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt, int *serial)
1584 {
1585     int got_picture, i;
1586
1587     if (packet_queue_get(&is->videoq, pkt, 1, serial) < 0)
1588         return -1;
1589
1590     if (pkt->data == flush_pkt.data) {
1591         avcodec_flush_buffers(is->video_st->codec);
1592
1593         SDL_LockMutex(is->pictq_mutex);
1594         // Make sure there are no long delay timers (ideally we should just flush the queue, but that's harder)
1595         for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
1596             is->pictq[i].skip = 1;
1597         }
1598         while (is->pictq_size && !is->videoq.abort_request) {
1599             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1600         }
1601         is->video_current_pos = -1;
1602         is->frame_last_pts = AV_NOPTS_VALUE;
1603         is->frame_last_duration = 0;
1604         is->frame_timer = (double)av_gettime() / 1000000.0;
1605         is->frame_last_dropped_pts = AV_NOPTS_VALUE;
1606         SDL_UnlockMutex(is->pictq_mutex);
1607
1608         return 0;
1609     }
1610
1611     if(avcodec_decode_video2(is->video_st->codec, frame, &got_picture, pkt) < 0)
1612         return 0;
1613
1614     if (got_picture) {
1615         int ret = 1;
1616
1617         if (decoder_reorder_pts == -1) {
1618             *pts = av_frame_get_best_effort_timestamp(frame);
1619         } else if (decoder_reorder_pts) {
1620             *pts = frame->pkt_pts;
1621         } else {
1622             *pts = frame->pkt_dts;
1623         }
1624
1625         if (*pts == AV_NOPTS_VALUE) {
1626             *pts = 0;
1627         }
1628
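        /* Early frame drop: when video is not the master clock and, even after
           advancing by this frame's pts increment (minus the last measured
           filter delay), the video clock would still lag the master clock,
           drop the frame here instead of filtering and queueing it. */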
1629         if (framedrop>0 || (framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) {
1630             SDL_LockMutex(is->pictq_mutex);
1631             if (is->frame_last_pts != AV_NOPTS_VALUE && *pts) {
1632                 double clockdiff = get_video_clock(is) - get_master_clock(is);
1633                 double dpts = av_q2d(is->video_st->time_base) * *pts;
1634                 double ptsdiff = dpts - is->frame_last_pts;
1635                 if (fabs(clockdiff) < AV_NOSYNC_THRESHOLD &&
1636                      ptsdiff > 0 && ptsdiff < AV_NOSYNC_THRESHOLD &&
1637                      clockdiff + ptsdiff - is->frame_last_filter_delay < 0) {
1638                     is->frame_last_dropped_pos = pkt->pos;
1639                     is->frame_last_dropped_pts = dpts;
1640                     is->frame_drops_early++;
1641                     ret = 0;
1642                 }
1643             }
1644             SDL_UnlockMutex(is->pictq_mutex);
1645         }
1646
1647         return ret;
1648     }
1649     return 0;
1650 }
1651
1652 #if CONFIG_AVFILTER
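/* Connect source_ctx to sink_ctx, either through the user supplied filter
 * description (parsed into the graph) or by a direct link, then configure the
 * resulting graph. */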
1653 static int configure_filtergraph(AVFilterGraph *graph, const char *filtergraph,
1654                                  AVFilterContext *source_ctx, AVFilterContext *sink_ctx)
1655 {
1656     int ret;
1657     AVFilterInOut *outputs = NULL, *inputs = NULL;
1658
1659     if (filtergraph) {
1660         outputs = avfilter_inout_alloc();
1661         inputs  = avfilter_inout_alloc();
1662         if (!outputs || !inputs) {
1663             ret = AVERROR(ENOMEM);
1664             goto fail;
1665         }
1666
1667         outputs->name       = av_strdup("in");
1668         outputs->filter_ctx = source_ctx;
1669         outputs->pad_idx    = 0;
1670         outputs->next       = NULL;
1671
1672         inputs->name        = av_strdup("out");
1673         inputs->filter_ctx  = sink_ctx;
1674         inputs->pad_idx     = 0;
1675         inputs->next        = NULL;
1676
1677         if ((ret = avfilter_graph_parse(graph, filtergraph, &inputs, &outputs, NULL)) < 0)
1678             goto fail;
1679     } else {
1680         if ((ret = avfilter_link(source_ctx, 0, sink_ctx, 0)) < 0)
1681             goto fail;
1682     }
1683
1684     return avfilter_graph_config(graph, NULL);
1685 fail:
1686     avfilter_inout_free(&outputs);
1687     avfilter_inout_free(&inputs);
1688     return ret;
1689 }
1690
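/* Build the video filter chain used by ffplay: a "buffer" source fed with the
 * decoded frames, the user filters (if any), a "crop" filter rounding the
 * picture down to even dimensions for SDL, a "format" filter forcing yuv420p,
 * and an "ffbuffersink" restricted to yuv420p. */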
1691 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters)
1692 {
1693     static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE };
1694     char sws_flags_str[128];
1695     char buffersrc_args[256];
1696     int ret;
1697     AVBufferSinkParams *buffersink_params = av_buffersink_params_alloc();
1698     AVFilterContext *filt_src = NULL, *filt_out = NULL, *filt_format, *filt_crop;
1699     AVCodecContext *codec = is->video_st->codec;
1700
1701     snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%d", sws_flags);
1702     graph->scale_sws_opts = av_strdup(sws_flags_str);
1703
1704     snprintf(buffersrc_args, sizeof(buffersrc_args),
1705              "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
1706              codec->width, codec->height, codec->pix_fmt,
1707              is->video_st->time_base.num, is->video_st->time_base.den,
1708              codec->sample_aspect_ratio.num, FFMAX(codec->sample_aspect_ratio.den, 1));
1709
1710     if ((ret = avfilter_graph_create_filter(&filt_src,
1711                                             avfilter_get_by_name("buffer"),
1712                                             "ffplay_buffer", buffersrc_args, NULL,
1713                                             graph)) < 0)
1714         return ret;
1715
1716     buffersink_params->pixel_fmts = pix_fmts;
1717     ret = avfilter_graph_create_filter(&filt_out,
1718                                        avfilter_get_by_name("ffbuffersink"),
1719                                        "ffplay_buffersink", NULL, buffersink_params, graph);
1720     av_freep(&buffersink_params);
1721     if (ret < 0)
1722         return ret;
1723
1724     /* The SDL YUV code does not handle odd width/height for some driver
1725      * combinations, therefore we crop the picture to an even width/height. */
1726     if ((ret = avfilter_graph_create_filter(&filt_crop,
1727                                             avfilter_get_by_name("crop"),
1728                                             "ffplay_crop", "floor(in_w/2)*2:floor(in_h/2)*2", NULL, graph)) < 0)
1729         return ret;
1730     if ((ret = avfilter_graph_create_filter(&filt_format,
1731                                             avfilter_get_by_name("format"),
1732                                             "format", "yuv420p", NULL, graph)) < 0)
1733         return ret;
1734     if ((ret = avfilter_link(filt_crop, 0, filt_format, 0)) < 0)
1735         return ret;
1736     if ((ret = avfilter_link(filt_format, 0, filt_out, 0)) < 0)
1737         return ret;
1738
1739     if ((ret = configure_filtergraph(graph, vfilters, filt_src, filt_crop)) < 0)
1740         return ret;
1741
1742     is->in_video_filter  = filt_src;
1743     is->out_video_filter = filt_out;
1744
1745     return ret;
1746 }
1747
1748 #endif  /* CONFIG_AVFILTER */
1749
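/* Video decoding thread: pulls packets from the video queue, decodes them and,
 * when compiled with libavfilter, pushes the frames through the filter graph,
 * rebuilding it whenever the frame size or pixel format changes.  Each output
 * frame's pts is rescaled to the stream time base and handed to queue_picture(). */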
1750 static int video_thread(void *arg)
1751 {
1752     AVPacket pkt = { 0 };
1753     VideoState *is = arg;
1754     AVFrame *frame = avcodec_alloc_frame();
1755     int64_t pts_int = AV_NOPTS_VALUE, pos = -1;
1756     double pts;
1757     int ret;
1758     int serial = 0;
1759
1760 #if CONFIG_AVFILTER
1761     AVCodecContext *codec = is->video_st->codec;
1762     AVFilterGraph *graph = avfilter_graph_alloc();
1763     AVFilterContext *filt_out = NULL, *filt_in = NULL;
1764     int last_w = 0;
1765     int last_h = 0;
1766     enum AVPixelFormat last_format = -2;
1767
1768     if (codec->codec->capabilities & CODEC_CAP_DR1) {
1769         is->use_dr1 = 1;
1770         codec->get_buffer     = codec_get_buffer;
1771         codec->release_buffer = codec_release_buffer;
1772         codec->opaque         = &is->buffer_pool;
1773     }
1774 #endif
1775
1776     for (;;) {
1777 #if CONFIG_AVFILTER
1778         AVFilterBufferRef *picref;
1779         AVRational tb;
1780 #endif
1781         while (is->paused && !is->videoq.abort_request)
1782             SDL_Delay(10);
1783
1784         avcodec_get_frame_defaults(frame);
1785         av_free_packet(&pkt);
1786
1787         ret = get_video_frame(is, frame, &pts_int, &pkt, &serial);
1788         if (ret < 0)
1789             goto the_end;
1790
1791         if (!ret)
1792             continue;
1793
1794 #if CONFIG_AVFILTER
1795         if (   last_w != is->video_st->codec->width
1796             || last_h != is->video_st->codec->height
1797             || last_format != is->video_st->codec->pix_fmt) {
1798             av_log(NULL, AV_LOG_INFO, "Frame changed from size:%dx%d to size:%dx%d\n",
1799                    last_w, last_h, is->video_st->codec->width, is->video_st->codec->height);
1800             avfilter_graph_free(&graph);
1801             graph = avfilter_graph_alloc();
1802             if ((ret = configure_video_filters(graph, is, vfilters)) < 0) {
1803                 SDL_Event event;
1804                 event.type = FF_QUIT_EVENT;
1805                 event.user.data1 = is;
1806                 SDL_PushEvent(&event);
1807                 av_free_packet(&pkt);
1808                 goto the_end;
1809             }
1810             filt_in  = is->in_video_filter;
1811             filt_out = is->out_video_filter;
1812             last_w = is->video_st->codec->width;
1813             last_h = is->video_st->codec->height;
1814             last_format = is->video_st->codec->pix_fmt;
1815         }
1816
1817         frame->pts = pts_int;
1818         frame->sample_aspect_ratio = av_guess_sample_aspect_ratio(is->ic, is->video_st, frame);
1819         if (is->use_dr1 && frame->opaque) {
1820             FrameBuffer      *buf = frame->opaque;
1821             AVFilterBufferRef *fb = avfilter_get_video_buffer_ref_from_arrays(
1822                                         frame->data, frame->linesize,
1823                                         AV_PERM_READ | AV_PERM_PRESERVE,
1824                                         frame->width, frame->height,
1825                                         frame->format);
1826
1827             avfilter_copy_frame_props(fb, frame);
1828             fb->buf->priv           = buf;
1829             fb->buf->free           = filter_release_buffer;
1830
1831             buf->refcount++;
1832             av_buffersrc_add_ref(filt_in, fb, AV_BUFFERSRC_FLAG_NO_COPY);
1833
1834         } else
1835             av_buffersrc_write_frame(filt_in, frame);
1836
1837         av_free_packet(&pkt);
1838
1839         while (ret >= 0) {
1840             is->frame_last_returned_time = av_gettime() / 1000000.0;
1841
1842             ret = av_buffersink_get_buffer_ref(filt_out, &picref, 0);
1843             if (ret < 0) {
1844                 ret = 0;
1845                 break;
1846             }
1847
1848             is->frame_last_filter_delay = av_gettime() / 1000000.0 - is->frame_last_returned_time;
1849             if (fabs(is->frame_last_filter_delay) > AV_NOSYNC_THRESHOLD / 10.0)
1850                 is->frame_last_filter_delay = 0;
1851
1852             avfilter_copy_buf_props(frame, picref);
1853
1854             pts_int = picref->pts;
1855             tb      = filt_out->inputs[0]->time_base;
1856             pos     = picref->pos;
1857             frame->opaque = picref;
1858
1859             if (av_cmp_q(tb, is->video_st->time_base)) {
1860                 av_unused int64_t pts1 = pts_int;
1861                 pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
1862                 av_dlog(NULL, "video_thread(): "
1863                         "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
1864                         tb.num, tb.den, pts1,
1865                         is->video_st->time_base.num, is->video_st->time_base.den, pts_int);
1866             }
1867             pts = pts_int * av_q2d(is->video_st->time_base);
1868             ret = queue_picture(is, frame, pts, pos, serial);
1869         }
1870 #else
1871         pts = pts_int * av_q2d(is->video_st->time_base);
1872         ret = queue_picture(is, frame, pts, pkt.pos, serial);
1873 #endif
1874
1875         if (ret < 0)
1876             goto the_end;
1877
1878         if (is->step)
1879             stream_toggle_pause(is);
1880     }
1881  the_end:
1882     avcodec_flush_buffers(is->video_st->codec);
1883 #if CONFIG_AVFILTER
1884     avfilter_graph_free(&graph);
1885 #endif
1886     av_free_packet(&pkt);
1887     avcodec_free_frame(&frame);
1888     return 0;
1889 }
1890
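/* Subtitle decoding thread: decodes subtitle packets and, for bitmap subtitles,
 * converts every palette entry from RGBA to YUVA (CCIR) so the rectangles can
 * later be blended onto the YUV overlay, then queues them for display. */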
1891 static int subtitle_thread(void *arg)
1892 {
1893     VideoState *is = arg;
1894     SubPicture *sp;
1895     AVPacket pkt1, *pkt = &pkt1;
1896     int got_subtitle;
1897     double pts;
1898     int i, j;
1899     int r, g, b, y, u, v, a;
1900
1901     for (;;) {
1902         while (is->paused && !is->subtitleq.abort_request) {
1903             SDL_Delay(10);
1904         }
1905         if (packet_queue_get(&is->subtitleq, pkt, 1, NULL) < 0)
1906             break;
1907
1908         if (pkt->data == flush_pkt.data) {
1909             avcodec_flush_buffers(is->subtitle_st->codec);
1910             continue;
1911         }
1912         SDL_LockMutex(is->subpq_mutex);
1913         while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
1914                !is->subtitleq.abort_request) {
1915             SDL_CondWait(is->subpq_cond, is->subpq_mutex);
1916         }
1917         SDL_UnlockMutex(is->subpq_mutex);
1918
1919         if (is->subtitleq.abort_request)
1920             return 0;
1921
1922         sp = &is->subpq[is->subpq_windex];
1923
1924         /* NOTE: pts is the PTS of the _first_ picture beginning in
1925            this packet, if any */
1926         pts = 0;
1927         if (pkt->pts != AV_NOPTS_VALUE)
1928             pts = av_q2d(is->subtitle_st->time_base) * pkt->pts;
1929
1930         avcodec_decode_subtitle2(is->subtitle_st->codec, &sp->sub,
1931                                  &got_subtitle, pkt);
1932         if (got_subtitle && sp->sub.format == 0) {
1933             if (sp->sub.pts != AV_NOPTS_VALUE)
1934                 pts = sp->sub.pts / (double)AV_TIME_BASE;
1935             sp->pts = pts;
1936
1937             for (i = 0; i < sp->sub.num_rects; i++)
1938             {
1939                 for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
1940                 {
1941                     RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
1942                     y = RGB_TO_Y_CCIR(r, g, b);
1943                     u = RGB_TO_U_CCIR(r, g, b, 0);
1944                     v = RGB_TO_V_CCIR(r, g, b, 0);
1945                     YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
1946                 }
1947             }
1948
1949             /* now we can update the picture count */
1950             if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
1951                 is->subpq_windex = 0;
1952             SDL_LockMutex(is->subpq_mutex);
1953             is->subpq_size++;
1954             SDL_UnlockMutex(is->subpq_mutex);
1955         }
1956         av_free_packet(pkt);
1957     }
1958     return 0;
1959 }
1960
1961 /* copy decoded audio samples into the ring buffer used for the waveform/RDFT display */
1962 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1963 {
1964     int size, len;
1965
1966     size = samples_size / sizeof(short);
1967     while (size > 0) {
1968         len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1969         if (len > size)
1970             len = size;
1971         memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1972         samples += len;
1973         is->sample_array_index += len;
1974         if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1975             is->sample_array_index = 0;
1976         size -= len;
1977     }
1978 }
1979
1980 /* return the wanted number of samples to get better sync if sync_type is video
1981  * or external master clock */
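/* The A-V difference is low-pass filtered with an exponential average,
 *   audio_diff_cum = diff + audio_diff_avg_coef * audio_diff_cum,
 * so avg_diff = audio_diff_cum * (1 - audio_diff_avg_coef) approximates the
 * recent mean difference.  Once enough measurements have been accumulated and
 * avg_diff exceeds audio_diff_threshold, the requested sample count is
 * adjusted by diff * freq, clamped to +/- SAMPLE_CORRECTION_PERCENT_MAX
 * percent of the original count. */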
1982 static int synchronize_audio(VideoState *is, int nb_samples)
1983 {
1984     int wanted_nb_samples = nb_samples;
1985
1986     /* if not master, then we try to remove or add samples to correct the clock */
1987     if (get_master_sync_type(is) != AV_SYNC_AUDIO_MASTER) {
1988         double diff, avg_diff;
1989         int min_nb_samples, max_nb_samples;
1990
1991         diff = get_audio_clock(is) - get_master_clock(is);
1992
1993         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1994             is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1995             if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1996                 /* not enough measures to have a correct estimate */
1997                 is->audio_diff_avg_count++;
1998             } else {
1999                 /* estimate the A-V difference */
2000                 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
2001
2002                 if (fabs(avg_diff) >= is->audio_diff_threshold) {
2003                     wanted_nb_samples = nb_samples + (int)(diff * is->audio_src.freq);
2004                     min_nb_samples = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX) / 100));
2005                     max_nb_samples = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX) / 100));
2006                     wanted_nb_samples = FFMIN(FFMAX(wanted_nb_samples, min_nb_samples), max_nb_samples);
2007                 }
2008                 av_dlog(NULL, "diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
2009                         diff, avg_diff, wanted_nb_samples - nb_samples,
2010                         is->audio_clock, is->video_clock, is->audio_diff_threshold);
2011             }
2012         } else {
2013             /* too big a difference: may be initial PTS errors, so
2014                reset the A-V filter */
2015             is->audio_diff_avg_count = 0;
2016             is->audio_diff_cum       = 0;
2017         }
2018     }
2019
2020     return wanted_nb_samples;
2021 }
2022
2023 /* decode one audio frame and return its uncompressed size */
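/* The inner loop decodes frames from the current packet; each decoded frame is
 * resampled with swresample to the audio_tgt parameters whenever the source
 * format, channel layout or sample rate differ, or when synchronize_audio()
 * requests a sample-count compensation.  The outer loop fetches the next
 * packet (handling flush packets) and seeds audio_clock from the packet pts. */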
2024 static int audio_decode_frame(VideoState *is, double *pts_ptr)
2025 {
2026     AVPacket *pkt_temp = &is->audio_pkt_temp;
2027     AVPacket *pkt = &is->audio_pkt;
2028     AVCodecContext *dec = is->audio_st->codec;
2029     int len1, len2, data_size, resampled_data_size;
2030     int64_t dec_channel_layout;
2031     int got_frame;
2032     double pts;
2033     int new_packet = 0;
2034     int flush_complete = 0;
2035     int wanted_nb_samples;
2036
2037     for (;;) {
2038         /* NOTE: the audio packet can contain several frames */
2039         while (pkt_temp->size > 0 || (!pkt_temp->data && new_packet)) {
2040             if (!is->frame) {
2041                 if (!(is->frame = avcodec_alloc_frame()))
2042                     return AVERROR(ENOMEM);
2043             } else
2044                 avcodec_get_frame_defaults(is->frame);
2045
2046             if (is->paused)
2047                 return -1;
2048
2049             if (flush_complete)
2050                 break;
2051             new_packet = 0;
2052             len1 = avcodec_decode_audio4(dec, is->frame, &got_frame, pkt_temp);
2053             if (len1 < 0) {
2054                 /* if error, we skip the frame */
2055                 pkt_temp->size = 0;
2056                 break;
2057             }
2058
2059             pkt_temp->data += len1;
2060             pkt_temp->size -= len1;
2061
2062             if (!got_frame) {
2063                 /* stop sending empty packets if the decoder is finished */
2064                 if (!pkt_temp->data && dec->codec->capabilities & CODEC_CAP_DELAY)
2065                     flush_complete = 1;
2066                 continue;
2067             }
2068             data_size = av_samples_get_buffer_size(NULL, is->frame->channels,
2069                                                    is->frame->nb_samples,
2070                                                    is->frame->format, 1);
2071
2072             dec_channel_layout =
2073                 (is->frame->channel_layout && is->frame->channels == av_get_channel_layout_nb_channels(is->frame->channel_layout)) ?
2074                 is->frame->channel_layout : av_get_default_channel_layout(is->frame->channels);
2075             wanted_nb_samples = synchronize_audio(is, is->frame->nb_samples);
2076
2077             if (is->frame->format        != is->audio_src.fmt            ||
2078                 dec_channel_layout       != is->audio_src.channel_layout ||
2079                 is->frame->sample_rate   != is->audio_src.freq           ||
2080                 (wanted_nb_samples       != is->frame->nb_samples && !is->swr_ctx)) {
2081                 swr_free(&is->swr_ctx);
2082                 is->swr_ctx = swr_alloc_set_opts(NULL,
2083                                                  is->audio_tgt.channel_layout, is->audio_tgt.fmt, is->audio_tgt.freq,
2084                                                  dec_channel_layout,           is->frame->format, is->frame->sample_rate,
2085                                                  0, NULL);
2086                 if (!is->swr_ctx || swr_init(is->swr_ctx) < 0) {
2087                     fprintf(stderr, "Cannot create sample rate converter for conversion of %d Hz %s %d channels to %d Hz %s %d channels!\n",
2088                         is->frame->sample_rate,   av_get_sample_fmt_name(is->frame->format), (int)is->frame->channels,
2089                         is->audio_tgt.freq, av_get_sample_fmt_name(is->audio_tgt.fmt), is->audio_tgt.channels);
2090                     break;
2091                 }
2092                 is->audio_src.channel_layout = dec_channel_layout;
2093                 is->audio_src.channels = is->frame->channels;
2094                 is->audio_src.freq = is->frame->sample_rate;
2095                 is->audio_src.fmt = is->frame->format;
2096             }
2097
2098             if (is->swr_ctx) {
2099                 const uint8_t **in = (const uint8_t **)is->frame->extended_data;
2100                 uint8_t *out[] = {is->audio_buf2};
2101                 int out_count = sizeof(is->audio_buf2) / is->audio_tgt.channels / av_get_bytes_per_sample(is->audio_tgt.fmt);
2102                 if (wanted_nb_samples != is->frame->nb_samples) {
2103                     if (swr_set_compensation(is->swr_ctx, (wanted_nb_samples - is->frame->nb_samples) * is->audio_tgt.freq / is->frame->sample_rate,
2104                                                 wanted_nb_samples * is->audio_tgt.freq / is->frame->sample_rate) < 0) {
2105                         fprintf(stderr, "swr_set_compensation() failed\n");
2106                         break;
2107                     }
2108                 }
2109                 len2 = swr_convert(is->swr_ctx, out, out_count, in, is->frame->nb_samples);
2110                 if (len2 < 0) {
2111                     fprintf(stderr, "swr_convert() failed\n");
2112                     break;
2113                 }
2114                 if (len2 == out_count) {
2115                     fprintf(stderr, "warning: audio buffer is probably too small\n");
2116                     swr_init(is->swr_ctx);
2117                 }
2118                 is->audio_buf = is->audio_buf2;
2119                 resampled_data_size = len2 * is->audio_tgt.channels * av_get_bytes_per_sample(is->audio_tgt.fmt);
2120             } else {
2121                 is->audio_buf = is->frame->data[0];
2122                 resampled_data_size = data_size;
2123             }
2124
2125             /* if no pts, then compute it */
2126             pts = is->audio_clock;
2127             *pts_ptr = pts;
2128             is->audio_clock += (double)data_size /
2129                 (is->frame->channels * is->frame->sample_rate * av_get_bytes_per_sample(is->frame->format));
2130 #ifdef DEBUG
2131             {
2132                 static double last_clock;
2133                 printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
2134                        is->audio_clock - last_clock,
2135                        is->audio_clock, pts);
2136                 last_clock = is->audio_clock;
2137             }
2138 #endif
2139             return resampled_data_size;
2140         }
2141
2142         /* free the current packet */
2143         if (pkt->data)
2144             av_free_packet(pkt);
2145         memset(pkt_temp, 0, sizeof(*pkt_temp));
2146
2147         if (is->paused || is->audioq.abort_request) {
2148             return -1;
2149         }
2150
2151         if (is->audioq.nb_packets == 0)
2152             SDL_CondSignal(is->continue_read_thread);
2153
2154         /* read next packet */
2155         if ((new_packet = packet_queue_get(&is->audioq, pkt, 1, &is->audio_pkt_temp_serial)) < 0)
2156             return -1;
2157
2158         if (pkt->data == flush_pkt.data) {
2159             avcodec_flush_buffers(dec);
2160             flush_complete = 0;
2161         }
2162
2163         *pkt_temp = *pkt;
2164
2165         /* update the audio clock with the packet pts, if available */
2166         if (pkt->pts != AV_NOPTS_VALUE) {
2167             is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
2168         }
2169     }
2170 }
2171
2172 /* prepare a new audio buffer */
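/* SDL calls this whenever it needs 'len' more bytes: the buffer is refilled
 * from audio_decode_frame() (silence is substituted on error).  At the end,
 * audio_current_pts is derived from audio_clock by subtracting an estimate of
 * the data still queued: two hardware buffers plus the not yet consumed part
 * of audio_buf. */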
2173 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2174 {
2175     VideoState *is = opaque;
2176     int audio_size, len1;
2177     int bytes_per_sec;
2178     int frame_size = av_samples_get_buffer_size(NULL, is->audio_tgt.channels, 1, is->audio_tgt.fmt, 1);
2179     double pts;
2180
2181     audio_callback_time = av_gettime();
2182
2183     while (len > 0) {
2184         if (is->audio_buf_index >= is->audio_buf_size) {
2185            audio_size = audio_decode_frame(is, &pts);
2186            if (audio_size < 0) {
2187                 /* if error, just output silence */
2188                is->audio_buf      = is->silence_buf;
2189                is->audio_buf_size = sizeof(is->silence_buf) / frame_size * frame_size;
2190            } else {
2191                if (is->show_mode != SHOW_MODE_VIDEO)
2192                    update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2193                is->audio_buf_size = audio_size;
2194            }
2195            is->audio_buf_index = 0;
2196         }
2197         len1 = is->audio_buf_size - is->audio_buf_index;
2198         if (len1 > len)
2199             len1 = len;
2200         memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2201         len -= len1;
2202         stream += len1;
2203         is->audio_buf_index += len1;
2204     }
2205     bytes_per_sec = is->audio_tgt.freq * is->audio_tgt.channels * av_get_bytes_per_sample(is->audio_tgt.fmt);
2206     is->audio_write_buf_size = is->audio_buf_size - is->audio_buf_index;
2207     /* Let's assume the audio driver that is used by SDL has two periods. */
2208     is->audio_current_pts = is->audio_clock - (double)(2 * is->audio_hw_buf_size + is->audio_write_buf_size) / bytes_per_sec;
2209     is->audio_current_pts_drift = is->audio_current_pts - audio_callback_time / 1000000.0;
2210     if (is->audioq.serial == is->audio_pkt_temp_serial)
2211         check_external_clock_sync(is, is->audio_current_pts);
2212 }
2213
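/* Open the SDL audio device: honour the SDL_AUDIO_CHANNELS environment
 * variable, request signed 16-bit samples at the wanted rate and channel
 * count, and on failure retry with the fallback channel counts listed in
 * next_nb_channels.  On success the negotiated parameters are stored in
 * *audio_hw_params and the hardware buffer size in bytes is returned. */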
2214 static int audio_open(void *opaque, int64_t wanted_channel_layout, int wanted_nb_channels, int wanted_sample_rate, struct AudioParams *audio_hw_params)
2215 {
2216     SDL_AudioSpec wanted_spec, spec;
2217     const char *env;
2218     const int next_nb_channels[] = {0, 0, 1, 6, 2, 6, 4, 6};
2219
2220     env = SDL_getenv("SDL_AUDIO_CHANNELS");
2221     if (env) {
2222         wanted_nb_channels = atoi(env);
2223         wanted_channel_layout = av_get_default_channel_layout(wanted_nb_channels);
2224     }
2225     if (!wanted_channel_layout || wanted_nb_channels != av_get_channel_layout_nb_channels(wanted_channel_layout)) {
2226         wanted_channel_layout = av_get_default_channel_layout(wanted_nb_channels);
2227         wanted_channel_layout &= ~AV_CH_LAYOUT_STEREO_DOWNMIX;
2228     }
2229     wanted_spec.channels = av_get_channel_layout_nb_channels(wanted_channel_layout);
2230     wanted_spec.freq = wanted_sample_rate;
2231     if (wanted_spec.freq <= 0 || wanted_spec.channels <= 0) {
2232         fprintf(stderr, "Invalid sample rate or channel count!\n");
2233         return -1;
2234     }
2235     wanted_spec.format = AUDIO_S16SYS;
2236     wanted_spec.silence = 0;
2237     wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
2238     wanted_spec.callback = sdl_audio_callback;
2239     wanted_spec.userdata = opaque;
2240     while (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2241         fprintf(stderr, "SDL_OpenAudio (%d channels): %s\n", wanted_spec.channels, SDL_GetError());
2242         wanted_spec.channels = next_nb_channels[FFMIN(7, wanted_spec.channels)];
2243         if (!wanted_spec.channels) {
2244             fprintf(stderr, "No more channel combinations to try, audio open failed\n");
2245             return -1;
2246         }
2247         wanted_channel_layout = av_get_default_channel_layout(wanted_spec.channels);
2248     }
2249     if (spec.format != AUDIO_S16SYS) {
2250         fprintf(stderr, "SDL advised audio format %d is not supported!\n", spec.format);
2251         return -1;
2252     }
2253     if (spec.channels != wanted_spec.channels) {
2254         wanted_channel_layout = av_get_default_channel_layout(spec.channels);
2255         if (!wanted_channel_layout) {
2256             fprintf(stderr, "SDL advised channel count %d is not supported!\n", spec.channels);
2257             return -1;
2258         }
2259     }
2260
2261     audio_hw_params->fmt = AV_SAMPLE_FMT_S16;
2262     audio_hw_params->freq = spec.freq;
2263     audio_hw_params->channel_layout = wanted_channel_layout;
2264     audio_hw_params->channels =  spec.channels;
2265     return spec.size;
2266 }
2267
2268 /* open a given stream. Return 0 if OK */
2269 static int stream_component_open(VideoState *is, int stream_index)
2270 {
2271     AVFormatContext *ic = is->ic;
2272     AVCodecContext *avctx;
2273     AVCodec *codec;
2274     AVDictionary *opts;
2275     AVDictionaryEntry *t = NULL;
2276
2277     if (stream_index < 0 || stream_index >= ic->nb_streams)
2278         return -1;
2279     avctx = ic->streams[stream_index]->codec;
2280
2281     codec = avcodec_find_decoder(avctx->codec_id);
2282     opts = filter_codec_opts(codec_opts, avctx->codec_id, ic, ic->streams[stream_index], codec);
2283
2284     switch(avctx->codec_type){
2285         case AVMEDIA_TYPE_AUDIO   : is->last_audio_stream    = stream_index; if(audio_codec_name   ) codec= avcodec_find_decoder_by_name(   audio_codec_name); break;
2286         case AVMEDIA_TYPE_SUBTITLE: is->last_subtitle_stream = stream_index; if(subtitle_codec_name) codec= avcodec_find_decoder_by_name(subtitle_codec_name); break;
2287         case AVMEDIA_TYPE_VIDEO   : is->last_video_stream    = stream_index; if(video_codec_name   ) codec= avcodec_find_decoder_by_name(   video_codec_name); break;
2288     }
2289     if (!codec)
2290         return -1;
2291
2292     avctx->workaround_bugs   = workaround_bugs;
2293     avctx->lowres            = lowres;
2294     if(avctx->lowres > codec->max_lowres){
2295         av_log(avctx, AV_LOG_WARNING, "The maximum value for lowres supported by the decoder is %d\n",
2296                 codec->max_lowres);
2297         avctx->lowres= codec->max_lowres;
2298     }
2299     avctx->idct_algo         = idct;
2300     avctx->skip_frame        = skip_frame;
2301     avctx->skip_idct         = skip_idct;
2302     avctx->skip_loop_filter  = skip_loop_filter;
2303     avctx->error_concealment = error_concealment;
2304
2305     if(avctx->lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
2306     if (fast)   avctx->flags2 |= CODEC_FLAG2_FAST;
2307     if(codec->capabilities & CODEC_CAP_DR1)
2308         avctx->flags |= CODEC_FLAG_EMU_EDGE;
2309
2310     if (!av_dict_get(opts, "threads", NULL, 0))
2311         av_dict_set(&opts, "threads", "auto", 0);
2312     if (!codec ||
2313         avcodec_open2(avctx, codec, &opts) < 0)
2314         return -1;
2315     if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2316         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2317         return AVERROR_OPTION_NOT_FOUND;
2318     }
2319
2320     /* prepare audio output */
2321     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2322         int audio_hw_buf_size = audio_open(is, avctx->channel_layout, avctx->channels, avctx->sample_rate, &is->audio_src);
2323         if (audio_hw_buf_size < 0)
2324             return -1;
2325         is->audio_hw_buf_size = audio_hw_buf_size;
2326         is->audio_tgt = is->audio_src;
2327     }
2328
2329     ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2330     switch (avctx->codec_type) {
2331     case AVMEDIA_TYPE_AUDIO:
2332         is->audio_stream = stream_index;
2333         is->audio_st = ic->streams[stream_index];
2334         is->audio_buf_size  = 0;
2335         is->audio_buf_index = 0;
2336
2337         /* init averaging filter */
2338         is->audio_diff_avg_coef  = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2339         is->audio_diff_avg_count = 0;
2340         /* since we do not have a precise enough measure of the audio FIFO fullness,
2341            we correct audio sync only if the error is larger than this threshold */
2342         is->audio_diff_threshold = 2.0 * is->audio_hw_buf_size / av_samples_get_buffer_size(NULL, is->audio_tgt.channels, is->audio_tgt.freq, is->audio_tgt.fmt, 1);
2343
2344         memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
2345         memset(&is->audio_pkt_temp, 0, sizeof(is->audio_pkt_temp));
2346         packet_queue_start(&is->audioq);
2347         SDL_PauseAudio(0);
2348         break;
2349     case AVMEDIA_TYPE_VIDEO:
2350         is->video_stream = stream_index;
2351         is->video_st = ic->streams[stream_index];
2352
2353         packet_queue_start(&is->videoq);
2354         is->video_tid = SDL_CreateThread(video_thread, is);
2355         break;
2356     case AVMEDIA_TYPE_SUBTITLE:
2357         is->subtitle_stream = stream_index;
2358         is->subtitle_st = ic->streams[stream_index];
2359         packet_queue_start(&is->subtitleq);
2360
2361         is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
2362         break;
2363     default:
2364         break;
2365     }
2366     return 0;
2367 }
2368
2369 static void stream_component_close(VideoState *is, int stream_index)
2370 {
2371     AVFormatContext *ic = is->ic;
2372     AVCodecContext *avctx;
2373
2374     if (stream_index < 0 || stream_index >= ic->nb_streams)
2375         return;
2376     avctx = ic->streams[stream_index]->codec;
2377
2378     switch (avctx->codec_type) {
2379     case AVMEDIA_TYPE_AUDIO:
2380         packet_queue_abort(&is->audioq);
2381
2382         SDL_CloseAudio();
2383
2384         packet_queue_flush(&is->audioq);
2385         av_free_packet(&is->audio_pkt);
2386         swr_free(&is->swr_ctx);
2387         av_freep(&is->audio_buf1);
2388         is->audio_buf = NULL;
2389         avcodec_free_frame(&is->frame);
2390
2391         if (is->rdft) {
2392             av_rdft_end(is->rdft);
2393             av_freep(&is->rdft_data);
2394             is->rdft = NULL;
2395             is->rdft_bits = 0;
2396         }
2397         break;
2398     case AVMEDIA_TYPE_VIDEO:
2399         packet_queue_abort(&is->videoq);
2400
2401         /* note: we also signal this mutex to make sure we unblock the
2402            video thread in all cases */
2403         SDL_LockMutex(is->pictq_mutex);
2404         SDL_CondSignal(is->pictq_cond);
2405         SDL_UnlockMutex(is->pictq_mutex);
2406
2407         SDL_WaitThread(is->video_tid, NULL);
2408
2409         packet_queue_flush(&is->videoq);
2410         break;
2411     case AVMEDIA_TYPE_SUBTITLE:
2412         packet_queue_abort(&is->subtitleq);
2413
2414         /* note: we also signal this mutex to make sure we unblock the
2415            subtitle thread in all cases */
2416         SDL_LockMutex(is->subpq_mutex);
2417         is->subtitle_stream_changed = 1;
2418
2419         SDL_CondSignal(is->subpq_cond);
2420         SDL_UnlockMutex(is->subpq_mutex);
2421
2422         SDL_WaitThread(is->subtitle_tid, NULL);
2423
2424         packet_queue_flush(&is->subtitleq);
2425         break;
2426     default:
2427         break;
2428     }
2429
2430     ic->streams[stream_index]->discard = AVDISCARD_ALL;
2431     avcodec_close(avctx);
2432 #if CONFIG_AVFILTER
2433     free_buffer_pool(&is->buffer_pool);
2434 #endif
2435     switch (avctx->codec_type) {
2436     case AVMEDIA_TYPE_AUDIO:
2437         is->audio_st = NULL;
2438         is->audio_stream = -1;
2439         break;
2440     case AVMEDIA_TYPE_VIDEO:
2441         is->video_st = NULL;
2442         is->video_stream = -1;
2443         break;
2444     case AVMEDIA_TYPE_SUBTITLE:
2445         is->subtitle_st = NULL;
2446         is->subtitle_stream = -1;
2447         break;
2448     default:
2449         break;
2450     }
2451 }
2452
2453 static int decode_interrupt_cb(void *ctx)
2454 {
2455     VideoState *is = ctx;
2456     return is->abort_request;
2457 }
2458
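/* Heuristic used by read_thread(): rtp/rtsp/sdp demuxers and rtp:/udp: URLs
 * are treated as real-time sources, for which packet buffering is not limited
 * by MAX_QUEUE_SIZE (see the infinite_buffer handling below). */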
2459 static int is_realtime(AVFormatContext *s)
2460 {
2461     if(   !strcmp(s->iformat->name, "rtp")
2462        || !strcmp(s->iformat->name, "rtsp")
2463        || !strcmp(s->iformat->name, "sdp")
2464     )
2465         return 1;
2466
2467     if(s->pb && (   !strncmp(s->filename, "rtp:", 4)
2468                  || !strncmp(s->filename, "udp:", 4)
2469                 )
2470     )
2471         return 1;
2472     return 0;
2473 }
2474
2475 /* this thread gets the stream from the disk or the network */
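/* It opens the input, selects the best audio/video/subtitle streams, opens the
 * corresponding components and then loops: it services pause, seek and
 * attached-picture requests, stops reading while the queues already hold more
 * than MAX_QUEUE_SIZE bytes (or enough packets per stream), injects flush
 * packets after a seek, and feeds empty packets at EOF so the decoders can
 * drain their delayed frames.  Looping and -autoexit are handled here as well. */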
2476 static int read_thread(void *arg)
2477 {
2478     VideoState *is = arg;
2479     AVFormatContext *ic = NULL;
2480     int err, i, ret;
2481     int st_index[AVMEDIA_TYPE_NB];
2482     AVPacket pkt1, *pkt = &pkt1;
2483     int eof = 0;
2484     int pkt_in_play_range = 0;
2485     AVDictionaryEntry *t;
2486     AVDictionary **opts;
2487     int orig_nb_streams;
2488     SDL_mutex *wait_mutex = SDL_CreateMutex();
2489
2490     memset(st_index, -1, sizeof(st_index));
2491     is->last_video_stream = is->video_stream = -1;
2492     is->last_audio_stream = is->audio_stream = -1;
2493     is->last_subtitle_stream = is->subtitle_stream = -1;
2494
2495     ic = avformat_alloc_context();
2496     ic->interrupt_callback.callback = decode_interrupt_cb;
2497     ic->interrupt_callback.opaque = is;
2498     err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
2499     if (err < 0) {
2500         print_error(is->filename, err);
2501         ret = -1;
2502         goto fail;
2503     }
2504     if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2505         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2506         ret = AVERROR_OPTION_NOT_FOUND;
2507         goto fail;
2508     }
2509     is->ic = ic;
2510
2511     if (genpts)
2512         ic->flags |= AVFMT_FLAG_GENPTS;
2513
2514     opts = setup_find_stream_info_opts(ic, codec_opts);
2515     orig_nb_streams = ic->nb_streams;
2516
2517     err = avformat_find_stream_info(ic, opts);
2518     if (err < 0) {
2519         fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
2520         ret = -1;
2521         goto fail;
2522     }
2523     for (i = 0; i < orig_nb_streams; i++)
2524         av_dict_free(&opts[i]);
2525     av_freep(&opts);
2526
2527     if (ic->pb)
2528         ic->pb->eof_reached = 0; // FIXME hack, ffplay maybe should not use url_feof() to test for the end
2529
2530     if (seek_by_bytes < 0)
2531         seek_by_bytes = !!(ic->iformat->flags & AVFMT_TS_DISCONT);
2532
2533     /* if seeking requested, we execute it */
2534     if (start_time != AV_NOPTS_VALUE) {
2535         int64_t timestamp;
2536
2537         timestamp = start_time;
2538         /* add the stream start time */
2539         if (ic->start_time != AV_NOPTS_VALUE)
2540             timestamp += ic->start_time;
2541         ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2542         if (ret < 0) {
2543             fprintf(stderr, "%s: could not seek to position %0.3f\n",
2544                     is->filename, (double)timestamp / AV_TIME_BASE);
2545         }
2546     }
2547
2548     for (i = 0; i < ic->nb_streams; i++)
2549         ic->streams[i]->discard = AVDISCARD_ALL;
2550     if (!video_disable)
2551         st_index[AVMEDIA_TYPE_VIDEO] =
2552             av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2553                                 wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2554     if (!audio_disable)
2555         st_index[AVMEDIA_TYPE_AUDIO] =
2556             av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2557                                 wanted_stream[AVMEDIA_TYPE_AUDIO],
2558                                 st_index[AVMEDIA_TYPE_VIDEO],
2559                                 NULL, 0);
2560     if (!video_disable)
2561         st_index[AVMEDIA_TYPE_SUBTITLE] =
2562             av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2563                                 wanted_stream[AVMEDIA_TYPE_SUBTITLE],
2564                                 (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2565                                  st_index[AVMEDIA_TYPE_AUDIO] :
2566                                  st_index[AVMEDIA_TYPE_VIDEO]),
2567                                 NULL, 0);
2568     if (show_status) {
2569         av_dump_format(ic, 0, is->filename, 0);
2570     }
2571
2572     is->show_mode = show_mode;
2573
2574     /* open the streams */
2575     if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2576         stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2577     }
2578
2579     ret = -1;
2580     if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2581         ret = stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2582     }
2583     is->refresh_tid = SDL_CreateThread(refresh_thread, is);
2584     if (is->show_mode == SHOW_MODE_NONE)
2585         is->show_mode = ret >= 0 ? SHOW_MODE_VIDEO : SHOW_MODE_RDFT;
2586
2587     if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2588         stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2589     }
2590
2591     if (is->video_stream < 0 && is->audio_stream < 0) {
2592         fprintf(stderr, "%s: could not open codecs\n", is->filename);
2593         ret = -1;
2594         goto fail;
2595     }
2596
2597     if (infinite_buffer < 0 && is_realtime(ic))
2598         infinite_buffer = 1;
2599
2600     for (;;) {
2601         if (is->abort_request)
2602             break;
2603         if (is->paused != is->last_paused) {
2604             is->last_paused = is->paused;
2605             if (is->paused)
2606                 is->read_pause_return = av_read_pause(ic);
2607             else
2608                 av_read_play(ic);
2609         }
2610 #if CONFIG_RTSP_DEMUXER || CONFIG_MMSH_PROTOCOL
2611         if (is->paused &&
2612                 (!strcmp(ic->iformat->name, "rtsp") ||
2613                  (ic->pb && !strncmp(input_filename, "mmsh:", 5)))) {
2614             /* wait 10 ms to avoid trying to get another packet */
2615             /* XXX: horrible */
2616             SDL_Delay(10);
2617             continue;
2618         }
2619 #endif
2620         if (is->seek_req) {
2621             int64_t seek_target = is->seek_pos;
2622             int64_t seek_min    = is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2623             int64_t seek_max    = is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2624 // FIXME the +-2 is due to rounding not being done in the correct direction in the
2625 //      generation of the seek_pos/seek_rel variables
2626
2627             ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2628             if (ret < 0) {
2629                 fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
2630             } else {
2631                 if (is->audio_stream >= 0) {
2632                     packet_queue_flush(&is->audioq);
2633                     packet_queue_put(&is->audioq, &flush_pkt);
2634                 }
2635                 if (is->subtitle_stream >= 0) {
2636                     packet_queue_flush(&is->subtitleq);
2637                     packet_queue_put(&is->subtitleq, &flush_pkt);
2638                 }
2639                 if (is->video_stream >= 0) {
2640                     packet_queue_flush(&is->videoq);
2641                     packet_queue_put(&is->videoq, &flush_pkt);
2642                 }
2643             }
2644             update_external_clock_pts(is, (seek_target + ic->start_time) / (double)AV_TIME_BASE);
2645             is->seek_req = 0;
2646             eof = 0;
2647         }
2648         if (is->que_attachments_req) {
2649             avformat_queue_attached_pictures(ic);
2650             is->que_attachments_req = 0;
2651         }
2652
2653         /* if the queues are full, no need to read more */
2654         if (infinite_buffer<1 &&
2655               (is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2656             || (   (is->audioq   .nb_packets > MIN_FRAMES || is->audio_stream < 0 || is->audioq.abort_request)
2657                 && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream < 0 || is->videoq.abort_request)
2658                 && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream < 0 || is->subtitleq.abort_request)))) {
2659             /* wait 10 ms */
2660             SDL_LockMutex(wait_mutex);
2661             SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
2662             SDL_UnlockMutex(wait_mutex);
2663             continue;
2664         }
2665         if (eof) {
2666             if (is->video_stream >= 0) {
2667                 av_init_packet(pkt);
2668                 pkt->data = NULL;
2669                 pkt->size = 0;
2670                 pkt->stream_index = is->video_stream;
2671                 packet_queue_put(&is->videoq, pkt);
2672             }
2673             if (is->audio_stream >= 0 &&
2674                 is->audio_st->codec->codec->capabilities & CODEC_CAP_DELAY) {
2675                 av_init_packet(pkt);
2676                 pkt->data = NULL;
2677                 pkt->size = 0;
2678                 pkt->stream_index = is->audio_stream;
2679                 packet_queue_put(&is->audioq, pkt);
2680             }
2681             SDL_Delay(10);
2682             if (is->audioq.size + is->videoq.size + is->subtitleq.size == 0) {
2683                 if (loop != 1 && (!loop || --loop)) {
2684                     stream_seek(is, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
2685                 } else if (autoexit) {
2686                     ret = AVERROR_EOF;
2687                     goto fail;
2688                 }
2689             }
2690             eof=0;
2691             continue;
2692         }
2693         ret = av_read_frame(ic, pkt);
2694         if (ret < 0) {
2695             if (ret == AVERROR_EOF || url_feof(ic->pb))
2696                 eof = 1;
2697             if (ic->pb && ic->pb->error)
2698                 break;
2699             SDL_LockMutex(wait_mutex);
2700             SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
2701             SDL_UnlockMutex(wait_mutex);
2702             continue;
2703         }
2704         /* check if the packet is in the play range specified by the user, then queue it, otherwise discard it */
2705         pkt_in_play_range = duration == AV_NOPTS_VALUE ||
2706                 (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
2707                 av_q2d(ic->streams[pkt->stream_index]->time_base) -
2708                 (double)(start_time != AV_NOPTS_VALUE ? start_time : 0) / 1000000
2709                 <= ((double)duration / 1000000);
2710         if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
2711             packet_queue_put(&is->audioq, pkt);
2712         } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
2713             packet_queue_put(&is->videoq, pkt);
2714         } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
2715             packet_queue_put(&is->subtitleq, pkt);
2716         } else {
2717             av_free_packet(pkt);
2718         }
2719     }
2720     /* wait until the end */
2721     while (!is->abort_request) {
2722         SDL_Delay(100);
2723     }
2724
2725     ret = 0;
2726  fail:
2727     /* close each stream */
2728     if (is->audio_stream >= 0)
2729         stream_component_close(is, is->audio_stream);
2730     if (is->video_stream >= 0)
2731         stream_component_close(is, is->video_stream);
2732     if (is->subtitle_stream >= 0)
2733         stream_component_close(is, is->subtitle_stream);
2734     if (is->ic) {
2735         avformat_close_input(&is->ic);
2736     }
2737
2738     if (ret != 0) {
2739         SDL_Event event;
2740
2741         event.type = FF_QUIT_EVENT;
2742         event.user.data1 = is;
2743         SDL_PushEvent(&event);
2744     }
2745     SDL_DestroyMutex(wait_mutex);
2746     return 0;
2747 }
2748
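/* Allocate and initialize a VideoState for the given input: set up the picture
 * and subtitle queue locks, the packet queues and the external clock, then
 * spawn read_thread(), which does the actual demuxing. */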
2749 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2750 {
2751     VideoState *is;
2752
2753     is = av_mallocz(sizeof(VideoState));
2754     if (!is)
2755         return NULL;
2756     av_strlcpy(is->filename, filename, sizeof(is->filename));
2757     is->iformat = iformat;
2758     is->ytop    = 0;
2759     is->xleft   = 0;
2760
2761     /* start video display */
2762     is->pictq_mutex = SDL_CreateMutex();
2763     is->pictq_cond  = SDL_CreateCond();
2764
2765     is->subpq_mutex = SDL_CreateMutex();
2766     is->subpq_cond  = SDL_CreateCond();
2767
2768     packet_queue_init(&is->videoq);
2769     packet_queue_init(&is->audioq);
2770     packet_queue_init(&is->subtitleq);
2771
2772     is->continue_read_thread = SDL_CreateCond();
2773
2774     update_external_clock_pts(is, 0.0);
2775     is->audio_current_pts_drift = -av_gettime() / 1000000.0;
2776     is->video_current_pts_drift = is->audio_current_pts_drift;
2777     is->av_sync_type = av_sync_type;
2778     is->read_tid     = SDL_CreateThread(read_thread, is);
2779     if (!is->read_tid) {
2780         av_free(is);
2781         return NULL;
2782     }
2783     return is;
2784 }
2785
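/* Switch to the next stream of the given type, wrapping around at the end.
 * For subtitles, wrapping past the last stream disables subtitles entirely;
 * audio candidates must have a valid sample rate and channel count. */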
2786 static void stream_cycle_channel(VideoState *is, int codec_type)
2787 {
2788     AVFormatContext *ic = is->ic;
2789     int start_index, stream_index;
2790     int old_index;
2791     AVStream *st;
2792
2793     if (codec_type == AVMEDIA_TYPE_VIDEO) {
2794         start_index = is->last_video_stream;
2795         old_index = is->video_stream;
2796     } else if (codec_type == AVMEDIA_TYPE_AUDIO) {
2797         start_index = is->last_audio_stream;
2798         old_index = is->audio_stream;
2799     } else {
2800         start_index = is->last_subtitle_stream;
2801         old_index = is->subtitle_stream;
2802     }
2803     stream_index = start_index;
2804     for (;;) {
2805         if (++stream_index >= is->ic->nb_streams)
2806         {
2807             if (codec_type == AVMEDIA_TYPE_SUBTITLE)
2808             {
2809                 stream_index = -1;
2810                 is->last_subtitle_stream = -1;
2811                 goto the_end;
2812             }
2813             if (start_index == -1)
2814                 return;
2815             stream_index = 0;
2816         }
2817         if (stream_index == start_index)
2818             return;
2819         st = ic->streams[stream_index];
2820         if (st->codec->codec_type == codec_type) {
2821             /* check that parameters are OK */
2822             switch (codec_type) {
2823             case AVMEDIA_TYPE_AUDIO:
2824                 if (st->codec->sample_rate != 0 &&
2825                     st->codec->channels != 0)
2826                     goto the_end;
2827                 break;
2828             case AVMEDIA_TYPE_VIDEO:
2829             case AVMEDIA_TYPE_SUBTITLE:
2830                 goto the_end;
2831             default:
2832                 break;
2833             }
2834         }
2835     }
2836  the_end:
2837     stream_component_close(is, old_index);
2838     stream_component_open(is, stream_index);
2839     if (codec_type == AVMEDIA_TYPE_VIDEO)
2840         is->que_attachments_req = 1;
2841 }
2842
2843
2844 static void toggle_full_screen(VideoState *is)
2845 {
2846 #if defined(__APPLE__) && SDL_VERSION_ATLEAST(1, 2, 14)
2847     /* OS X needs to reallocate the SDL overlays */
2848     int i;
2849     for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++)
2850         is->pictq[i].reallocate = 1;
2851 #endif
2852     is_full_screen = !is_full_screen;
2853     video_open(is, 1);
2854 }
2855
2856 static void toggle_pause(VideoState *is)
2857 {
2858     stream_toggle_pause(is);
2859     is->step = 0;
2860 }
2861
2862 static void step_to_next_frame(VideoState *is)
2863 {
2864     /* if the stream is paused, unpause it, then step */
2865     if (is->paused)
2866         stream_toggle_pause(is);
2867     is->step = 1;
2868 }
2869
2870 static void toggle_audio_display(VideoState *is)
2871 {
2872     int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2873     is->show_mode = (is->show_mode + 1) % SHOW_MODE_NB;
2874     fill_rectangle(screen,
2875                 is->xleft, is->ytop, is->width, is->height,
2876                 bgcolor, 1);
2877 }
2878
2879 /* handle an event sent by the GUI */
2880 static void event_loop(VideoState *cur_stream)
2881 {
2882     SDL_Event event;
2883     double incr, pos, frac;
2884
2885     for (;;) {
2886         double x;
2887         SDL_WaitEvent(&event);
2888         switch (event.type) {
2889         case SDL_KEYDOWN:
2890             if (exit_on_keydown) {
2891                 do_exit(cur_stream);
2892                 break;
2893             }
2894             switch (event.key.keysym.sym) {
2895             case SDLK_ESCAPE:
2896             case SDLK_q:
2897                 do_exit(cur_stream);
2898                 break;
2899             case SDLK_f:
2900                 toggle_full_screen(cur_stream);
2901                 cur_stream->force_refresh = 1;
2902                 break;
2903             case SDLK_p:
2904             case SDLK_SPACE:
2905                 toggle_pause(cur_stream);
2906                 break;
2907             case SDLK_s: // S: Step to next frame
2908                 step_to_next_frame(cur_stream);
2909                 break;
2910             case SDLK_a:
2911                 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
2912                 break;
2913             case SDLK_v:
2914                 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
2915                 break;
2916             case SDLK_t:
2917                 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
2918                 break;
2919             case SDLK_w:
2920                 toggle_audio_display(cur_stream);
2921                 cur_stream->force_refresh = 1;
2922                 break;
2923             case SDLK_PAGEUP:
2924                 incr = 600.0;
2925                 goto do_seek;
2926             case SDLK_PAGEDOWN:
2927                 incr = -600.0;
2928                 goto do_seek;
2929             case SDLK_LEFT:
2930                 incr = -10.0;
2931                 goto do_seek;
2932             case SDLK_RIGHT:
2933                 incr = 10.0;
2934                 goto do_seek;
2935             case SDLK_UP:
2936                 incr = 60.0;
2937                 goto do_seek;
2938             case SDLK_DOWN:
2939                 incr = -60.0;
2940             do_seek:
2941                     if (seek_by_bytes) {
2942                         if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos >= 0) {
2943                             pos = cur_stream->video_current_pos;
2944                         } else if (cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos >= 0) {
2945                             pos = cur_stream->audio_pkt.pos;
2946                         } else
2947                             pos = avio_tell(cur_stream->ic->pb);
2948                         if (cur_stream->ic->bit_rate)
2949                             incr *= cur_stream->ic->bit_rate / 8.0;
2950                         else
2951                             incr *= 180000.0;
2952                         pos += incr;
2953                         stream_seek(cur_stream, pos, incr, 1);
2954                     } else {
2955                         pos = get_master_clock(cur_stream);
2956                         pos += incr;
2957                         stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
2958                     }
2959                 break;
2960             default:
2961                 break;
2962             }
2963             break;
2964         case SDL_VIDEOEXPOSE:
2965             cur_stream->force_refresh = 1;
2966             break;
2967         case SDL_MOUSEBUTTONDOWN:
2968             if (exit_on_mousedown) {
2969                 do_exit(cur_stream);
2970                 break;
2971             }
2972         case SDL_MOUSEMOTION:
2973             if (event.type == SDL_MOUSEBUTTONDOWN) {
2974                 x = event.button.x;
2975             } else {
2976                 if (event.motion.state != SDL_PRESSED)
2977                     break;
2978                 x = event.motion.x;
2979             }
2980                 if (seek_by_bytes || cur_stream->ic->duration <= 0) {
2981                     uint64_t size =  avio_size(cur_stream->ic->pb);
2982                     stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
2983                 } else {
2984                     int64_t ts;
2985                     int ns, hh, mm, ss;
2986                     int tns, thh, tmm, tss;
2987                     tns  = cur_stream->ic->duration / 1000000LL;
2988                     thh  = tns / 3600;
2989                     tmm  = (tns % 3600) / 60;
2990                     tss  = (tns % 60);
2991                     frac = x / cur_stream->width;
2992                     ns   = frac * tns;
2993                     hh   = ns / 3600;
2994                     mm   = (ns % 3600) / 60;
2995                     ss   = (ns % 60);
2996                     fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
2997                             hh, mm, ss, thh, tmm, tss);
2998                     ts = frac * cur_stream->ic->duration;
2999                     if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
3000                         ts += cur_stream->ic->start_time;
3001                     stream_seek(cur_stream, ts, 0, 0);
3002                 }
3003             break;
3004         case SDL_VIDEORESIZE:
3005                 screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
3006                                           SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
3007                 screen_width  = cur_stream->width  = event.resize.w;
3008                 screen_height = cur_stream->height = event.resize.h;
3009                 cur_stream->force_refresh = 1;
3010             break;
3011         case SDL_QUIT:
3012         case FF_QUIT_EVENT:
3013             do_exit(cur_stream);
3014             break;
3015         case FF_ALLOC_EVENT:
3016             alloc_picture(event.user.data1);
3017             break;
3018         case FF_REFRESH_EVENT:
3019             video_refresh(event.user.data1);
3020             cur_stream->refresh = 0;
3021             break;
3022         default:
3023             break;
3024         }
3025     }
3026 }
3027
3028 static int opt_frame_size(void *optctx, const char *opt, const char *arg)
3029 {
3030     av_log(NULL, AV_LOG_WARNING, "Option -s is deprecated, use -video_size.\n");
3031     return opt_default(NULL, "video_size", arg);
3032 }
3033
3034 static int opt_width(void *optctx, const char *opt, const char *arg)
3035 {
3036     screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
3037     return 0;
3038 }
3039
3040 static int opt_height(void *optctx, const char *opt, const char *arg)
3041 {
3042     screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
3043     return 0;
3044 }
3045
3046 static int opt_format(void *optctx, const char *opt, const char *arg)
3047 {
3048     file_iformat = av_find_input_format(arg);
3049     if (!file_iformat) {
3050         fprintf(stderr, "Unknown input format: %s\n", arg);
3051         return AVERROR(EINVAL);
3052     }
3053     return 0;
3054 }
3055
3056 static int opt_frame_pix_fmt(void *optctx, const char *opt, const char *arg)
3057 {
3058     av_log(NULL, AV_LOG_WARNING, "Option -pix_fmt is deprecated, use -pixel_format.\n");
3059     return opt_default(NULL, "pixel_format", arg);
3060 }
3061
3062 static int opt_sync(void *optctx, const char *opt, const char *arg)
3063 {
3064     if (!strcmp(arg, "audio"))
3065         av_sync_type = AV_SYNC_AUDIO_MASTER;
3066     else if (!strcmp(arg, "video"))
3067         av_sync_type = AV_SYNC_VIDEO_MASTER;
3068     else if (!strcmp(arg, "ext"))
3069         av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
3070     else {
3071         fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
3072         exit(1);
3073     }
3074     return 0;
3075 }
3076
3077 static int opt_seek(void *optctx, const char *opt, const char *arg)
3078 {
3079     start_time = parse_time_or_die(opt, arg, 1);
3080     return 0;
3081 }
3082
3083 static int opt_duration(void *optctx, const char *opt, const char *arg)
3084 {
3085     duration = parse_time_or_die(opt, arg, 1);
3086     return 0;
3087 }
3088
3089 static int opt_show_mode(void *optctx, const char *opt, const char *arg)
3090 {
3091     show_mode = !strcmp(arg, "video") ? SHOW_MODE_VIDEO :
3092                 !strcmp(arg, "waves") ? SHOW_MODE_WAVES :
3093                 !strcmp(arg, "rdft" ) ? SHOW_MODE_RDFT  :
3094                 parse_number_or_die(opt, arg, OPT_INT, 0, SHOW_MODE_NB-1);
3095     return 0;
3096 }
3097
3098 static void opt_input_file(void *optctx, const char *filename)
3099 {
3100     if (input_filename) {
3101         fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
3102                 filename, input_filename);
3103         exit(1);
3104     }
3105     if (!strcmp(filename, "-"))
3106         filename = "pipe:";
3107     input_filename = filename;
3108 }
3109
3110 static int opt_codec(void *o, const char *opt, const char *arg)
3111 {
3112     switch(opt[strlen(opt)-1]){
3113     case 'a' :    audio_codec_name = arg; break;
3114     case 's' : subtitle_codec_name = arg; break;
3115     case 'v' :    video_codec_name = arg; break;
3116     }
3117     return 0;
3118 }
3119
3120 static int dummy;
3121
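/* command line options; the OPT_* flags and the option parsing come from cmdutils */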
3122 static const OptionDef options[] = {
3123 #include "cmdutils_common_opts.h"
3124     { "x", HAS_ARG, { .func_arg = opt_width }, "force displayed width", "width" },
3125     { "y", HAS_ARG, { .func_arg = opt_height }, "force displayed height", "height" },
3126     { "s", HAS_ARG | OPT_VIDEO, { .func_arg = opt_frame_size }, "set frame size (WxH or abbreviation)", "size" },
3127     { "fs", OPT_BOOL, { &is_full_screen }, "force full screen" },
3128     { "an", OPT_BOOL, { &audio_disable }, "disable audio" },
3129     { "vn", OPT_BOOL, { &video_disable }, "disable video" },
3130     { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_AUDIO] }, "select desired audio stream", "stream_number" },
3131     { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_VIDEO] }, "select desired video stream", "stream_number" },
3132     { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_SUBTITLE] }, "select desired subtitle stream", "stream_number" },
3133     { "ss", HAS_ARG, { .func_arg = opt_seek }, "seek to a given position in seconds", "pos" },
3134     { "t", HAS_ARG, { .func_arg = opt_duration }, "play \"duration\" seconds of audio/video", "duration" },
3135     { "bytes", OPT_INT | HAS_ARG, { &seek_by_bytes }, "seek by bytes 0=off 1=on -1=auto", "val" },
3136     { "nodisp", OPT_BOOL, { &display_disable }, "disable graphical display" },
3137     { "f", HAS_ARG, { .func_arg = opt_format }, "force format", "fmt" },
3138     { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, { .func_arg = opt_frame_pix_fmt }, "set pixel format", "format" },
3139     { "stats", OPT_BOOL | OPT_EXPERT, { &show_status }, "show status", "" },
3140     { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, { &workaround_bugs }, "work around bugs", "" },
3141     { "fast", OPT_BOOL | OPT_EXPERT, { &fast }, "non-spec-compliant optimizations", "" },
3142     { "genpts", OPT_BOOL | OPT_EXPERT, { &genpts }, "generate pts", "" },
3143     { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, { &decoder_reorder_pts }, "let decoder reorder pts 0=off 1=on -1=auto", ""},
3144     { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, { &lowres }, "", "" },
3145     { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, { &skip_loop_filter }, "", "" },
3146     { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, { &skip_frame }, "", "" },
3147     { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, { &skip_idct }, "", "" },
3148     { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, { &idct }, "set idct algo",  "algo" },
3149     { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, { &error_concealment }, "set error concealment options",  "bit_mask" },
3150     { "sync", HAS_ARG | OPT_EXPERT, { .func_arg = opt_sync }, "set audio-video sync. type (type=audio/video/ext)", "type" },
3151     { "autoexit", OPT_BOOL | OPT_EXPERT, { &autoexit }, "exit at the end", "" },
3152     { "exitonkeydown", OPT_BOOL | OPT_EXPERT, { &exit_on_keydown }, "exit on key down", "" },
3153     { "exitonmousedown", OPT_BOOL | OPT_EXPERT, { &exit_on_mousedown }, "exit on mouse down", "" },
3154     { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, { &loop }, "set number of times the playback shall be looped", "loop count" },
3155     { "framedrop", OPT_BOOL | OPT_EXPERT, { &framedrop }, "drop frames when cpu is too slow", "" },
3156     { "infbuf", OPT_BOOL | OPT_EXPERT, { &infinite_buffer }, "don't limit the input buffer size (useful with realtime streams)", "" },
3157     { "window_title", OPT_STRING | HAS_ARG, { &window_title }, "set window title", "window title" },
3158 #if CONFIG_AVFILTER
3159     { "vf", OPT_STRING | HAS_ARG, { &vfilters }, "video filters", "filter list" },
3160 #endif
3161     { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, { &rdftspeed }, "rdft speed", "msecs" },
3162     { "showmode", HAS_ARG, { .func_arg = opt_show_mode}, "select show mode (0 = video, 1 = waves, 2 = RDFT)", "mode" },
3163     { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, { .func_arg = opt_default }, "generic catch all option", "" },
3164     { "i", OPT_BOOL, { &dummy}, "read specified file", "input_file"},
3165     { "codec", HAS_ARG, { .func_arg = opt_codec}, "force decoder", "decoder" },
3166     { NULL, },
3167 };
3168
3169 static void show_usage(void)
3170 {
3171     av_log(NULL, AV_LOG_INFO, "Simple media player\n");
3172     av_log(NULL, AV_LOG_INFO, "usage: %s [options] input_file\n", program_name);
3173     av_log(NULL, AV_LOG_INFO, "\n");
3174 }
3175
3176 void show_help_default(const char *opt, const char *arg)
3177 {
3178     av_log_set_callback(log_callback_help);
3179     show_usage();
3180     show_help_options(options, "Main options:", 0, OPT_EXPERT, 0);
3181     show_help_options(options, "Advanced options:", OPT_EXPERT, 0, 0);
3182     printf("\n");
3183     show_help_children(avcodec_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3184     show_help_children(avformat_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3185 #if !CONFIG_AVFILTER
3186     show_help_children(sws_get_class(), AV_OPT_FLAG_ENCODING_PARAM);
3187 #else
3188     show_help_children(avfilter_get_class(), AV_OPT_FLAG_FILTERING_PARAM);
3189 #endif
3190     printf("\nWhile playing:\n"
3191            "q, ESC              quit\n"
3192            "f                   toggle full screen\n"
3193            "p, SPC              pause\n"
3194            "a                   cycle audio channel\n"
3195            "v                   cycle video channel\n"
3196            "t                   cycle subtitle channel\n"
3197            "w                   cycle audio display modes\n"
3198            "s                   activate frame-step mode\n"
3199            "left/right          seek backward/forward 10 seconds\n"
3200            "down/up             seek backward/forward 1 minute\n"
3201            "page down/page up   seek backward/forward 10 minutes\n"
3202            "mouse click         seek to percentage in file corresponding to fraction of width\n"
3203            );
3204 }
3205
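/* lock manager callback registered with av_lockmgr_register(); it must return 0 on
   success and nonzero on failure */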
3206 static int lockmgr(void **mtx, enum AVLockOp op)
3207 {
3208    switch(op) {
3209       case AV_LOCK_CREATE:
3210           *mtx = SDL_CreateMutex();
3211           if(!*mtx)
3212               return 1;
3213           return 0;
3214       case AV_LOCK_OBTAIN:
3215           return !!SDL_LockMutex(*mtx);
3216       case AV_LOCK_RELEASE:
3217           return !!SDL_UnlockMutex(*mtx);
3218       case AV_LOCK_DESTROY:
3219           SDL_DestroyMutex(*mtx);
3220           return 0;
3221    }
3222    return 1;
3223 }
3224
3225 /* program entry point */
3226 int main(int argc, char **argv)
3227 {
3228     int flags;
3229     VideoState *is;
3230     char dummy_videodriver[] = "SDL_VIDEODRIVER=dummy";
3231
3232     av_log_set_flags(AV_LOG_SKIP_REPEATED);
3233     parse_loglevel(argc, argv, options);
3234
3235     /* register all codecs, demuxers and protocols */
3236     avcodec_register_all();
3237 #if CONFIG_AVDEVICE
3238     avdevice_register_all();
3239 #endif
3240 #if CONFIG_AVFILTER
3241     avfilter_register_all();
3242 #endif
3243     av_register_all();
3244     avformat_network_init();
3245
3246     init_opts();
3247
3248     signal(SIGINT , sigterm_handler); /* Interrupt (ANSI).    */
3249     signal(SIGTERM, sigterm_handler); /* Termination (ANSI).  */
3250
3251     show_banner(argc, argv, options);
3252
3253     parse_options(NULL, argc, argv, options, opt_input_file);
3254
3255     if (!input_filename) {
3256         show_usage();
3257         fprintf(stderr, "An input file must be specified\n");
3258         fprintf(stderr, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
3259         exit(1);
3260     }
3261
3262     if (display_disable) {
3263         video_disable = 1;
3264     }
3265     flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3266     if (audio_disable)
3267         flags &= ~SDL_INIT_AUDIO;
3268     if (display_disable)
3269         SDL_putenv(dummy_videodriver); /* For the event queue, we always need a video driver. */
3270 #if !defined(__MINGW32__) && !defined(__APPLE__)
3271     flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
3272 #endif
3273     if (SDL_Init (flags)) {
3274         fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
3275         fprintf(stderr, "(Did you set the DISPLAY variable?)\n");
3276         exit(1);
3277     }
3278
3279     if (!display_disable) {
3280 #if HAVE_SDL_VIDEO_SIZE
3281         const SDL_VideoInfo *vi = SDL_GetVideoInfo();
3282         fs_screen_width = vi->current_w;
3283         fs_screen_height = vi->current_h;
3284 #endif
3285     }
3286
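    /* ignore SDL events that are never read so they do not accumulate in the queue */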
3287     SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
3288     SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3289     SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3290
3291     if (av_lockmgr_register(lockmgr)) {
3292         fprintf(stderr, "Could not initialize lock manager!\n");
3293         do_exit(NULL);
3294     }
3295
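    /* the flush packet is a sentinel queued on seeks so the decoder threads know to
       flush their codec buffers */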
3296     av_init_packet(&flush_pkt);
3297     flush_pkt.data = (uint8_t *)(intptr_t)"FLUSH";
3298
3299     is = stream_open(input_filename, file_iformat);
3300     if (!is) {
3301         fprintf(stderr, "Failed to initialize VideoState!\n");
3302         do_exit(NULL);
3303     }
3304
3305     event_loop(is);
3306
3307     /* event_loop() never returns; playback ends in do_exit(), which calls exit() */
3308
3309     return 0;
3310 }