ffplay: add serial field to PacketQueue entry and populate it
ffplay.c
1 /*
2  * Copyright (c) 2003 Fabrice Bellard
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20
21 /**
22  * @file
23  * simple media player based on the FFmpeg libraries
24  */
25
26 #include "config.h"
27 #include <inttypes.h>
28 #include <math.h>
29 #include <limits.h>
30 #include <signal.h>
31 #include "libavutil/avstring.h"
32 #include "libavutil/colorspace.h"
33 #include "libavutil/mathematics.h"
34 #include "libavutil/pixdesc.h"
35 #include "libavutil/imgutils.h"
36 #include "libavutil/dict.h"
37 #include "libavutil/parseutils.h"
38 #include "libavutil/samplefmt.h"
39 #include "libavutil/avassert.h"
40 #include "libavutil/time.h"
41 #include "libavformat/avformat.h"
42 #include "libavdevice/avdevice.h"
43 #include "libswscale/swscale.h"
44 #include "libavutil/opt.h"
45 #include "libavcodec/avfft.h"
46 #include "libswresample/swresample.h"
47
48 #if CONFIG_AVFILTER
49 # include "libavfilter/avcodec.h"
50 # include "libavfilter/avfilter.h"
51 # include "libavfilter/avfiltergraph.h"
52 # include "libavfilter/buffersink.h"
53 # include "libavfilter/buffersrc.h"
54 #endif
55
56 #include <SDL.h>
57 #include <SDL_thread.h>
58
59 #include "cmdutils.h"
60
61 #include <assert.h>
62
63 const char program_name[] = "ffplay";
64 const int program_birth_year = 2003;
65
66 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
67 #define MIN_FRAMES 5
68
69 /* SDL audio buffer size, in samples. Should be small to have precise
70    A/V sync as SDL does not have hardware buffer fullness info. */
71 #define SDL_AUDIO_BUFFER_SIZE 1024
72
73 /* no AV sync correction is done if below the AV sync threshold */
74 #define AV_SYNC_THRESHOLD 0.01
75 /* no AV correction is done if the error is too big */
76 #define AV_NOSYNC_THRESHOLD 10.0
77
78 /* maximum audio speed change to get correct sync */
79 #define SAMPLE_CORRECTION_PERCENT_MAX 10
80
81 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
82 #define AUDIO_DIFF_AVG_NB   20
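/*
 * Editorial note (illustrative, not from the original file): the A-V
 * difference is smoothed with an exponential moving average so that only
 * roughly the last AUDIO_DIFF_AVG_NB measurements carry weight.  A sketch
 * of such an average, assuming a decay chosen so that older samples fall
 * below 1% influence after AUDIO_DIFF_AVG_NB steps:
 *
 *   double coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);   // ~0.794 for 20
 *   cum      = diff + coef * cum;                        // running sum
 *   avg_diff = cum * (1.0 - coef);                       // smoothed A-V diff
 */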
83
84 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
85 /* TODO: We assume that a decoded and resampled frame fits into this buffer */
86 #define SAMPLE_ARRAY_SIZE (8 * 65536)
87
88 static int sws_flags = SWS_BICUBIC;
89
90 typedef struct MyAVPacketList {
91     AVPacket pkt;
92     struct MyAVPacketList *next;
93     int serial;
94 } MyAVPacketList;
95
96 typedef struct PacketQueue {
97     MyAVPacketList *first_pkt, *last_pkt;
98     int nb_packets;
99     int size;
100     int abort_request;
101     int serial;
102     SDL_mutex *mutex;
103     SDL_cond *cond;
104 } PacketQueue;
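/*
 * Note on the serial fields (this commit): every packet put on the queue
 * records the queue's current serial in its MyAVPacketList entry, and the
 * serial is incremented each time the special flush packet is queued (see
 * packet_queue_put_private() below).  A consumer can therefore tell whether
 * a dequeued packet was put before or after the most recent flush, e.g.
 * after a seek.  Illustrative sketch (locking elided, not original code):
 *
 *   AVPacket pkt;
 *   int pkt_serial;
 *   if (packet_queue_get(&q, &pkt, 1, &pkt_serial) > 0) {
 *       if (pkt_serial != q.serial) {
 *           av_free_packet(&pkt);   // enqueued before the last flush: stale
 *       }
 *   }
 */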
105
106 #define VIDEO_PICTURE_QUEUE_SIZE 4
107 #define SUBPICTURE_QUEUE_SIZE 4
108
109 typedef struct VideoPicture {
110     double pts;             // presentation timestamp for this picture
111     int64_t pos;            // byte position in file
112     int skip;
113     SDL_Overlay *bmp;
114     int width, height; /* source height & width */
115     AVRational sample_aspect_ratio;
116     int allocated;
117     int reallocate;
118     int serial;
119
120 #if CONFIG_AVFILTER
121     AVFilterBufferRef *picref;
122 #endif
123 } VideoPicture;
124
125 typedef struct SubPicture {
126     double pts; /* presentation time stamp for this picture */
127     AVSubtitle sub;
128 } SubPicture;
129
130 typedef struct AudioParams {
131     int freq;
132     int channels;
133     int channel_layout;
134     enum AVSampleFormat fmt;
135 } AudioParams;
136
137 enum {
138     AV_SYNC_AUDIO_MASTER, /* default choice */
139     AV_SYNC_VIDEO_MASTER,
140     AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
141 };
142
143 typedef struct VideoState {
144     SDL_Thread *read_tid;
145     SDL_Thread *video_tid;
146     SDL_Thread *refresh_tid;
147     AVInputFormat *iformat;
148     int no_background;
149     int abort_request;
150     int force_refresh;
151     int paused;
152     int last_paused;
153     int que_attachments_req;
154     int seek_req;
155     int seek_flags;
156     int64_t seek_pos;
157     int64_t seek_rel;
158     int read_pause_return;
159     AVFormatContext *ic;
160
161     int audio_stream;
162
163     int av_sync_type;
164     double external_clock;                   ///< external clock base
165     double external_clock_drift;             ///< external clock base - time (av_gettime) at which we updated external_clock
166     int64_t external_clock_time;             ///< last reference time
167
168     double audio_clock;
169     double audio_diff_cum; /* used for AV difference average computation */
170     double audio_diff_avg_coef;
171     double audio_diff_threshold;
172     int audio_diff_avg_count;
173     AVStream *audio_st;
174     PacketQueue audioq;
175     int audio_hw_buf_size;
176     DECLARE_ALIGNED(16,uint8_t,audio_buf2)[AVCODEC_MAX_AUDIO_FRAME_SIZE * 4];
177     uint8_t silence_buf[SDL_AUDIO_BUFFER_SIZE];
178     uint8_t *audio_buf;
179     uint8_t *audio_buf1;
180     unsigned int audio_buf_size; /* in bytes */
181     int audio_buf_index; /* in bytes */
182     int audio_write_buf_size;
183     AVPacket audio_pkt_temp;
184     AVPacket audio_pkt;
185     int audio_pkt_temp_serial;
186     struct AudioParams audio_src;
187     struct AudioParams audio_tgt;
188     struct SwrContext *swr_ctx;
189     double audio_current_pts;
190     double audio_current_pts_drift;
191     int frame_drops_early;
192     int frame_drops_late;
193     AVFrame *frame;
194
195     enum ShowMode {
196         SHOW_MODE_NONE = -1, SHOW_MODE_VIDEO = 0, SHOW_MODE_WAVES, SHOW_MODE_RDFT, SHOW_MODE_NB
197     } show_mode;
198     int16_t sample_array[SAMPLE_ARRAY_SIZE];
199     int sample_array_index;
200     int last_i_start;
201     RDFTContext *rdft;
202     int rdft_bits;
203     FFTSample *rdft_data;
204     int xpos;
205
206     SDL_Thread *subtitle_tid;
207     int subtitle_stream;
208     int subtitle_stream_changed;
209     AVStream *subtitle_st;
210     PacketQueue subtitleq;
211     SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
212     int subpq_size, subpq_rindex, subpq_windex;
213     SDL_mutex *subpq_mutex;
214     SDL_cond *subpq_cond;
215
216     double frame_timer;
217     double frame_last_pts;
218     double frame_last_duration;
219     double frame_last_dropped_pts;
220     double frame_last_returned_time;
221     double frame_last_filter_delay;
222     int64_t frame_last_dropped_pos;
223     double video_clock;             // pts of last decoded frame / predicted pts of next decoded frame
224     int video_stream;
225     AVStream *video_st;
226     PacketQueue videoq;
227     double video_current_pts;       // current displayed pts (different from video_clock if frame fifos are used)
228     double video_current_pts_drift; // video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
229     int64_t video_current_pos;      // current displayed file pos
230     VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
231     int pictq_size, pictq_rindex, pictq_windex;
232     SDL_mutex *pictq_mutex;
233     SDL_cond *pictq_cond;
234 #if !CONFIG_AVFILTER
235     struct SwsContext *img_convert_ctx;
236 #endif
237
238     char filename[1024];
239     int width, height, xleft, ytop;
240     int step;
241
242 #if CONFIG_AVFILTER
243     AVFilterContext *in_video_filter;   // the first filter in the video chain
244     AVFilterContext *out_video_filter;  // the last filter in the video chain
245     int use_dr1;
246     FrameBuffer *buffer_pool;
247 #endif
248
249     int refresh;
250     int last_video_stream, last_audio_stream, last_subtitle_stream;
251
252     SDL_cond *continue_read_thread;
253 } VideoState;
254
255 /* options specified by the user */
256 static AVInputFormat *file_iformat;
257 static const char *input_filename;
258 static const char *window_title;
259 static int fs_screen_width;
260 static int fs_screen_height;
261 static int screen_width  = 0;
262 static int screen_height = 0;
263 static int audio_disable;
264 static int video_disable;
265 static int wanted_stream[AVMEDIA_TYPE_NB] = {
266     [AVMEDIA_TYPE_AUDIO]    = -1,
267     [AVMEDIA_TYPE_VIDEO]    = -1,
268     [AVMEDIA_TYPE_SUBTITLE] = -1,
269 };
270 static int seek_by_bytes = -1;
271 static int display_disable;
272 static int show_status = 1;
273 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
274 static int64_t start_time = AV_NOPTS_VALUE;
275 static int64_t duration = AV_NOPTS_VALUE;
276 static int workaround_bugs = 1;
277 static int fast = 0;
278 static int genpts = 0;
279 static int lowres = 0;
280 static int idct = FF_IDCT_AUTO;
281 static enum AVDiscard skip_frame       = AVDISCARD_DEFAULT;
282 static enum AVDiscard skip_idct        = AVDISCARD_DEFAULT;
283 static enum AVDiscard skip_loop_filter = AVDISCARD_DEFAULT;
284 static int error_concealment = 3;
285 static int decoder_reorder_pts = -1;
286 static int autoexit;
287 static int exit_on_keydown;
288 static int exit_on_mousedown;
289 static int loop = 1;
290 static int framedrop = -1;
291 static int infinite_buffer = -1;
292 static enum ShowMode show_mode = SHOW_MODE_NONE;
293 static const char *audio_codec_name;
294 static const char *subtitle_codec_name;
295 static const char *video_codec_name;
296 static int rdftspeed = 20;
297 #if CONFIG_AVFILTER
298 static char *vfilters = NULL;
299 #endif
300
301 /* current context */
302 static int is_full_screen;
303 static int64_t audio_callback_time;
304
305 static AVPacket flush_pkt;
306
307 #define FF_ALLOC_EVENT   (SDL_USEREVENT)
308 #define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
309 #define FF_QUIT_EVENT    (SDL_USEREVENT + 2)
310
311 static SDL_Surface *screen;
312
313 static int packet_queue_put(PacketQueue *q, AVPacket *pkt);
314
315 static int packet_queue_put_private(PacketQueue *q, AVPacket *pkt)
316 {
317     MyAVPacketList *pkt1;
318
319     if (q->abort_request)
320        return -1;
321
322     pkt1 = av_malloc(sizeof(MyAVPacketList));
323     if (!pkt1)
324         return -1;
325     pkt1->pkt = *pkt;
326     pkt1->next = NULL;
327     if (pkt == &flush_pkt)
328         q->serial++;
329     pkt1->serial = q->serial;
330
331     if (!q->last_pkt)
332         q->first_pkt = pkt1;
333     else
334         q->last_pkt->next = pkt1;
335     q->last_pkt = pkt1;
336     q->nb_packets++;
337     q->size += pkt1->pkt.size + sizeof(*pkt1);
338     /* XXX: should duplicate packet data in DV case */
339     SDL_CondSignal(q->cond);
340     return 0;
341 }
342
343 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
344 {
345     int ret;
346
347     /* duplicate the packet */
348     if (pkt != &flush_pkt && av_dup_packet(pkt) < 0)
349         return -1;
350
351     SDL_LockMutex(q->mutex);
352     ret = packet_queue_put_private(q, pkt);
353     SDL_UnlockMutex(q->mutex);
354
355     if (pkt != &flush_pkt && ret < 0)
356         av_free_packet(pkt);
357
358     return ret;
359 }
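/*
 * Illustrative note: a queue's serial is advanced by enqueueing the special
 * flush packet.  Roughly what the read thread does on a seek (the read
 * thread itself is outside this excerpt), shown here as a sketch:
 *
 *   packet_queue_flush(&is->videoq);              // drop all pending packets
 *   packet_queue_put(&is->videoq, &flush_pkt);    // bumps videoq.serial
 */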
360
361 /* packet queue handling */
362 static void packet_queue_init(PacketQueue *q)
363 {
364     memset(q, 0, sizeof(PacketQueue));
365     q->mutex = SDL_CreateMutex();
366     q->cond = SDL_CreateCond();
367     q->abort_request = 1;
368 }
369
370 static void packet_queue_flush(PacketQueue *q)
371 {
372     MyAVPacketList *pkt, *pkt1;
373
374     SDL_LockMutex(q->mutex);
375     for (pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
376         pkt1 = pkt->next;
377         av_free_packet(&pkt->pkt);
378         av_freep(&pkt);
379     }
380     q->last_pkt = NULL;
381     q->first_pkt = NULL;
382     q->nb_packets = 0;
383     q->size = 0;
384     SDL_UnlockMutex(q->mutex);
385 }
386
387 static void packet_queue_destroy(PacketQueue *q)
388 {
389     packet_queue_flush(q);
390     SDL_DestroyMutex(q->mutex);
391     SDL_DestroyCond(q->cond);
392 }
393
394 static void packet_queue_abort(PacketQueue *q)
395 {
396     SDL_LockMutex(q->mutex);
397
398     q->abort_request = 1;
399
400     SDL_CondSignal(q->cond);
401
402     SDL_UnlockMutex(q->mutex);
403 }
404
405 static void packet_queue_start(PacketQueue *q)
406 {
407     SDL_LockMutex(q->mutex);
408     q->abort_request = 0;
409     packet_queue_put_private(q, &flush_pkt);
410     SDL_UnlockMutex(q->mutex);
411 }
412
413 /* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
414 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block, int *serial)
415 {
416     MyAVPacketList *pkt1;
417     int ret;
418
419     SDL_LockMutex(q->mutex);
420
421     for (;;) {
422         if (q->abort_request) {
423             ret = -1;
424             break;
425         }
426
427         pkt1 = q->first_pkt;
428         if (pkt1) {
429             q->first_pkt = pkt1->next;
430             if (!q->first_pkt)
431                 q->last_pkt = NULL;
432             q->nb_packets--;
433             q->size -= pkt1->pkt.size + sizeof(*pkt1);
434             *pkt = pkt1->pkt;
435             if (serial)
436                 *serial = pkt1->serial;
437             av_free(pkt1);
438             ret = 1;
439             break;
440         } else if (!block) {
441             ret = 0;
442             break;
443         } else {
444             SDL_CondWait(q->cond, q->mutex);
445         }
446     }
447     SDL_UnlockMutex(q->mutex);
448     return ret;
449 }
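/*
 * Usage sketch (illustrative): a non-blocking poll of the queue from a
 * decoder loop.  With block == 0 the call returns 0 immediately when the
 * queue is empty instead of waiting on the condition variable:
 *
 *   AVPacket pkt;
 *   int serial;
 *   int r = packet_queue_get(&is->audioq, &pkt, 0, &serial);
 *   if (r < 0)
 *       return -1;     // queue was aborted
 *   if (r == 0)
 *       goto again;    // queue empty, try again later
 *   // got a packet; serial identifies which flush generation it belongs to
 */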
450
451 static inline void fill_rectangle(SDL_Surface *screen,
452                                   int x, int y, int w, int h, int color)
453 {
454     SDL_Rect rect;
455     rect.x = x;
456     rect.y = y;
457     rect.w = w;
458     rect.h = h;
459     SDL_FillRect(screen, &rect, color);
460 }
461
462 #define ALPHA_BLEND(a, oldp, newp, s)\
463 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
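/*
 * Illustrative arithmetic: ALPHA_BLEND mixes an existing sample with a new
 * one using an 8-bit alpha; the shift s compensates for new values that are
 * sums over 2^s samples (as used for the chroma planes below).  With s = 0:
 *   ALPHA_BLEND(255, oldp, newp, 0) == newp   (fully opaque)
 *   ALPHA_BLEND(  0, oldp, newp, 0) == oldp   (fully transparent)
 *   ALPHA_BLEND(128, 100,  200,  0) == 150    (integer midpoint blend)
 */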
464
465 #define RGBA_IN(r, g, b, a, s)\
466 {\
467     unsigned int v = ((const uint32_t *)(s))[0];\
468     a = (v >> 24) & 0xff;\
469     r = (v >> 16) & 0xff;\
470     g = (v >> 8) & 0xff;\
471     b = v & 0xff;\
472 }
473
474 #define YUVA_IN(y, u, v, a, s, pal)\
475 {\
476     unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
477     a = (val >> 24) & 0xff;\
478     y = (val >> 16) & 0xff;\
479     u = (val >> 8) & 0xff;\
480     v = val & 0xff;\
481 }
482
483 #define YUVA_OUT(d, y, u, v, a)\
484 {\
485     ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
486 }
487
488
489 #define BPP 1
490
491 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
492 {
493     int wrap, wrap3, width2, skip2;
494     int y, u, v, a, u1, v1, a1, w, h;
495     uint8_t *lum, *cb, *cr;
496     const uint8_t *p;
497     const uint32_t *pal;
498     int dstx, dsty, dstw, dsth;
499
500     dstw = av_clip(rect->w, 0, imgw);
501     dsth = av_clip(rect->h, 0, imgh);
502     dstx = av_clip(rect->x, 0, imgw - dstw);
503     dsty = av_clip(rect->y, 0, imgh - dsth);
504     lum = dst->data[0] + dsty * dst->linesize[0];
505     cb  = dst->data[1] + (dsty >> 1) * dst->linesize[1];
506     cr  = dst->data[2] + (dsty >> 1) * dst->linesize[2];
507
508     width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
509     skip2 = dstx >> 1;
510     wrap = dst->linesize[0];
511     wrap3 = rect->pict.linesize[0];
512     p = rect->pict.data[0];
513     pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */
514
515     if (dsty & 1) {
516         lum += dstx;
517         cb += skip2;
518         cr += skip2;
519
520         if (dstx & 1) {
521             YUVA_IN(y, u, v, a, p, pal);
522             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
523             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
524             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
525             cb++;
526             cr++;
527             lum++;
528             p += BPP;
529         }
530         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
531             YUVA_IN(y, u, v, a, p, pal);
532             u1 = u;
533             v1 = v;
534             a1 = a;
535             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
536
537             YUVA_IN(y, u, v, a, p + BPP, pal);
538             u1 += u;
539             v1 += v;
540             a1 += a;
541             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
542             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
543             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
544             cb++;
545             cr++;
546             p += 2 * BPP;
547             lum += 2;
548         }
549         if (w) {
550             YUVA_IN(y, u, v, a, p, pal);
551             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
552             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
553             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
554             p++;
555             lum++;
556         }
557         p += wrap3 - dstw * BPP;
558         lum += wrap - dstw - dstx;
559         cb += dst->linesize[1] - width2 - skip2;
560         cr += dst->linesize[2] - width2 - skip2;
561     }
562     for (h = dsth - (dsty & 1); h >= 2; h -= 2) {
563         lum += dstx;
564         cb += skip2;
565         cr += skip2;
566
567         if (dstx & 1) {
568             YUVA_IN(y, u, v, a, p, pal);
569             u1 = u;
570             v1 = v;
571             a1 = a;
572             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
573             p += wrap3;
574             lum += wrap;
575             YUVA_IN(y, u, v, a, p, pal);
576             u1 += u;
577             v1 += v;
578             a1 += a;
579             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
580             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
581             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
582             cb++;
583             cr++;
584             p += -wrap3 + BPP;
585             lum += -wrap + 1;
586         }
587         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
588             YUVA_IN(y, u, v, a, p, pal);
589             u1 = u;
590             v1 = v;
591             a1 = a;
592             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
593
594             YUVA_IN(y, u, v, a, p + BPP, pal);
595             u1 += u;
596             v1 += v;
597             a1 += a;
598             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
599             p += wrap3;
600             lum += wrap;
601
602             YUVA_IN(y, u, v, a, p, pal);
603             u1 += u;
604             v1 += v;
605             a1 += a;
606             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
607
608             YUVA_IN(y, u, v, a, p + BPP, pal);
609             u1 += u;
610             v1 += v;
611             a1 += a;
612             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
613
614             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
615             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
616
617             cb++;
618             cr++;
619             p += -wrap3 + 2 * BPP;
620             lum += -wrap + 2;
621         }
622         if (w) {
623             YUVA_IN(y, u, v, a, p, pal);
624             u1 = u;
625             v1 = v;
626             a1 = a;
627             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
628             p += wrap3;
629             lum += wrap;
630             YUVA_IN(y, u, v, a, p, pal);
631             u1 += u;
632             v1 += v;
633             a1 += a;
634             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
635             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
636             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
637             cb++;
638             cr++;
639             p += -wrap3 + BPP;
640             lum += -wrap + 1;
641         }
642         p += wrap3 + (wrap3 - dstw * BPP);
643         lum += wrap + (wrap - dstw - dstx);
644         cb += dst->linesize[1] - width2 - skip2;
645         cr += dst->linesize[2] - width2 - skip2;
646     }
647     /* handle odd height */
648     if (h) {
649         lum += dstx;
650         cb += skip2;
651         cr += skip2;
652
653         if (dstx & 1) {
654             YUVA_IN(y, u, v, a, p, pal);
655             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
656             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
657             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
658             cb++;
659             cr++;
660             lum++;
661             p += BPP;
662         }
663         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
664             YUVA_IN(y, u, v, a, p, pal);
665             u1 = u;
666             v1 = v;
667             a1 = a;
668             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
669
670             YUVA_IN(y, u, v, a, p + BPP, pal);
671             u1 += u;
672             v1 += v;
673             a1 += a;
674             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
675             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
676             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
677             cb++;
678             cr++;
679             p += 2 * BPP;
680             lum += 2;
681         }
682         if (w) {
683             YUVA_IN(y, u, v, a, p, pal);
684             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
685             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
686             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
687         }
688     }
689 }
690
691 static void free_subpicture(SubPicture *sp)
692 {
693     avsubtitle_free(&sp->sub);
694 }
695
696 static void calculate_display_rect(SDL_Rect *rect, int scr_xleft, int scr_ytop, int scr_width, int scr_height, VideoPicture *vp)
697 {
698     float aspect_ratio;
699     int width, height, x, y;
700
701     if (vp->sample_aspect_ratio.num == 0)
702         aspect_ratio = 0;
703     else
704         aspect_ratio = av_q2d(vp->sample_aspect_ratio);
705
706     if (aspect_ratio <= 0.0)
707         aspect_ratio = 1.0;
708     aspect_ratio *= (float)vp->width / (float)vp->height;
709
710     /* XXX: we assume the screen has a 1:1 pixel aspect ratio */
711     height = scr_height;
712     width = ((int)rint(height * aspect_ratio)) & ~1;
713     if (width > scr_width) {
714         width = scr_width;
715         height = ((int)rint(width / aspect_ratio)) & ~1;
716     }
717     x = (scr_width - width) / 2;
718     y = (scr_height - height) / 2;
719     rect->x = scr_xleft + x;
720     rect->y = scr_ytop  + y;
721     rect->w = FFMAX(width,  1);
722     rect->h = FFMAX(height, 1);
723 }
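/*
 * Worked example (illustrative): a 1920x1080 picture with a 1:1 sample
 * aspect ratio displayed in a 1024x768 area.  aspect_ratio becomes
 * 1920/1080 ~ 1.778; filling the full height would need a width of ~1365,
 * which exceeds 1024, so the width is clamped to 1024 and the height
 * becomes rint(1024 / 1.778) & ~1 = 576.  The picture is then centred:
 * x = 0, y = (768 - 576) / 2 = 96, giving a 1024x576 rectangle.
 */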
724
725 static void video_image_display(VideoState *is)
726 {
727     VideoPicture *vp;
728     SubPicture *sp;
729     AVPicture pict;
730     SDL_Rect rect;
731     int i;
732
733     vp = &is->pictq[is->pictq_rindex];
734     if (vp->bmp) {
735         if (is->subtitle_st) {
736             if (is->subpq_size > 0) {
737                 sp = &is->subpq[is->subpq_rindex];
738
739                 if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) {
740                     SDL_LockYUVOverlay (vp->bmp);
741
742                     pict.data[0] = vp->bmp->pixels[0];
743                     pict.data[1] = vp->bmp->pixels[2];
744                     pict.data[2] = vp->bmp->pixels[1];
745
746                     pict.linesize[0] = vp->bmp->pitches[0];
747                     pict.linesize[1] = vp->bmp->pitches[2];
748                     pict.linesize[2] = vp->bmp->pitches[1];
749
750                     for (i = 0; i < sp->sub.num_rects; i++)
751                         blend_subrect(&pict, sp->sub.rects[i],
752                                       vp->bmp->w, vp->bmp->h);
753
754                     SDL_UnlockYUVOverlay (vp->bmp);
755                 }
756             }
757         }
758
759         calculate_display_rect(&rect, is->xleft, is->ytop, is->width, is->height, vp);
760
761         SDL_DisplayYUVOverlay(vp->bmp, &rect);
762     }
763 }
764
765 static inline int compute_mod(int a, int b)
766 {
767     return a < 0 ? a%b + b : a%b;
768 }
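/*
 * compute_mod() is a mathematical modulo that always yields a result in
 * [0, b) even for negative a, unlike C's % operator:
 *   compute_mod( 7, 5) == 2
 *   compute_mod(-3, 5) == 2   (in C, -3 % 5 == -3)
 * It is used below to wrap indices around the circular sample_array.
 */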
769
770 static void video_audio_display(VideoState *s)
771 {
772     int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
773     int ch, channels, h, h2, bgcolor, fgcolor;
774     int16_t time_diff;
775     int rdft_bits, nb_freq;
776
777     for (rdft_bits = 1; (1 << rdft_bits) < 2 * s->height; rdft_bits++)
778         ;
779     nb_freq = 1 << (rdft_bits - 1);
780
781     /* compute display index: center on currently output samples */
782     channels = s->audio_tgt.channels;
783     nb_display_channels = channels;
784     if (!s->paused) {
785         int data_used= s->show_mode == SHOW_MODE_WAVES ? s->width : (2*nb_freq);
786         n = 2 * channels;
787         delay = s->audio_write_buf_size;
788         delay /= n;
789
790         /* to be more precise, we take into account the time spent since
791            the last buffer computation */
792         if (audio_callback_time) {
793             time_diff = av_gettime() - audio_callback_time;
794             delay -= (time_diff * s->audio_tgt.freq) / 1000000;
795         }
796
797         delay += 2 * data_used;
798         if (delay < data_used)
799             delay = data_used;
800
801         i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
802         if (s->show_mode == SHOW_MODE_WAVES) {
803             h = INT_MIN;
804             for (i = 0; i < 1000; i += channels) {
805                 int idx = (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
806                 int a = s->sample_array[idx];
807                 int b = s->sample_array[(idx + 4 * channels) % SAMPLE_ARRAY_SIZE];
808                 int c = s->sample_array[(idx + 5 * channels) % SAMPLE_ARRAY_SIZE];
809                 int d = s->sample_array[(idx + 9 * channels) % SAMPLE_ARRAY_SIZE];
810                 int score = a - d;
811                 if (h < score && (b ^ c) < 0) {
812                     h = score;
813                     i_start = idx;
814                 }
815             }
816         }
817
818         s->last_i_start = i_start;
819     } else {
820         i_start = s->last_i_start;
821     }
822
823     bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
824     if (s->show_mode == SHOW_MODE_WAVES) {
825         fill_rectangle(screen,
826                        s->xleft, s->ytop, s->width, s->height,
827                        bgcolor);
828
829         fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
830
831         /* total height for one channel */
832         h = s->height / nb_display_channels;
833         /* graph height / 2 */
834         h2 = (h * 9) / 20;
835         for (ch = 0; ch < nb_display_channels; ch++) {
836             i = i_start + ch;
837             y1 = s->ytop + ch * h + (h / 2); /* position of center line */
838             for (x = 0; x < s->width; x++) {
839                 y = (s->sample_array[i] * h2) >> 15;
840                 if (y < 0) {
841                     y = -y;
842                     ys = y1 - y;
843                 } else {
844                     ys = y1;
845                 }
846                 fill_rectangle(screen,
847                                s->xleft + x, ys, 1, y,
848                                fgcolor);
849                 i += channels;
850                 if (i >= SAMPLE_ARRAY_SIZE)
851                     i -= SAMPLE_ARRAY_SIZE;
852             }
853         }
854
855         fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
856
857         for (ch = 1; ch < nb_display_channels; ch++) {
858             y = s->ytop + ch * h;
859             fill_rectangle(screen,
860                            s->xleft, y, s->width, 1,
861                            fgcolor);
862         }
863         SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
864     } else {
865         nb_display_channels= FFMIN(nb_display_channels, 2);
866         if (rdft_bits != s->rdft_bits) {
867             av_rdft_end(s->rdft);
868             av_free(s->rdft_data);
869             s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
870             s->rdft_bits = rdft_bits;
871             s->rdft_data = av_malloc(4 * nb_freq * sizeof(*s->rdft_data));
872         }
873         {
874             FFTSample *data[2];
875             for (ch = 0; ch < nb_display_channels; ch++) {
876                 data[ch] = s->rdft_data + 2 * nb_freq * ch;
877                 i = i_start + ch;
878                 for (x = 0; x < 2 * nb_freq; x++) {
879                     double w = (x-nb_freq) * (1.0 / nb_freq);
880                     data[ch][x] = s->sample_array[i] * (1.0 - w * w);
881                     i += channels;
882                     if (i >= SAMPLE_ARRAY_SIZE)
883                         i -= SAMPLE_ARRAY_SIZE;
884                 }
885                 av_rdft_calc(s->rdft, data[ch]);
886             }
887             // least efficient way to do this; we could access the data directly, but it's more than fast enough
888             for (y = 0; y < s->height; y++) {
889                 double w = 1 / sqrt(nb_freq);
890                 int a = sqrt(w * sqrt(data[0][2 * y + 0] * data[0][2 * y + 0] + data[0][2 * y + 1] * data[0][2 * y + 1]));
891                 int b = (nb_display_channels == 2 ) ? sqrt(w * sqrt(data[1][2 * y + 0] * data[1][2 * y + 0]
892                        + data[1][2 * y + 1] * data[1][2 * y + 1])) : a;
893                 a = FFMIN(a, 255);
894                 b = FFMIN(b, 255);
895                 fgcolor = SDL_MapRGB(screen->format, a, b, (a + b) / 2);
896
897                 fill_rectangle(screen,
898                             s->xpos, s->height-y, 1, 1,
899                             fgcolor);
900             }
901         }
902         SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
903         if (!s->paused)
904             s->xpos++;
905         if (s->xpos >= s->width)
906             s->xpos= s->xleft;
907     }
908 }
909
910 static void stream_close(VideoState *is)
911 {
912     VideoPicture *vp;
913     int i;
914     /* XXX: use a special url_shutdown call to abort parse cleanly */
915     is->abort_request = 1;
916     SDL_WaitThread(is->read_tid, NULL);
917     SDL_WaitThread(is->refresh_tid, NULL);
918     packet_queue_destroy(&is->videoq);
919     packet_queue_destroy(&is->audioq);
920     packet_queue_destroy(&is->subtitleq);
921
922     /* free all pictures */
923     for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
924         vp = &is->pictq[i];
925 #if CONFIG_AVFILTER
926         avfilter_unref_bufferp(&vp->picref);
927 #endif
928         if (vp->bmp) {
929             SDL_FreeYUVOverlay(vp->bmp);
930             vp->bmp = NULL;
931         }
932     }
933     SDL_DestroyMutex(is->pictq_mutex);
934     SDL_DestroyCond(is->pictq_cond);
935     SDL_DestroyMutex(is->subpq_mutex);
936     SDL_DestroyCond(is->subpq_cond);
937     SDL_DestroyCond(is->continue_read_thread);
938 #if !CONFIG_AVFILTER
939     if (is->img_convert_ctx)
940         sws_freeContext(is->img_convert_ctx);
941 #endif
942     av_free(is);
943 }
944
945 static void do_exit(VideoState *is)
946 {
947     if (is) {
948         stream_close(is);
949     }
950     av_lockmgr_register(NULL);
951     uninit_opts();
952 #if CONFIG_AVFILTER
953     avfilter_uninit();
954     av_freep(&vfilters);
955 #endif
956     avformat_network_deinit();
957     if (show_status)
958         printf("\n");
959     SDL_Quit();
960     av_log(NULL, AV_LOG_QUIET, "%s", "");
961     exit(0);
962 }
963
964 static void sigterm_handler(int sig)
965 {
966     exit(123);
967 }
968
969 static int video_open(VideoState *is, int force_set_video_mode)
970 {
971     int flags = SDL_HWSURFACE | SDL_ASYNCBLIT | SDL_HWACCEL;
972     int w,h;
973     VideoPicture *vp = &is->pictq[is->pictq_rindex];
974     SDL_Rect rect;
975
976     if (is_full_screen) flags |= SDL_FULLSCREEN;
977     else                flags |= SDL_RESIZABLE;
978
979     if (is_full_screen && fs_screen_width) {
980         w = fs_screen_width;
981         h = fs_screen_height;
982     } else if (!is_full_screen && screen_width) {
983         w = screen_width;
984         h = screen_height;
985     } else if (vp->width) {
986         calculate_display_rect(&rect, 0, 0, INT_MAX, vp->height, vp);
987         w = rect.w;
988         h = rect.h;
989     } else {
990         w = 640;
991         h = 480;
992     }
993     if (screen && is->width == screen->w && screen->w == w
994        && is->height== screen->h && screen->h == h && !force_set_video_mode)
995         return 0;
996     screen = SDL_SetVideoMode(w, h, 0, flags);
997     if (!screen) {
998         fprintf(stderr, "SDL: could not set video mode - exiting\n");
999         do_exit(is);
1000     }
1001     if (!window_title)
1002         window_title = input_filename;
1003     SDL_WM_SetCaption(window_title, window_title);
1004
1005     is->width  = screen->w;
1006     is->height = screen->h;
1007
1008     return 0;
1009 }
1010
1011 /* display the current picture, if any */
1012 static void video_display(VideoState *is)
1013 {
1014     if (!screen)
1015         video_open(is, 0);
1016     if (is->audio_st && is->show_mode != SHOW_MODE_VIDEO)
1017         video_audio_display(is);
1018     else if (is->video_st)
1019         video_image_display(is);
1020 }
1021
1022 static int refresh_thread(void *opaque)
1023 {
1024     VideoState *is= opaque;
1025     while (!is->abort_request) {
1026         SDL_Event event;
1027         event.type = FF_REFRESH_EVENT;
1028         event.user.data1 = opaque;
1029         if (!is->refresh && (!is->paused || is->force_refresh)) {
1030             is->refresh = 1;
1031             SDL_PushEvent(&event);
1032         }
1033         //FIXME ideally we should wait for the correct time, but SDL's event passing is so slow it would be silly
1034         av_usleep(is->audio_st && is->show_mode != SHOW_MODE_VIDEO ? rdftspeed*1000 : 5000);
1035     }
1036     return 0;
1037 }
1038
1039 /* get the current audio clock value */
1040 static double get_audio_clock(VideoState *is)
1041 {
1042     if (is->paused) {
1043         return is->audio_current_pts;
1044     } else {
1045         return is->audio_current_pts_drift + av_gettime() / 1000000.0;
1046     }
1047 }
1048
1049 /* get the current video clock value */
1050 static double get_video_clock(VideoState *is)
1051 {
1052     if (is->paused) {
1053         return is->video_current_pts;
1054     } else {
1055         return is->video_current_pts_drift + av_gettime() / 1000000.0;
1056     }
1057 }
1058
1059 /* get the current external clock value */
1060 static double get_external_clock(VideoState *is)
1061 {
1062     if (is->paused) {
1063         return is->external_clock;
1064     } else {
1065         return is->external_clock_drift + av_gettime() / 1000000.0;
1066     }
1067 }
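/*
 * Note on the clock representation (illustrative): each clock stores a
 * drift value, drift = pts - wallclock_at_last_update, so the current
 * clock can be read at any moment as drift + wallclock without updating
 * any state.  For example, if a frame with pts 10.0 s was displayed 0.5 s
 * ago, the clock now reads 10.5 s:
 *
 *   double now   = av_gettime() / 1000000.0;
 *   double clock = drift + now;   // == pts + (now - time_of_last_update)
 */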
1068
1069 /* get the current master clock value */
1070 static double get_master_clock(VideoState *is)
1071 {
1072     double val;
1073
1074     if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1075         if (is->video_st)
1076             val = get_video_clock(is);
1077         else
1078             val = get_audio_clock(is);
1079     } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1080         if (is->audio_st)
1081             val = get_audio_clock(is);
1082         else
1083             val = get_video_clock(is);
1084     } else {
1085         val = get_external_clock(is);
1086     }
1087     return val;
1088 }
1089
1090 static void update_external_clock_pts(VideoState *is, double pts)
1091 {
1092    is->external_clock_time = av_gettime();
1093    is->external_clock = pts;
1094    is->external_clock_drift = pts - is->external_clock_time / 1000000.0;
1095 }
1096
1097 static void check_external_clock_sync(VideoState *is, double pts) {
1098     if (fabs(get_external_clock(is) - pts) > AV_NOSYNC_THRESHOLD) {
1099         update_external_clock_pts(is, pts);
1100     }
1101 }
1102
1103 /* seek in the stream */
1104 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1105 {
1106     if (!is->seek_req) {
1107         is->seek_pos = pos;
1108         is->seek_rel = rel;
1109         is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1110         if (seek_by_bytes)
1111             is->seek_flags |= AVSEEK_FLAG_BYTE;
1112         is->seek_req = 1;
1113     }
1114 }
1115
1116 /* pause or resume the video */
1117 static void stream_toggle_pause(VideoState *is)
1118 {
1119     if (is->paused) {
1120         is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
1121         if (is->read_pause_return != AVERROR(ENOSYS)) {
1122             is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
1123         }
1124         is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
1125     }
1126     update_external_clock_pts(is, get_external_clock(is));
1127     is->paused = !is->paused;
1128 }
1129
1130 static double compute_target_delay(double delay, VideoState *is)
1131 {
1132     double sync_threshold, diff;
1133
1134     /* update delay to follow master synchronisation source */
1135     if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1136          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1137         /* if video is slave, we try to correct big delays by
1138            duplicating or deleting a frame */
1139         diff = get_video_clock(is) - get_master_clock(is);
1140
1141         /* skip or repeat frame. We take into account the
1142            delay to compute the threshold. I still don't know
1143            if it is the best guess */
1144         sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1145         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1146             if (diff <= -sync_threshold)
1147                 delay = 0;
1148             else if (diff >= sync_threshold)
1149                 delay = 2 * delay;
1150         }
1151     }
1152
1153     av_dlog(NULL, "video: delay=%0.3f A-V=%f\n",
1154             delay, -diff);
1155
1156     return delay;
1157 }
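/*
 * Worked example (illustrative): with a nominal frame delay of 0.040 s the
 * sync threshold is FFMAX(0.01, 0.040) = 0.040 s.  If the video clock lags
 * the master clock by 0.060 s (diff = -0.060 <= -0.040), the delay drops to
 * 0 and the next frame is shown immediately; if it leads by 0.060 s, the
 * delay is doubled to 0.080 s so the picture lingers.  Differences of
 * AV_NOSYNC_THRESHOLD (10 s) or more are considered hopeless and are left
 * uncorrected.
 */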
1158
1159 static void pictq_next_picture(VideoState *is) {
1160     /* update queue size and signal for next picture */
1161     if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1162         is->pictq_rindex = 0;
1163
1164     SDL_LockMutex(is->pictq_mutex);
1165     is->pictq_size--;
1166     SDL_CondSignal(is->pictq_cond);
1167     SDL_UnlockMutex(is->pictq_mutex);
1168 }
1169
1170 static void pictq_prev_picture(VideoState *is) {
1171     VideoPicture *prevvp;
1172     /* update queue size and signal for the previous picture */
1173     prevvp = &is->pictq[(is->pictq_rindex + VIDEO_PICTURE_QUEUE_SIZE - 1) % VIDEO_PICTURE_QUEUE_SIZE];
1174     if (prevvp->allocated && !prevvp->skip) {
1175         SDL_LockMutex(is->pictq_mutex);
1176         if (is->pictq_size < VIDEO_PICTURE_QUEUE_SIZE - 1) {
1177             if (--is->pictq_rindex == -1)
1178                 is->pictq_rindex = VIDEO_PICTURE_QUEUE_SIZE - 1;
1179             is->pictq_size++;
1180         }
1181         SDL_CondSignal(is->pictq_cond);
1182         SDL_UnlockMutex(is->pictq_mutex);
1183     }
1184 }
1185
1186 static void update_video_pts(VideoState *is, double pts, int64_t pos, int serial) {
1187     double time = av_gettime() / 1000000.0;
1188     /* update current video pts */
1189     is->video_current_pts = pts;
1190     is->video_current_pts_drift = is->video_current_pts - time;
1191     is->video_current_pos = pos;
1192     is->frame_last_pts = pts;
1193     check_external_clock_sync(is, is->video_current_pts);
1194 }
1195
1196 /* called to display each frame */
1197 static void video_refresh(void *opaque)
1198 {
1199     VideoState *is = opaque;
1200     VideoPicture *vp;
1201     double time;
1202
1203     SubPicture *sp, *sp2;
1204
1205     if (is->video_st) {
1206         if (is->force_refresh)
1207             pictq_prev_picture(is);
1208 retry:
1209         if (is->pictq_size == 0) {
1210             SDL_LockMutex(is->pictq_mutex);
1211             if (is->frame_last_dropped_pts != AV_NOPTS_VALUE && is->frame_last_dropped_pts > is->frame_last_pts) {
1212                 update_video_pts(is, is->frame_last_dropped_pts, is->frame_last_dropped_pos, 0);
1213                 is->frame_last_dropped_pts = AV_NOPTS_VALUE;
1214             }
1215             SDL_UnlockMutex(is->pictq_mutex);
1216             // nothing to do, no picture to display in the queue
1217         } else {
1218             double last_duration, duration, delay;
1219             /* dequeue the picture */
1220             vp = &is->pictq[is->pictq_rindex];
1221
1222             if (vp->skip) {
1223                 pictq_next_picture(is);
1224                 goto retry;
1225             }
1226
1227             if (is->paused)
1228                 goto display;
1229
1230             /* compute nominal last_duration */
1231             last_duration = vp->pts - is->frame_last_pts;
1232             if (last_duration > 0 && last_duration < 10.0) {
1233                 /* if duration of the last frame was sane, update last_duration in video state */
1234                 is->frame_last_duration = last_duration;
1235             }
1236             delay = compute_target_delay(is->frame_last_duration, is);
1237
1238             time= av_gettime()/1000000.0;
1239             if (time < is->frame_timer + delay)
1240                 return;
1241
1242             if (delay > 0)
1243                 is->frame_timer += delay * FFMAX(1, floor((time-is->frame_timer) / delay));
1244
1245             SDL_LockMutex(is->pictq_mutex);
1246             update_video_pts(is, vp->pts, vp->pos, vp->serial);
1247             SDL_UnlockMutex(is->pictq_mutex);
1248
1249             if (is->pictq_size > 1) {
1250                 VideoPicture *nextvp = &is->pictq[(is->pictq_rindex + 1) % VIDEO_PICTURE_QUEUE_SIZE];
1251                 duration = nextvp->pts - vp->pts;
1252                 if((framedrop>0 || (framedrop && is->av_sync_type != AV_SYNC_VIDEO_MASTER)) && time > is->frame_timer + duration){
1253                     is->frame_drops_late++;
1254                     pictq_next_picture(is);
1255                     goto retry;
1256                 }
1257             }
1258
1259             if (is->subtitle_st) {
1260                 if (is->subtitle_stream_changed) {
1261                     SDL_LockMutex(is->subpq_mutex);
1262
1263                     while (is->subpq_size) {
1264                         free_subpicture(&is->subpq[is->subpq_rindex]);
1265
1266                         /* update queue size and signal for next picture */
1267                         if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1268                             is->subpq_rindex = 0;
1269
1270                         is->subpq_size--;
1271                     }
1272                     is->subtitle_stream_changed = 0;
1273
1274                     SDL_CondSignal(is->subpq_cond);
1275                     SDL_UnlockMutex(is->subpq_mutex);
1276                 } else {
1277                     if (is->subpq_size > 0) {
1278                         sp = &is->subpq[is->subpq_rindex];
1279
1280                         if (is->subpq_size > 1)
1281                             sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1282                         else
1283                             sp2 = NULL;
1284
1285                         if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1286                                 || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1287                         {
1288                             free_subpicture(sp);
1289
1290                             /* update queue size and signal for next picture */
1291                             if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1292                                 is->subpq_rindex = 0;
1293
1294                             SDL_LockMutex(is->subpq_mutex);
1295                             is->subpq_size--;
1296                             SDL_CondSignal(is->subpq_cond);
1297                             SDL_UnlockMutex(is->subpq_mutex);
1298                         }
1299                     }
1300                 }
1301             }
1302
1303 display:
1304             /* display picture */
1305             if (!display_disable)
1306                 video_display(is);
1307
1308             pictq_next_picture(is);
1309         }
1310     } else if (is->audio_st) {
1311         /* draw the next audio frame */
1312
1313         /* if there is only an audio stream, then display the audio bars (better
1314            than nothing, just to test the implementation) */
1315
1316         /* display picture */
1317         if (!display_disable)
1318             video_display(is);
1319     }
1320     is->force_refresh = 0;
1321     if (show_status) {
1322         static int64_t last_time;
1323         int64_t cur_time;
1324         int aqsize, vqsize, sqsize;
1325         double av_diff;
1326
1327         cur_time = av_gettime();
1328         if (!last_time || (cur_time - last_time) >= 30000) {
1329             aqsize = 0;
1330             vqsize = 0;
1331             sqsize = 0;
1332             if (is->audio_st)
1333                 aqsize = is->audioq.size;
1334             if (is->video_st)
1335                 vqsize = is->videoq.size;
1336             if (is->subtitle_st)
1337                 sqsize = is->subtitleq.size;
1338             av_diff = 0;
1339             if (is->audio_st && is->video_st)
1340                 av_diff = get_audio_clock(is) - get_video_clock(is);
1341             printf("%7.2f A-V:%7.3f fd=%4d aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
1342                    get_master_clock(is),
1343                    av_diff,
1344                    is->frame_drops_early + is->frame_drops_late,
1345                    aqsize / 1024,
1346                    vqsize / 1024,
1347                    sqsize,
1348                    is->video_st ? is->video_st->codec->pts_correction_num_faulty_dts : 0,
1349                    is->video_st ? is->video_st->codec->pts_correction_num_faulty_pts : 0);
1350             fflush(stdout);
1351             last_time = cur_time;
1352         }
1353     }
1354 }
1355
1356 /* allocate a picture (this needs to be done in the main thread to avoid
1357    potential locking problems) */
1358 static void alloc_picture(VideoState *is)
1359 {
1360     VideoPicture *vp;
1361
1362     vp = &is->pictq[is->pictq_windex];
1363
1364     if (vp->bmp)
1365         SDL_FreeYUVOverlay(vp->bmp);
1366
1367 #if CONFIG_AVFILTER
1368     avfilter_unref_bufferp(&vp->picref);
1369 #endif
1370
1371     video_open(is, 0);
1372
1373     vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
1374                                    SDL_YV12_OVERLAY,
1375                                    screen);
1376     if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
1377         /* SDL allocates a buffer smaller than requested if the video
1378          * overlay hardware is unable to support the requested size. */
1379         fprintf(stderr, "Error: the video system does not support an image\n"
1380                         "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n"
1381                         "to reduce the image size.\n", vp->width, vp->height );
1382         do_exit(is);
1383     }
1384
1385     SDL_LockMutex(is->pictq_mutex);
1386     vp->allocated = 1;
1387     SDL_CondSignal(is->pictq_cond);
1388     SDL_UnlockMutex(is->pictq_mutex);
1389 }
1390
1391 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos, int serial)
1392 {
1393     VideoPicture *vp;
1394     double frame_delay, pts = pts1;
1395
1396     /* compute the exact PTS for the picture if it is omitted in the stream
1397      * pts1 is the dts of the pkt / pts of the frame */
1398     if (pts != 0) {
1399         /* update video clock with pts, if present */
1400         is->video_clock = pts;
1401     } else {
1402         pts = is->video_clock;
1403     }
1404     /* update video clock for next frame */
1405     frame_delay = av_q2d(is->video_st->codec->time_base);
1406     /* for MPEG2, the frame can be repeated, so we update the
1407        clock accordingly */
1408     frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1409     is->video_clock += frame_delay;
1410
1411 #if defined(DEBUG_SYNC) && 0
1412     printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
1413            av_get_picture_type_char(src_frame->pict_type), pts, pts1);
1414 #endif
1415
1416     /* wait until we have space to put a new picture */
1417     SDL_LockMutex(is->pictq_mutex);
1418
1419     /* keep the last already displayed picture in the queue */
1420     while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE - 2 &&
1421            !is->videoq.abort_request) {
1422         SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1423     }
1424     SDL_UnlockMutex(is->pictq_mutex);
1425
1426     if (is->videoq.abort_request)
1427         return -1;
1428
1429     vp = &is->pictq[is->pictq_windex];
1430
1431 #if CONFIG_AVFILTER
1432     vp->sample_aspect_ratio = ((AVFilterBufferRef *)src_frame->opaque)->video->sample_aspect_ratio;
1433 #else
1434     vp->sample_aspect_ratio = av_guess_sample_aspect_ratio(is->ic, is->video_st, src_frame);
1435 #endif
1436
1437     /* alloc or resize hardware picture buffer */
1438     if (!vp->bmp || vp->reallocate || !vp->allocated ||
1439         vp->width  != src_frame->width ||
1440         vp->height != src_frame->height) {
1441         SDL_Event event;
1442
1443         vp->allocated  = 0;
1444         vp->reallocate = 0;
1445         vp->width = src_frame->width;
1446         vp->height = src_frame->height;
1447
1448         /* the allocation must be done in the main thread to avoid
1449            locking problems. */
1450         event.type = FF_ALLOC_EVENT;
1451         event.user.data1 = is;
1452         SDL_PushEvent(&event);
1453
1454         /* wait until the picture is allocated */
1455         SDL_LockMutex(is->pictq_mutex);
1456         while (!vp->allocated && !is->videoq.abort_request) {
1457             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1458         }
1459         /* if the queue is aborted, we have to pop the pending ALLOC event or wait for the allocation to complete */
1460         if (is->videoq.abort_request && SDL_PeepEvents(&event, 1, SDL_GETEVENT, SDL_EVENTMASK(FF_ALLOC_EVENT)) != 1) {
1461             while (!vp->allocated) {
1462                 SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1463             }
1464         }
1465         SDL_UnlockMutex(is->pictq_mutex);
1466
1467         if (is->videoq.abort_request)
1468             return -1;
1469     }
1470
1471     /* if the frame is not skipped, then display it */
1472     if (vp->bmp) {
1473         AVPicture pict = { { 0 } };
1474 #if CONFIG_AVFILTER
1475         avfilter_unref_bufferp(&vp->picref);
1476         vp->picref = src_frame->opaque;
1477 #endif
1478
1479         /* get a pointer to the bitmap */
1480         SDL_LockYUVOverlay (vp->bmp);
1481
1482         pict.data[0] = vp->bmp->pixels[0];
1483         pict.data[1] = vp->bmp->pixels[2];
1484         pict.data[2] = vp->bmp->pixels[1];
1485
1486         pict.linesize[0] = vp->bmp->pitches[0];
1487         pict.linesize[1] = vp->bmp->pitches[2];
1488         pict.linesize[2] = vp->bmp->pitches[1];
1489
1490 #if CONFIG_AVFILTER
1491         // FIXME use direct rendering
1492         av_picture_copy(&pict, (AVPicture *)src_frame,
1493                         src_frame->format, vp->width, vp->height);
1494 #else
1495         sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
1496         is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
1497             vp->width, vp->height, src_frame->format, vp->width, vp->height,
1498             AV_PIX_FMT_YUV420P, sws_flags, NULL, NULL, NULL);
1499         if (is->img_convert_ctx == NULL) {
1500             fprintf(stderr, "Cannot initialize the conversion context\n");
1501             exit(1);
1502         }
1503         sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1504                   0, vp->height, pict.data, pict.linesize);
1505 #endif
1506         /* update the bitmap content */
1507         SDL_UnlockYUVOverlay(vp->bmp);
1508
1509         vp->pts = pts;
1510         vp->pos = pos;
1511         vp->skip = 0;
1512         vp->serial = serial;
1513
1514         /* now we can update the picture count */
1515         if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1516             is->pictq_windex = 0;
1517         SDL_LockMutex(is->pictq_mutex);
1518         is->pictq_size++;
1519         SDL_UnlockMutex(is->pictq_mutex);
1520     }
1521     return 0;
1522 }
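/*
 * Illustrative note: queue_picture() stores the serial of the packet the
 * frame was decoded from in the VideoPicture.  That value can be compared
 * against the queue's current serial to recognize pictures decoded before
 * the most recent flush (e.g. a seek).  A sketch of such a check in a
 * display loop (not code from this commit):
 *
 *   VideoPicture *vp = &is->pictq[is->pictq_rindex];
 *   if (vp->serial != is->videoq.serial)
 *       pictq_next_picture(is);   // stale picture, skip it
 */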
1523
1524 static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt, int *serial)
1525 {
1526     int got_picture, i;
1527
1528     if (packet_queue_get(&is->videoq, pkt, 1, serial) < 0)
1529         return -1;
1530
1531     if (pkt->data == flush_pkt.data) {
1532         avcodec_flush_buffers(is->video_st->codec);
1533
1534         SDL_LockMutex(is->pictq_mutex);
1535         // Make sure there are no long delay timers (ideally we should just flush the queue but that's harder)
1536         for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
1537             is->pictq[i].skip = 1;
1538         }
1539         while (is->pictq_size && !is->videoq.abort_request) {
1540             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1541         }
1542         is->video_current_pos = -1;
1543         is->frame_last_pts = AV_NOPTS_VALUE;
1544         is->frame_last_duration = 0;
1545         is->frame_timer = (double)av_gettime() / 1000000.0;
1546         is->frame_last_dropped_pts = AV_NOPTS_VALUE;
1547         SDL_UnlockMutex(is->pictq_mutex);
1548
1549         return 0;
1550     }
1551
1552     if(avcodec_decode_video2(is->video_st->codec, frame, &got_picture, pkt) < 0)
1553         return 0;
1554
1555     if (got_picture) {
1556         int ret = 1;
1557
1558         if (decoder_reorder_pts == -1) {
1559             *pts = av_frame_get_best_effort_timestamp(frame);
1560         } else if (decoder_reorder_pts) {
1561             *pts = frame->pkt_pts;
1562         } else {
1563             *pts = frame->pkt_dts;
1564         }
1565
1566         if (*pts == AV_NOPTS_VALUE) {
1567             *pts = 0;
1568         }
1569
1570         if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) || is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK) &&
1571              (framedrop>0 || (framedrop && is->av_sync_type != AV_SYNC_VIDEO_MASTER))) {
1572             SDL_LockMutex(is->pictq_mutex);
1573             if (is->frame_last_pts != AV_NOPTS_VALUE && *pts) {
1574                 double clockdiff = get_video_clock(is) - get_master_clock(is);
1575                 double dpts = av_q2d(is->video_st->time_base) * *pts;
1576                 double ptsdiff = dpts - is->frame_last_pts;
1577                 if (fabs(clockdiff) < AV_NOSYNC_THRESHOLD &&
1578                      ptsdiff > 0 && ptsdiff < AV_NOSYNC_THRESHOLD &&
1579                      clockdiff + ptsdiff - is->frame_last_filter_delay < 0) {
1580                     is->frame_last_dropped_pos = pkt->pos;
1581                     is->frame_last_dropped_pts = dpts;
1582                     is->frame_drops_early++;
1583                     ret = 0;
1584                 }
1585             }
1586             SDL_UnlockMutex(is->pictq_mutex);
1587         }
1588
1589         return ret;
1590     }
1591     return 0;
1592 }
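/*
 * Worked example of the early-drop test above (illustrative): suppose the
 * video clock trails the master clock by 0.10 s (clockdiff = -0.10), the
 * new frame's pts is 0.04 s after the previous one (ptsdiff = 0.04), and
 * the last filter pass took 0.01 s.  Then
 *   clockdiff + ptsdiff - frame_last_filter_delay = -0.10 + 0.04 - 0.01 = -0.07 < 0
 * meaning the frame would still be late when its turn comes, so it is not
 * queued for display and frame_drops_early is incremented.
 */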
1593
1594 #if CONFIG_AVFILTER
1595 static int configure_filtergraph(AVFilterGraph *graph, const char *filtergraph,
1596                                  AVFilterContext *source_ctx, AVFilterContext *sink_ctx)
1597 {
1598     int ret;
1599     AVFilterInOut *outputs = NULL, *inputs = NULL;
1600
1601     if (filtergraph) {
1602         outputs = avfilter_inout_alloc();
1603         inputs  = avfilter_inout_alloc();
1604         if (!outputs || !inputs) {
1605             ret = AVERROR(ENOMEM);
1606             goto fail;
1607         }
1608
1609         outputs->name       = av_strdup("in");
1610         outputs->filter_ctx = source_ctx;
1611         outputs->pad_idx    = 0;
1612         outputs->next       = NULL;
1613
1614         inputs->name        = av_strdup("out");
1615         inputs->filter_ctx  = sink_ctx;
1616         inputs->pad_idx     = 0;
1617         inputs->next        = NULL;
1618
1619         if ((ret = avfilter_graph_parse(graph, filtergraph, &inputs, &outputs, NULL)) < 0)
1620             goto fail;
1621     } else {
1622         if ((ret = avfilter_link(source_ctx, 0, sink_ctx, 0)) < 0)
1623             goto fail;
1624     }
1625
1626     return avfilter_graph_config(graph, NULL);
1627 fail:
1628     avfilter_inout_free(&outputs);
1629     avfilter_inout_free(&inputs);
1630     return ret;
1631 }
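
/* Editor's sketch (not part of ffplay): configure_filtergraph() hangs the
 * user-supplied chain between the labelled "in" (source) and "out" (sink)
 * pads.  This hypothetical helper assumes src and sink were already created
 * in graph g; the chain is what "-vf transpose=1" would produce. */
#if 0
static int build_graph_example(AVFilterGraph *g,
                               AVFilterContext *src, AVFilterContext *sink)
{
    return configure_filtergraph(g, "transpose=1", src, sink);
}
#endif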
1632
1633 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters)
1634 {
1635     static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE };
1636     char sws_flags_str[128];
1637     char buffersrc_args[256];
1638     int ret;
1639     AVBufferSinkParams *buffersink_params = av_buffersink_params_alloc();
1640     AVFilterContext *filt_src = NULL, *filt_out = NULL, *filt_format, *filt_crop;
1641     AVCodecContext *codec = is->video_st->codec;
1642
1643     snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%d", sws_flags);
1644     graph->scale_sws_opts = av_strdup(sws_flags_str);
1645
1646     snprintf(buffersrc_args, sizeof(buffersrc_args),
1647              "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
1648              codec->width, codec->height, codec->pix_fmt,
1649              is->video_st->time_base.num, is->video_st->time_base.den,
1650              codec->sample_aspect_ratio.num, FFMAX(codec->sample_aspect_ratio.den, 1));
1651
1652     if ((ret = avfilter_graph_create_filter(&filt_src,
1653                                             avfilter_get_by_name("buffer"),
1654                                             "ffplay_buffer", buffersrc_args, NULL,
1655                                             graph)) < 0)
1656         return ret;
1657
1658     buffersink_params->pixel_fmts = pix_fmts;
1659     ret = avfilter_graph_create_filter(&filt_out,
1660                                        avfilter_get_by_name("ffbuffersink"),
1661                                        "ffplay_buffersink", NULL, buffersink_params, graph);
1662     av_freep(&buffersink_params);
1663     if (ret < 0)
1664         return ret;
1665
1666     /* The SDL YUV code does not handle odd width/height with some driver
1667      * combinations, therefore we crop the picture to an even width/height. */
1668     if ((ret = avfilter_graph_create_filter(&filt_crop,
1669                                             avfilter_get_by_name("crop"),
1670                                             "ffplay_crop", "floor(in_w/2)*2:floor(in_h/2)*2", NULL, graph)) < 0)
1671         return ret;
1672     if ((ret = avfilter_graph_create_filter(&filt_format,
1673                                             avfilter_get_by_name("format"),
1674                                             "format", "yuv420p", NULL, graph)) < 0)
1675         return ret;
1676     if ((ret = avfilter_link(filt_crop, 0, filt_format, 0)) < 0)
1677         return ret;
1678     if ((ret = avfilter_link(filt_format, 0, filt_out, 0)) < 0)
1679         return ret;
1680
1681     if ((ret = configure_filtergraph(graph, vfilters, filt_src, filt_crop)) < 0)
1682         return ret;
1683
1684     is->in_video_filter  = filt_src;
1685     is->out_video_filter = filt_out;
1686
1687     return ret;
1688 }
1689
1690 #endif  /* CONFIG_AVFILTER */
1691
1692 static int video_thread(void *arg)
1693 {
1694     AVPacket pkt = { 0 };
1695     VideoState *is = arg;
1696     AVFrame *frame = avcodec_alloc_frame();
1697     int64_t pts_int = AV_NOPTS_VALUE, pos = -1;
1698     double pts;
1699     int ret;
1700     int serial = 0;
1701
1702 #if CONFIG_AVFILTER
1703     AVCodecContext *codec = is->video_st->codec;
1704     AVFilterGraph *graph = avfilter_graph_alloc();
1705     AVFilterContext *filt_out = NULL, *filt_in = NULL;
1706     int last_w = 0;
1707     int last_h = 0;
1708     enum AVPixelFormat last_format = -2;
1709
1710     if (codec->codec->capabilities & CODEC_CAP_DR1) {
1711         is->use_dr1 = 1;
1712         codec->get_buffer     = codec_get_buffer;
1713         codec->release_buffer = codec_release_buffer;
1714         codec->opaque         = &is->buffer_pool;
1715     }
1716 #endif
1717
1718     for (;;) {
1719 #if CONFIG_AVFILTER
1720         AVFilterBufferRef *picref;
1721         AVRational tb;
1722 #endif
1723         while (is->paused && !is->videoq.abort_request)
1724             SDL_Delay(10);
1725
1726         avcodec_get_frame_defaults(frame);
1727         av_free_packet(&pkt);
1728
1729         ret = get_video_frame(is, frame, &pts_int, &pkt, &serial);
1730         if (ret < 0)
1731             goto the_end;
1732
1733         if (!ret)
1734             continue;
1735
1736 #if CONFIG_AVFILTER
1737         if (   last_w != is->video_st->codec->width
1738             || last_h != is->video_st->codec->height
1739             || last_format != is->video_st->codec->pix_fmt) {
1740             av_log(NULL, AV_LOG_INFO, "Frame changed from size:%dx%d format:%d to size:%dx%d format:%d\n",
1741                    last_w, last_h, last_format, is->video_st->codec->width, is->video_st->codec->height, is->video_st->codec->pix_fmt);
1742             avfilter_graph_free(&graph);
1743             graph = avfilter_graph_alloc();
1744             if ((ret = configure_video_filters(graph, is, vfilters)) < 0) {
1745                 SDL_Event event;
1746                 event.type = FF_QUIT_EVENT;
1747                 event.user.data1 = is;
1748                 SDL_PushEvent(&event);
1749                 av_free_packet(&pkt);
1750                 goto the_end;
1751             }
1752             filt_in  = is->in_video_filter;
1753             filt_out = is->out_video_filter;
1754             last_w = is->video_st->codec->width;
1755             last_h = is->video_st->codec->height;
1756             last_format = is->video_st->codec->pix_fmt;
1757         }
1758
1759         frame->pts = pts_int;
1760         frame->sample_aspect_ratio = av_guess_sample_aspect_ratio(is->ic, is->video_st, frame);
1761         if (is->use_dr1 && frame->opaque) {
1762             FrameBuffer      *buf = frame->opaque;
1763             AVFilterBufferRef *fb = avfilter_get_video_buffer_ref_from_arrays(
1764                                         frame->data, frame->linesize,
1765                                         AV_PERM_READ | AV_PERM_PRESERVE,
1766                                         frame->width, frame->height,
1767                                         frame->format);
1768
1769             avfilter_copy_frame_props(fb, frame);
1770             fb->buf->priv           = buf;
1771             fb->buf->free           = filter_release_buffer;
1772
1773             buf->refcount++;
1774             av_buffersrc_add_ref(filt_in, fb, AV_BUFFERSRC_FLAG_NO_COPY);
1775
1776         } else
1777             av_buffersrc_write_frame(filt_in, frame);
1778
1779         av_free_packet(&pkt);
1780
1781         while (ret >= 0) {
1782             is->frame_last_returned_time = av_gettime() / 1000000.0;
1783
1784             ret = av_buffersink_get_buffer_ref(filt_out, &picref, 0);
1785             if (ret < 0) {
1786                 ret = 0;
1787                 break;
1788             }
1789
1790             is->frame_last_filter_delay = av_gettime() / 1000000.0 - is->frame_last_returned_time;
1791             if (fabs(is->frame_last_filter_delay) > AV_NOSYNC_THRESHOLD / 10.0)
1792                 is->frame_last_filter_delay = 0;
1793
1794             avfilter_copy_buf_props(frame, picref);
1795
1796             pts_int = picref->pts;
1797             tb      = filt_out->inputs[0]->time_base;
1798             pos     = picref->pos;
1799             frame->opaque = picref;
1800
1801             if (av_cmp_q(tb, is->video_st->time_base)) {
1802                 av_unused int64_t pts1 = pts_int;
1803                 pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
1804                 av_dlog(NULL, "video_thread(): "
1805                         "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
1806                         tb.num, tb.den, pts1,
1807                         is->video_st->time_base.num, is->video_st->time_base.den, pts_int);
1808             }
1809             pts = pts_int * av_q2d(is->video_st->time_base);
1810             ret = queue_picture(is, frame, pts, pos, serial);
1811         }
1812 #else
1813         pts = pts_int * av_q2d(is->video_st->time_base);
1814         ret = queue_picture(is, frame, pts, pkt.pos, serial);
1815 #endif
1816
1817         if (ret < 0)
1818             goto the_end;
1819
1820         if (is->step)
1821             stream_toggle_pause(is);
1822     }
1823  the_end:
1824     avcodec_flush_buffers(is->video_st->codec);
1825 #if CONFIG_AVFILTER
1826     avfilter_graph_free(&graph);
1827 #endif
1828     av_free_packet(&pkt);
1829     avcodec_free_frame(&frame);
1830     return 0;
1831 }
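
/* Editor's sketch (not part of ffplay): the av_rescale_q() step above in
 * concrete numbers.  A pts of 100 expressed in a hypothetical 1/25 filter
 * timebase becomes 360000 in a 1/90000 stream timebase. */
#if 0
static int64_t rescale_pts_example(void)
{
    AVRational filter_tb = { 1, 25 };
    AVRational stream_tb = { 1, 90000 };
    return av_rescale_q(100, filter_tb, stream_tb); /* 100 * 90000 / 25 = 360000 */
}
#endif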
1832
1833 static int subtitle_thread(void *arg)
1834 {
1835     VideoState *is = arg;
1836     SubPicture *sp;
1837     AVPacket pkt1, *pkt = &pkt1;
1838     int got_subtitle;
1839     double pts;
1840     int i, j;
1841     int r, g, b, y, u, v, a;
1842
1843     for (;;) {
1844         while (is->paused && !is->subtitleq.abort_request) {
1845             SDL_Delay(10);
1846         }
1847         if (packet_queue_get(&is->subtitleq, pkt, 1, NULL) < 0)
1848             break;
1849
1850         if (pkt->data == flush_pkt.data) {
1851             avcodec_flush_buffers(is->subtitle_st->codec);
1852             continue;
1853         }
1854         SDL_LockMutex(is->subpq_mutex);
1855         while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
1856                !is->subtitleq.abort_request) {
1857             SDL_CondWait(is->subpq_cond, is->subpq_mutex);
1858         }
1859         SDL_UnlockMutex(is->subpq_mutex);
1860
1861         if (is->subtitleq.abort_request)
1862             return 0;
1863
1864         sp = &is->subpq[is->subpq_windex];
1865
1866         /* NOTE: pts is the PTS of the _first_ picture beginning in
1867            this packet, if any */
1868         pts = 0;
1869         if (pkt->pts != AV_NOPTS_VALUE)
1870             pts = av_q2d(is->subtitle_st->time_base) * pkt->pts;
1871
1872         avcodec_decode_subtitle2(is->subtitle_st->codec, &sp->sub,
1873                                  &got_subtitle, pkt);
1874         if (got_subtitle && sp->sub.format == 0) {
1875             if (sp->sub.pts != AV_NOPTS_VALUE)
1876                 pts = sp->sub.pts / (double)AV_TIME_BASE;
1877             sp->pts = pts;
1878
1879             for (i = 0; i < sp->sub.num_rects; i++)
1880             {
1881                 for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
1882                 {
1883                     RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
1884                     y = RGB_TO_Y_CCIR(r, g, b);
1885                     u = RGB_TO_U_CCIR(r, g, b, 0);
1886                     v = RGB_TO_V_CCIR(r, g, b, 0);
1887                     YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
1888                 }
1889             }
1890
1891             /* now we can update the picture count */
1892             if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
1893                 is->subpq_windex = 0;
1894             SDL_LockMutex(is->subpq_mutex);
1895             is->subpq_size++;
1896             SDL_UnlockMutex(is->subpq_mutex);
1897         }
1898         av_free_packet(pkt);
1899     }
1900     return 0;
1901 }
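
/* Editor's sketch (not part of ffplay): the palette conversion above with a
 * concrete entry.  Full-range white (255,255,255) maps to limited-range
 * Y=235 with U and V at the 128 mid-point when run through the CCIR macros
 * from libavutil/colorspace.h. */
#if 0
static void palette_entry_example(void)
{
    int r = 255, g = 255, b = 255;
    int y = RGB_TO_Y_CCIR(r, g, b);    /* 235 */
    int u = RGB_TO_U_CCIR(r, g, b, 0); /* 128 */
    int v = RGB_TO_V_CCIR(r, g, b, 0); /* 128 */
    (void)y; (void)u; (void)v;
}
#endif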
1902
1903 /* copy samples for the audio visualization (waves/RDFT) display */
1904 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1905 {
1906     int size, len;
1907
1908     size = samples_size / sizeof(short);
1909     while (size > 0) {
1910         len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1911         if (len > size)
1912             len = size;
1913         memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1914         samples += len;
1915         is->sample_array_index += len;
1916         if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1917             is->sample_array_index = 0;
1918         size -= len;
1919     }
1920 }
1921
1922 /* return the wanted number of samples to get better sync if sync_type is video
1923  * or external master clock */
1924 static int synchronize_audio(VideoState *is, int nb_samples)
1925 {
1926     int wanted_nb_samples = nb_samples;
1927
1928     /* if not master, then we try to remove or add samples to correct the clock */
1929     if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1930          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1931         double diff, avg_diff;
1932         int min_nb_samples, max_nb_samples;
1933
1934         diff = get_audio_clock(is) - get_master_clock(is);
1935
1936         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1937             is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1938             if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1939                 /* not enough measurements to have a correct estimate */
1940                 is->audio_diff_avg_count++;
1941             } else {
1942                 /* estimate the A-V difference */
1943                 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1944
1945                 if (fabs(avg_diff) >= is->audio_diff_threshold) {
1946                     wanted_nb_samples = nb_samples + (int)(diff * is->audio_src.freq);
1947                     min_nb_samples = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX) / 100));
1948                     max_nb_samples = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX) / 100));
1949                     wanted_nb_samples = FFMIN(FFMAX(wanted_nb_samples, min_nb_samples), max_nb_samples);
1950                 }
1951                 av_dlog(NULL, "diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
1952                         diff, avg_diff, wanted_nb_samples - nb_samples,
1953                         is->audio_clock, is->video_clock, is->audio_diff_threshold);
1954             }
1955         } else {
1956             /* the difference is too big: probably initial PTS errors, so
1957                reset the A-V filter */
1958             is->audio_diff_avg_count = 0;
1959             is->audio_diff_cum       = 0;
1960         }
1961     }
1962
1963     return wanted_nb_samples;
1964 }
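
/* Editor's sketch (not part of ffplay): the correction in synchronize_audio()
 * with assumed numbers.  At 48 kHz, 1024 input samples and an A-V difference
 * of +5 ms give 1024 + 0.005 * 48000 = 1264 wanted samples, which the +/-10%
 * clamp reduces to 1126. */
#if 0
static int wanted_samples_example(void)
{
    int nb_samples = 1024, freq = 48000;
    double diff = 0.005;
    int wanted = nb_samples + (int)(diff * freq);                        /* 1264 */
    int min = nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX) / 100;  /*  921 */
    int max = nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX) / 100;  /* 1126 */
    return FFMIN(FFMAX(wanted, min), max);                               /* 1126 */
}
#endif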
1965
1966 /* decode one audio frame and return its uncompressed size */
1967 static int audio_decode_frame(VideoState *is, double *pts_ptr)
1968 {
1969     AVPacket *pkt_temp = &is->audio_pkt_temp;
1970     AVPacket *pkt = &is->audio_pkt;
1971     AVCodecContext *dec = is->audio_st->codec;
1972     int len1, len2, data_size, resampled_data_size;
1973     int64_t dec_channel_layout;
1974     int got_frame;
1975     double pts;
1976     int new_packet = 0;
1977     int flush_complete = 0;
1978     int wanted_nb_samples;
1979
1980     for (;;) {
1981         /* NOTE: the audio packet can contain several frames */
1982         while (pkt_temp->size > 0 || (!pkt_temp->data && new_packet)) {
1983             if (!is->frame) {
1984                 if (!(is->frame = avcodec_alloc_frame()))
1985                     return AVERROR(ENOMEM);
1986             } else
1987                 avcodec_get_frame_defaults(is->frame);
1988
1989             if (is->paused)
1990                 return -1;
1991
1992             if (flush_complete)
1993                 break;
1994             new_packet = 0;
1995             len1 = avcodec_decode_audio4(dec, is->frame, &got_frame, pkt_temp);
1996             if (len1 < 0) {
1997                 /* if error, we skip the frame */
1998                 pkt_temp->size = 0;
1999                 break;
2000             }
2001
2002             pkt_temp->data += len1;
2003             pkt_temp->size -= len1;
2004
2005             if (!got_frame) {
2006                 /* stop sending empty packets if the decoder is finished */
2007                 if (!pkt_temp->data && dec->codec->capabilities & CODEC_CAP_DELAY)
2008                     flush_complete = 1;
2009                 continue;
2010             }
2011             data_size = av_samples_get_buffer_size(NULL, is->frame->channels,
2012                                                    is->frame->nb_samples,
2013                                                    is->frame->format, 1);
2014
2015             dec_channel_layout =
2016                 (is->frame->channel_layout && is->frame->channels == av_get_channel_layout_nb_channels(is->frame->channel_layout)) ?
2017                 is->frame->channel_layout : av_get_default_channel_layout(is->frame->channels);
2018             wanted_nb_samples = synchronize_audio(is, is->frame->nb_samples);
2019
2020             if (is->frame->format        != is->audio_src.fmt            ||
2021                 dec_channel_layout       != is->audio_src.channel_layout ||
2022                 is->frame->sample_rate   != is->audio_src.freq           ||
2023                 (wanted_nb_samples       != is->frame->nb_samples && !is->swr_ctx)) {
2024                 swr_free(&is->swr_ctx);
2025                 is->swr_ctx = swr_alloc_set_opts(NULL,
2026                                                  is->audio_tgt.channel_layout, is->audio_tgt.fmt, is->audio_tgt.freq,
2027                                                  dec_channel_layout,           is->frame->format, is->frame->sample_rate,
2028                                                  0, NULL);
2029                 if (!is->swr_ctx || swr_init(is->swr_ctx) < 0) {
2030                     fprintf(stderr, "Cannot create sample rate converter for conversion of %d Hz %s %d channels to %d Hz %s %d channels!\n",
2031                         is->frame->sample_rate,   av_get_sample_fmt_name(is->frame->format), (int)is->frame->channels,
2032                         is->audio_tgt.freq, av_get_sample_fmt_name(is->audio_tgt.fmt), is->audio_tgt.channels);
2033                     break;
2034                 }
2035                 is->audio_src.channel_layout = dec_channel_layout;
2036                 is->audio_src.channels = is->frame->channels;
2037                 is->audio_src.freq = is->frame->sample_rate;
2038                 is->audio_src.fmt = is->frame->format;
2039             }
2040
2041             if (is->swr_ctx) {
2042                 const uint8_t **in = (const uint8_t **)is->frame->extended_data;
2043                 uint8_t *out[] = {is->audio_buf2};
2044                 int out_count = sizeof(is->audio_buf2) / is->audio_tgt.channels / av_get_bytes_per_sample(is->audio_tgt.fmt);
2045                 if (wanted_nb_samples != is->frame->nb_samples) {
2046                     if (swr_set_compensation(is->swr_ctx, (wanted_nb_samples - is->frame->nb_samples) * is->audio_tgt.freq / is->frame->sample_rate,
2047                                                 wanted_nb_samples * is->audio_tgt.freq / is->frame->sample_rate) < 0) {
2048                         fprintf(stderr, "swr_set_compensation() failed\n");
2049                         break;
2050                     }
2051                 }
2052                 len2 = swr_convert(is->swr_ctx, out, out_count, in, is->frame->nb_samples);
2053                 if (len2 < 0) {
2054                     fprintf(stderr, "swr_convert() failed\n");
2055                     break;
2056                 }
2057                 if (len2 == out_count) {
2058                     fprintf(stderr, "warning: audio buffer is probably too small\n");
2059                     swr_init(is->swr_ctx);
2060                 }
2061                 is->audio_buf = is->audio_buf2;
2062                 resampled_data_size = len2 * is->audio_tgt.channels * av_get_bytes_per_sample(is->audio_tgt.fmt);
2063             } else {
2064                 is->audio_buf = is->frame->data[0];
2065                 resampled_data_size = data_size;
2066             }
2067
2068             /* the audio clock serves as the pts; it is advanced below and resynced from the packet pts when available */
2069             pts = is->audio_clock;
2070             *pts_ptr = pts;
2071             is->audio_clock += (double)data_size /
2072                 (is->frame->channels * is->frame->sample_rate * av_get_bytes_per_sample(is->frame->format));
2073 #ifdef DEBUG
2074             {
2075                 static double last_clock;
2076                 printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
2077                        is->audio_clock - last_clock,
2078                        is->audio_clock, pts);
2079                 last_clock = is->audio_clock;
2080             }
2081 #endif
2082             return resampled_data_size;
2083         }
2084
2085         /* free the current packet */
2086         if (pkt->data)
2087             av_free_packet(pkt);
2088         memset(pkt_temp, 0, sizeof(*pkt_temp));
2089
2090         if (is->paused || is->audioq.abort_request) {
2091             return -1;
2092         }
2093
2094         if (is->audioq.nb_packets == 0)
2095             SDL_CondSignal(is->continue_read_thread);
2096
2097         /* read next packet */
2098         if ((new_packet = packet_queue_get(&is->audioq, pkt, 1, &is->audio_pkt_temp_serial)) < 0)
2099             return -1;
2100
2101         if (pkt->data == flush_pkt.data) {
2102             avcodec_flush_buffers(dec);
2103             flush_complete = 0;
2104         }
2105
2106         *pkt_temp = *pkt;
2107
2108         /* update the audio clock with the packet pts, if available */
2109         if (pkt->pts != AV_NOPTS_VALUE) {
2110             is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
2111         }
2112     }
2113 }
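
/* Editor's sketch (not part of ffplay): how the code above asks libswresample
 * to stretch a frame.  Assuming source and target share the same sample rate,
 * turning 1024 decoded samples into 1126 wanted samples means a delta of
 * +102 samples distributed over 1126 output samples. */
#if 0
static int compensation_example(SwrContext *swr)
{
    int wanted = 1126, decoded = 1024;
    return swr_set_compensation(swr, wanted - decoded, wanted);
}
#endif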
2114
2115 /* prepare a new audio buffer */
2116 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2117 {
2118     VideoState *is = opaque;
2119     int audio_size, len1;
2120     int bytes_per_sec;
2121     int frame_size = av_samples_get_buffer_size(NULL, is->audio_tgt.channels, 1, is->audio_tgt.fmt, 1);
2122     double pts;
2123
2124     audio_callback_time = av_gettime();
2125
2126     while (len > 0) {
2127         if (is->audio_buf_index >= is->audio_buf_size) {
2128            audio_size = audio_decode_frame(is, &pts);
2129            if (audio_size < 0) {
2130                 /* if error, just output silence */
2131                is->audio_buf      = is->silence_buf;
2132                is->audio_buf_size = sizeof(is->silence_buf) / frame_size * frame_size;
2133            } else {
2134                if (is->show_mode != SHOW_MODE_VIDEO)
2135                    update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2136                is->audio_buf_size = audio_size;
2137            }
2138            is->audio_buf_index = 0;
2139         }
2140         len1 = is->audio_buf_size - is->audio_buf_index;
2141         if (len1 > len)
2142             len1 = len;
2143         memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2144         len -= len1;
2145         stream += len1;
2146         is->audio_buf_index += len1;
2147     }
2148     bytes_per_sec = is->audio_tgt.freq * is->audio_tgt.channels * av_get_bytes_per_sample(is->audio_tgt.fmt);
2149     is->audio_write_buf_size = is->audio_buf_size - is->audio_buf_index;
2150     /* Let's assume the audio driver that is used by SDL has two periods. */
2151     is->audio_current_pts = is->audio_clock - (double)(2 * is->audio_hw_buf_size + is->audio_write_buf_size) / bytes_per_sec;
2152     is->audio_current_pts_drift = is->audio_current_pts - audio_callback_time / 1000000.0;
2153     check_external_clock_sync(is, is->audio_current_pts);
2154 }
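
/* Editor's sketch (not part of ffplay): the latency subtracted from the audio
 * clock above, with assumed numbers.  For 48 kHz stereo S16 output
 * (192000 bytes/s), an 8192-byte SDL hardware buffer counted twice plus 4096
 * not-yet-written bytes place the audible position roughly 0.107 s behind
 * audio_clock. */
#if 0
static double audio_pts_example(double audio_clock)
{
    int bytes_per_sec  = 48000 * 2 * av_get_bytes_per_sample(AV_SAMPLE_FMT_S16);
    int hw_buf_size    = 8192;
    int write_buf_size = 4096;
    return audio_clock - (double)(2 * hw_buf_size + write_buf_size) / bytes_per_sec;
}
#endif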
2155
2156 static int audio_open(void *opaque, int64_t wanted_channel_layout, int wanted_nb_channels, int wanted_sample_rate, struct AudioParams *audio_hw_params)
2157 {
2158     SDL_AudioSpec wanted_spec, spec;
2159     const char *env;
2160     const int next_nb_channels[] = {0, 0, 1, 6, 2, 6, 4, 6};
2161
2162     env = SDL_getenv("SDL_AUDIO_CHANNELS");
2163     if (env) {
2164         wanted_nb_channels = atoi(env);
2165         wanted_channel_layout = av_get_default_channel_layout(wanted_nb_channels);
2166     }
2167     if (!wanted_channel_layout || wanted_nb_channels != av_get_channel_layout_nb_channels(wanted_channel_layout)) {
2168         wanted_channel_layout = av_get_default_channel_layout(wanted_nb_channels);
2169         wanted_channel_layout &= ~AV_CH_LAYOUT_STEREO_DOWNMIX;
2170     }
2171     wanted_spec.channels = av_get_channel_layout_nb_channels(wanted_channel_layout);
2172     wanted_spec.freq = wanted_sample_rate;
2173     if (wanted_spec.freq <= 0 || wanted_spec.channels <= 0) {
2174         fprintf(stderr, "Invalid sample rate or channel count!\n");
2175         return -1;
2176     }
2177     wanted_spec.format = AUDIO_S16SYS;
2178     wanted_spec.silence = 0;
2179     wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
2180     wanted_spec.callback = sdl_audio_callback;
2181     wanted_spec.userdata = opaque;
2182     while (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2183         fprintf(stderr, "SDL_OpenAudio (%d channels): %s\n", wanted_spec.channels, SDL_GetError());
2184         wanted_spec.channels = next_nb_channels[FFMIN(7, wanted_spec.channels)];
2185         if (!wanted_spec.channels) {
2186             fprintf(stderr, "No more channel combinations to try, audio open failed\n");
2187             return -1;
2188         }
2189         wanted_channel_layout = av_get_default_channel_layout(wanted_spec.channels);
2190     }
2191     if (spec.format != AUDIO_S16SYS) {
2192         fprintf(stderr, "SDL advised audio format %d is not supported!\n", spec.format);
2193         return -1;
2194     }
2195     if (spec.channels != wanted_spec.channels) {
2196         wanted_channel_layout = av_get_default_channel_layout(spec.channels);
2197         if (!wanted_channel_layout) {
2198             fprintf(stderr, "SDL advised channel count %d is not supported!\n", spec.channels);
2199             return -1;
2200         }
2201     }
2202
2203     audio_hw_params->fmt = AV_SAMPLE_FMT_S16;
2204     audio_hw_params->freq = spec.freq;
2205     audio_hw_params->channel_layout = wanted_channel_layout;
2206     audio_hw_params->channels =  spec.channels;
2207     return spec.size;
2208 }
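
/* Editor's sketch (not part of ffplay): the retry chain encoded in
 * next_nb_channels[] above.  Starting from a failed 6-channel request the
 * attempts go 6 -> 4 -> 2 -> 1 and then give up at 0. */
#if 0
static void channel_fallback_example(void)
{
    static const int next_nb_channels[] = {0, 0, 1, 6, 2, 6, 4, 6};
    int channels = 6;
    while (channels) {
        fprintf(stderr, "would try %d channels\n", channels);
        channels = next_nb_channels[FFMIN(7, channels)];
    }
}
#endif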
2209
2210 /* open a given stream. Return 0 if OK */
2211 static int stream_component_open(VideoState *is, int stream_index)
2212 {
2213     AVFormatContext *ic = is->ic;
2214     AVCodecContext *avctx;
2215     AVCodec *codec;
2216     AVDictionary *opts;
2217     AVDictionaryEntry *t = NULL;
2218
2219     if (stream_index < 0 || stream_index >= ic->nb_streams)
2220         return -1;
2221     avctx = ic->streams[stream_index]->codec;
2222
2223     codec = avcodec_find_decoder(avctx->codec_id);
2224     opts = filter_codec_opts(codec_opts, avctx->codec_id, ic, ic->streams[stream_index], codec);
2225
2226     switch(avctx->codec_type){
2227         case AVMEDIA_TYPE_AUDIO   : is->last_audio_stream    = stream_index; if(audio_codec_name   ) codec= avcodec_find_decoder_by_name(   audio_codec_name); break;
2228         case AVMEDIA_TYPE_SUBTITLE: is->last_subtitle_stream = stream_index; if(subtitle_codec_name) codec= avcodec_find_decoder_by_name(subtitle_codec_name); break;
2229         case AVMEDIA_TYPE_VIDEO   : is->last_video_stream    = stream_index; if(video_codec_name   ) codec= avcodec_find_decoder_by_name(   video_codec_name); break;
2230     }
2231     if (!codec)
2232         return -1;
2233
2234     avctx->workaround_bugs   = workaround_bugs;
2235     avctx->lowres            = lowres;
2236     if(avctx->lowres > codec->max_lowres){
2237         av_log(avctx, AV_LOG_WARNING, "The maximum value for lowres supported by the decoder is %d\n",
2238                 codec->max_lowres);
2239         avctx->lowres= codec->max_lowres;
2240     }
2241     avctx->idct_algo         = idct;
2242     avctx->skip_frame        = skip_frame;
2243     avctx->skip_idct         = skip_idct;
2244     avctx->skip_loop_filter  = skip_loop_filter;
2245     avctx->error_concealment = error_concealment;
2246
2247     if(avctx->lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
2248     if (fast)   avctx->flags2 |= CODEC_FLAG2_FAST;
2249     if(codec->capabilities & CODEC_CAP_DR1)
2250         avctx->flags |= CODEC_FLAG_EMU_EDGE;
2251
2252     if (!av_dict_get(opts, "threads", NULL, 0))
2253         av_dict_set(&opts, "threads", "auto", 0);
2254     if (!codec ||
2255         avcodec_open2(avctx, codec, &opts) < 0)
2256         return -1;
2257     if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2258         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2259         return AVERROR_OPTION_NOT_FOUND;
2260     }
2261
2262     /* prepare audio output */
2263     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2264         int audio_hw_buf_size = audio_open(is, avctx->channel_layout, avctx->channels, avctx->sample_rate, &is->audio_src);
2265         if (audio_hw_buf_size < 0)
2266             return -1;
2267         is->audio_hw_buf_size = audio_hw_buf_size;
2268         is->audio_tgt = is->audio_src;
2269     }
2270
2271     ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2272     switch (avctx->codec_type) {
2273     case AVMEDIA_TYPE_AUDIO:
2274         is->audio_stream = stream_index;
2275         is->audio_st = ic->streams[stream_index];
2276         is->audio_buf_size  = 0;
2277         is->audio_buf_index = 0;
2278
2279         /* init averaging filter */
2280         is->audio_diff_avg_coef  = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2281         is->audio_diff_avg_count = 0;
2282         /* since we do not have a precise enough measure of the audio FIFO fullness,
2283            we correct audio sync only if the error is larger than this threshold */
2284         is->audio_diff_threshold = 2.0 * is->audio_hw_buf_size / av_samples_get_buffer_size(NULL, is->audio_tgt.channels, is->audio_tgt.freq, is->audio_tgt.fmt, 1);
2285
2286         memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
2287         memset(&is->audio_pkt_temp, 0, sizeof(is->audio_pkt_temp));
2288         packet_queue_start(&is->audioq);
2289         SDL_PauseAudio(0);
2290         break;
2291     case AVMEDIA_TYPE_VIDEO:
2292         is->video_stream = stream_index;
2293         is->video_st = ic->streams[stream_index];
2294
2295         packet_queue_start(&is->videoq);
2296         is->video_tid = SDL_CreateThread(video_thread, is);
2297         break;
2298     case AVMEDIA_TYPE_SUBTITLE:
2299         is->subtitle_stream = stream_index;
2300         is->subtitle_st = ic->streams[stream_index];
2301         packet_queue_start(&is->subtitleq);
2302
2303         is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
2304         break;
2305     default:
2306         break;
2307     }
2308     return 0;
2309 }
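
/* Editor's sketch (not part of ffplay): the audio_diff_threshold set above,
 * with assumed numbers.  av_samples_get_buffer_size(NULL, ch, freq, fmt, 1)
 * is the byte size of one second of audio, so the threshold is the duration
 * of two hardware buffers: with an 8192-byte buffer and 48 kHz stereo S16
 * (192000 bytes/s) that is about 0.085 s. */
#if 0
static double diff_threshold_example(void)
{
    int hw_buf_size = 8192;
    int one_second  = av_samples_get_buffer_size(NULL, 2, 48000, AV_SAMPLE_FMT_S16, 1);
    return 2.0 * hw_buf_size / one_second; /* 16384 / 192000 ~= 0.085 */
}
#endif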
2310
2311 static void stream_component_close(VideoState *is, int stream_index)
2312 {
2313     AVFormatContext *ic = is->ic;
2314     AVCodecContext *avctx;
2315
2316     if (stream_index < 0 || stream_index >= ic->nb_streams)
2317         return;
2318     avctx = ic->streams[stream_index]->codec;
2319
2320     switch (avctx->codec_type) {
2321     case AVMEDIA_TYPE_AUDIO:
2322         packet_queue_abort(&is->audioq);
2323
2324         SDL_CloseAudio();
2325
2326         packet_queue_flush(&is->audioq);
2327         av_free_packet(&is->audio_pkt);
2328         swr_free(&is->swr_ctx);
2329         av_freep(&is->audio_buf1);
2330         is->audio_buf = NULL;
2331         avcodec_free_frame(&is->frame);
2332
2333         if (is->rdft) {
2334             av_rdft_end(is->rdft);
2335             av_freep(&is->rdft_data);
2336             is->rdft = NULL;
2337             is->rdft_bits = 0;
2338         }
2339         break;
2340     case AVMEDIA_TYPE_VIDEO:
2341         packet_queue_abort(&is->videoq);
2342
2343         /* note: we also signal this mutex to make sure we deblock the
2344            video thread in all cases */
2345         SDL_LockMutex(is->pictq_mutex);
2346         SDL_CondSignal(is->pictq_cond);
2347         SDL_UnlockMutex(is->pictq_mutex);
2348
2349         SDL_WaitThread(is->video_tid, NULL);
2350
2351         packet_queue_flush(&is->videoq);
2352         break;
2353     case AVMEDIA_TYPE_SUBTITLE:
2354         packet_queue_abort(&is->subtitleq);
2355
2356         /* note: we also signal this mutex to make sure we deblock the
2357            subtitle thread in all cases */
2358         SDL_LockMutex(is->subpq_mutex);
2359         is->subtitle_stream_changed = 1;
2360
2361         SDL_CondSignal(is->subpq_cond);
2362         SDL_UnlockMutex(is->subpq_mutex);
2363
2364         SDL_WaitThread(is->subtitle_tid, NULL);
2365
2366         packet_queue_flush(&is->subtitleq);
2367         break;
2368     default:
2369         break;
2370     }
2371
2372     ic->streams[stream_index]->discard = AVDISCARD_ALL;
2373     avcodec_close(avctx);
2374 #if CONFIG_AVFILTER
2375     free_buffer_pool(&is->buffer_pool);
2376 #endif
2377     switch (avctx->codec_type) {
2378     case AVMEDIA_TYPE_AUDIO:
2379         is->audio_st = NULL;
2380         is->audio_stream = -1;
2381         break;
2382     case AVMEDIA_TYPE_VIDEO:
2383         is->video_st = NULL;
2384         is->video_stream = -1;
2385         break;
2386     case AVMEDIA_TYPE_SUBTITLE:
2387         is->subtitle_st = NULL;
2388         is->subtitle_stream = -1;
2389         break;
2390     default:
2391         break;
2392     }
2393 }
2394
2395 static int decode_interrupt_cb(void *ctx)
2396 {
2397     VideoState *is = ctx;
2398     return is->abort_request;
2399 }
2400
2401 static int is_realtime(AVFormatContext *s)
2402 {
2403     if(   !strcmp(s->iformat->name, "rtp")
2404        || !strcmp(s->iformat->name, "rtsp")
2405        || !strcmp(s->iformat->name, "sdp")
2406     )
2407         return 1;
2408
2409     if(s->pb && (   !strncmp(s->filename, "rtp:", 4)
2410                  || !strncmp(s->filename, "udp:", 4)
2411                 )
2412     )
2413         return 1;
2414     return 0;
2415 }
2416
2417 /* this thread gets the stream from the disk or the network */
2418 static int read_thread(void *arg)
2419 {
2420     VideoState *is = arg;
2421     AVFormatContext *ic = NULL;
2422     int err, i, ret;
2423     int st_index[AVMEDIA_TYPE_NB];
2424     AVPacket pkt1, *pkt = &pkt1;
2425     int eof = 0;
2426     int pkt_in_play_range = 0;
2427     AVDictionaryEntry *t;
2428     AVDictionary **opts;
2429     int orig_nb_streams;
2430     SDL_mutex *wait_mutex = SDL_CreateMutex();
2431
2432     memset(st_index, -1, sizeof(st_index));
2433     is->last_video_stream = is->video_stream = -1;
2434     is->last_audio_stream = is->audio_stream = -1;
2435     is->last_subtitle_stream = is->subtitle_stream = -1;
2436
2437     ic = avformat_alloc_context();
2438     ic->interrupt_callback.callback = decode_interrupt_cb;
2439     ic->interrupt_callback.opaque = is;
2440     err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
2441     if (err < 0) {
2442         print_error(is->filename, err);
2443         ret = -1;
2444         goto fail;
2445     }
2446     if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2447         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2448         ret = AVERROR_OPTION_NOT_FOUND;
2449         goto fail;
2450     }
2451     is->ic = ic;
2452
2453     if (genpts)
2454         ic->flags |= AVFMT_FLAG_GENPTS;
2455
2456     opts = setup_find_stream_info_opts(ic, codec_opts);
2457     orig_nb_streams = ic->nb_streams;
2458
2459     err = avformat_find_stream_info(ic, opts);
2460     if (err < 0) {
2461         fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
2462         ret = -1;
2463         goto fail;
2464     }
2465     for (i = 0; i < orig_nb_streams; i++)
2466         av_dict_free(&opts[i]);
2467     av_freep(&opts);
2468
2469     if (ic->pb)
2470         ic->pb->eof_reached = 0; // FIXME hack, ffplay maybe should not use url_feof() to test for the end
2471
2472     if (seek_by_bytes < 0)
2473         seek_by_bytes = !!(ic->iformat->flags & AVFMT_TS_DISCONT);
2474
2475     /* if seeking requested, we execute it */
2476     if (start_time != AV_NOPTS_VALUE) {
2477         int64_t timestamp;
2478
2479         timestamp = start_time;
2480         /* add the stream start time */
2481         if (ic->start_time != AV_NOPTS_VALUE)
2482             timestamp += ic->start_time;
2483         ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2484         if (ret < 0) {
2485             fprintf(stderr, "%s: could not seek to position %0.3f\n",
2486                     is->filename, (double)timestamp / AV_TIME_BASE);
2487         }
2488     }
2489
2490     for (i = 0; i < ic->nb_streams; i++)
2491         ic->streams[i]->discard = AVDISCARD_ALL;
2492     if (!video_disable)
2493         st_index[AVMEDIA_TYPE_VIDEO] =
2494             av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2495                                 wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2496     if (!audio_disable)
2497         st_index[AVMEDIA_TYPE_AUDIO] =
2498             av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2499                                 wanted_stream[AVMEDIA_TYPE_AUDIO],
2500                                 st_index[AVMEDIA_TYPE_VIDEO],
2501                                 NULL, 0);
2502     if (!video_disable)
2503         st_index[AVMEDIA_TYPE_SUBTITLE] =
2504             av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2505                                 wanted_stream[AVMEDIA_TYPE_SUBTITLE],
2506                                 (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2507                                  st_index[AVMEDIA_TYPE_AUDIO] :
2508                                  st_index[AVMEDIA_TYPE_VIDEO]),
2509                                 NULL, 0);
2510     if (show_status) {
2511         av_dump_format(ic, 0, is->filename, 0);
2512     }
2513
2514     is->show_mode = show_mode;
2515
2516     /* open the streams */
2517     if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2518         stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2519     }
2520
2521     ret = -1;
2522     if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2523         ret = stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2524     }
2525     is->refresh_tid = SDL_CreateThread(refresh_thread, is);
2526     if (is->show_mode == SHOW_MODE_NONE)
2527         is->show_mode = ret >= 0 ? SHOW_MODE_VIDEO : SHOW_MODE_RDFT;
2528
2529     if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2530         stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2531     }
2532
2533     if (is->video_stream < 0 && is->audio_stream < 0) {
2534         fprintf(stderr, "%s: could not open codecs\n", is->filename);
2535         ret = -1;
2536         goto fail;
2537     }
2538
2539     if (infinite_buffer < 0 && is_realtime(ic))
2540         infinite_buffer = 1;
2541
2542     for (;;) {
2543         if (is->abort_request)
2544             break;
2545         if (is->paused != is->last_paused) {
2546             is->last_paused = is->paused;
2547             if (is->paused)
2548                 is->read_pause_return = av_read_pause(ic);
2549             else
2550                 av_read_play(ic);
2551         }
2552 #if CONFIG_RTSP_DEMUXER || CONFIG_MMSH_PROTOCOL
2553         if (is->paused &&
2554                 (!strcmp(ic->iformat->name, "rtsp") ||
2555                  (ic->pb && !strncmp(input_filename, "mmsh:", 5)))) {
2556             /* wait 10 ms to avoid trying to get another packet */
2557             /* XXX: horrible */
2558             SDL_Delay(10);
2559             continue;
2560         }
2561 #endif
2562         if (is->seek_req) {
2563             int64_t seek_target = is->seek_pos;
2564             int64_t seek_min    = is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2565             int64_t seek_max    = is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2566 // FIXME the +-2 is because rounding is not done in the correct direction when
2567 //      generating the seek_pos/seek_rel variables
2568
2569             ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2570             if (ret < 0) {
2571                 fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
2572             } else {
2573                 if (is->audio_stream >= 0) {
2574                     packet_queue_flush(&is->audioq);
2575                     packet_queue_put(&is->audioq, &flush_pkt);
2576                 }
2577                 if (is->subtitle_stream >= 0) {
2578                     packet_queue_flush(&is->subtitleq);
2579                     packet_queue_put(&is->subtitleq, &flush_pkt);
2580                 }
2581                 if (is->video_stream >= 0) {
2582                     packet_queue_flush(&is->videoq);
2583                     packet_queue_put(&is->videoq, &flush_pkt);
2584                 }
2585             }
2586             update_external_clock_pts(is, (seek_target + ic->start_time) / (double)AV_TIME_BASE);
2587             is->seek_req = 0;
2588             eof = 0;
2589         }
2590         if (is->que_attachments_req) {
2591             avformat_queue_attached_pictures(ic);
2592             is->que_attachments_req = 0;
2593         }
2594
2595         /* if the queues are full, no need to read more */
2596         if (infinite_buffer<1 &&
2597               (is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2598             || (   (is->audioq   .nb_packets > MIN_FRAMES || is->audio_stream < 0 || is->audioq.abort_request)
2599                 && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream < 0 || is->videoq.abort_request)
2600                 && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream < 0 || is->subtitleq.abort_request)))) {
2601             /* wait 10 ms */
2602             SDL_LockMutex(wait_mutex);
2603             SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
2604             SDL_UnlockMutex(wait_mutex);
2605             continue;
2606         }
2607         if (eof) {
2608             if (is->video_stream >= 0) {
2609                 av_init_packet(pkt);
2610                 pkt->data = NULL;
2611                 pkt->size = 0;
2612                 pkt->stream_index = is->video_stream;
2613                 packet_queue_put(&is->videoq, pkt);
2614             }
2615             if (is->audio_stream >= 0 &&
2616                 is->audio_st->codec->codec->capabilities & CODEC_CAP_DELAY) {
2617                 av_init_packet(pkt);
2618                 pkt->data = NULL;
2619                 pkt->size = 0;
2620                 pkt->stream_index = is->audio_stream;
2621                 packet_queue_put(&is->audioq, pkt);
2622             }
2623             SDL_Delay(10);
2624             if (is->audioq.size + is->videoq.size + is->subtitleq.size == 0) {
2625                 if (loop != 1 && (!loop || --loop)) {
2626                     stream_seek(is, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
2627                 } else if (autoexit) {
2628                     ret = AVERROR_EOF;
2629                     goto fail;
2630                 }
2631             }
2632             eof=0;
2633             continue;
2634         }
2635         ret = av_read_frame(ic, pkt);
2636         if (ret < 0) {
2637             if (ret == AVERROR_EOF || url_feof(ic->pb))
2638                 eof = 1;
2639             if (ic->pb && ic->pb->error)
2640                 break;
2641             SDL_LockMutex(wait_mutex);
2642             SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
2643             SDL_UnlockMutex(wait_mutex);
2644             continue;
2645         }
2646         /* check if packet is in play range specified by user, then queue, otherwise discard */
2647         pkt_in_play_range = duration == AV_NOPTS_VALUE ||
2648                 (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
2649                 av_q2d(ic->streams[pkt->stream_index]->time_base) -
2650                 (double)(start_time != AV_NOPTS_VALUE ? start_time : 0) / 1000000
2651                 <= ((double)duration / 1000000);
2652         if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
2653             packet_queue_put(&is->audioq, pkt);
2654         } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
2655             packet_queue_put(&is->videoq, pkt);
2656         } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
2657             packet_queue_put(&is->subtitleq, pkt);
2658         } else {
2659             av_free_packet(pkt);
2660         }
2661     }
2662     /* wait until the end */
2663     while (!is->abort_request) {
2664         SDL_Delay(100);
2665     }
2666
2667     ret = 0;
2668  fail:
2669     /* close each stream */
2670     if (is->audio_stream >= 0)
2671         stream_component_close(is, is->audio_stream);
2672     if (is->video_stream >= 0)
2673         stream_component_close(is, is->video_stream);
2674     if (is->subtitle_stream >= 0)
2675         stream_component_close(is, is->subtitle_stream);
2676     if (is->ic) {
2677         avformat_close_input(&is->ic);
2678     }
2679
2680     if (ret != 0) {
2681         SDL_Event event;
2682
2683         event.type = FF_QUIT_EVENT;
2684         event.user.data1 = is;
2685         SDL_PushEvent(&event);
2686     }
2687     SDL_DestroyMutex(wait_mutex);
2688     return 0;
2689 }
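
/* Editor's sketch (not part of ffplay): the pkt_in_play_range test above with
 * assumed numbers.  For a 1/90000 stream timebase, a packet pts of 2700000 is
 * 30 s into the stream; with -ss 10 and -t 25 that is 20 s into the requested
 * range and therefore still queued. */
#if 0
static int play_range_example(void)
{
    AVRational tb = { 1, 90000 };
    int64_t pkt_pts      = 2700000;           /* 30 s   */
    int64_t stream_start = 0;
    int64_t start_time   = 10 * AV_TIME_BASE; /* -ss 10 */
    int64_t duration     = 25 * AV_TIME_BASE; /* -t 25  */
    return (pkt_pts - stream_start) * av_q2d(tb) -
           (double)start_time / 1000000 <= (double)duration / 1000000;
}
#endif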
2690
2691 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2692 {
2693     VideoState *is;
2694
2695     is = av_mallocz(sizeof(VideoState));
2696     if (!is)
2697         return NULL;
2698     av_strlcpy(is->filename, filename, sizeof(is->filename));
2699     is->iformat = iformat;
2700     is->ytop    = 0;
2701     is->xleft   = 0;
2702
2703     /* start video display */
2704     is->pictq_mutex = SDL_CreateMutex();
2705     is->pictq_cond  = SDL_CreateCond();
2706
2707     is->subpq_mutex = SDL_CreateMutex();
2708     is->subpq_cond  = SDL_CreateCond();
2709
2710     packet_queue_init(&is->videoq);
2711     packet_queue_init(&is->audioq);
2712     packet_queue_init(&is->subtitleq);
2713
2714     is->continue_read_thread = SDL_CreateCond();
2715
2716     update_external_clock_pts(is, 0.0);
2717     is->audio_current_pts_drift = -av_gettime() / 1000000.0;
2718     is->video_current_pts_drift = is->audio_current_pts_drift;
2719     is->av_sync_type = av_sync_type;
2720     is->read_tid     = SDL_CreateThread(read_thread, is);
2721     if (!is->read_tid) {
2722         av_free(is);
2723         return NULL;
2724     }
2725     return is;
2726 }
2727
2728 static void stream_cycle_channel(VideoState *is, int codec_type)
2729 {
2730     AVFormatContext *ic = is->ic;
2731     int start_index, stream_index;
2732     int old_index;
2733     AVStream *st;
2734
2735     if (codec_type == AVMEDIA_TYPE_VIDEO) {
2736         start_index = is->last_video_stream;
2737         old_index = is->video_stream;
2738     } else if (codec_type == AVMEDIA_TYPE_AUDIO) {
2739         start_index = is->last_audio_stream;
2740         old_index = is->audio_stream;
2741     } else {
2742         start_index = is->last_subtitle_stream;
2743         old_index = is->subtitle_stream;
2744     }
2745     stream_index = start_index;
2746     for (;;) {
2747         if (++stream_index >= is->ic->nb_streams)
2748         {
2749             if (codec_type == AVMEDIA_TYPE_SUBTITLE)
2750             {
2751                 stream_index = -1;
2752                 is->last_subtitle_stream = -1;
2753                 goto the_end;
2754             }
2755             if (start_index == -1)
2756                 return;
2757             stream_index = 0;
2758         }
2759         if (stream_index == start_index)
2760             return;
2761         st = ic->streams[stream_index];
2762         if (st->codec->codec_type == codec_type) {
2763             /* check that parameters are OK */
2764             switch (codec_type) {
2765             case AVMEDIA_TYPE_AUDIO:
2766                 if (st->codec->sample_rate != 0 &&
2767                     st->codec->channels != 0)
2768                     goto the_end;
2769                 break;
2770             case AVMEDIA_TYPE_VIDEO:
2771             case AVMEDIA_TYPE_SUBTITLE:
2772                 goto the_end;
2773             default:
2774                 break;
2775             }
2776         }
2777     }
2778  the_end:
2779     stream_component_close(is, old_index);
2780     stream_component_open(is, stream_index);
2781     if (codec_type == AVMEDIA_TYPE_VIDEO)
2782         is->que_attachments_req = 1;
2783 }
2784
2785
2786 static void toggle_full_screen(VideoState *is)
2787 {
2788 #if defined(__APPLE__) && SDL_VERSION_ATLEAST(1, 2, 14)
2789     /* OS X needs to reallocate the SDL overlays */
2790     int i;
2791     for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++)
2792         is->pictq[i].reallocate = 1;
2793 #endif
2794     is_full_screen = !is_full_screen;
2795     video_open(is, 1);
2796 }
2797
2798 static void toggle_pause(VideoState *is)
2799 {
2800     stream_toggle_pause(is);
2801     is->step = 0;
2802 }
2803
2804 static void step_to_next_frame(VideoState *is)
2805 {
2806     /* if the stream is paused, unpause it, then step */
2807     if (is->paused)
2808         stream_toggle_pause(is);
2809     is->step = 1;
2810 }
2811
2812 static void toggle_audio_display(VideoState *is)
2813 {
2814     int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2815     is->show_mode = (is->show_mode + 1) % SHOW_MODE_NB;
2816     fill_rectangle(screen,
2817                 is->xleft, is->ytop, is->width, is->height,
2818                 bgcolor);
2819     SDL_UpdateRect(screen, is->xleft, is->ytop, is->width, is->height);
2820 }
2821
2822 /* handle an event sent by the GUI */
2823 static void event_loop(VideoState *cur_stream)
2824 {
2825     SDL_Event event;
2826     double incr, pos, frac;
2827
2828     for (;;) {
2829         double x;
2830         SDL_WaitEvent(&event);
2831         switch (event.type) {
2832         case SDL_KEYDOWN:
2833             if (exit_on_keydown) {
2834                 do_exit(cur_stream);
2835                 break;
2836             }
2837             switch (event.key.keysym.sym) {
2838             case SDLK_ESCAPE:
2839             case SDLK_q:
2840                 do_exit(cur_stream);
2841                 break;
2842             case SDLK_f:
2843                 toggle_full_screen(cur_stream);
2844                 cur_stream->force_refresh = 1;
2845                 break;
2846             case SDLK_p:
2847             case SDLK_SPACE:
2848                 toggle_pause(cur_stream);
2849                 break;
2850             case SDLK_s: // S: Step to next frame
2851                 step_to_next_frame(cur_stream);
2852                 break;
2853             case SDLK_a:
2854                 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
2855                 break;
2856             case SDLK_v:
2857                 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
2858                 break;
2859             case SDLK_t:
2860                 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
2861                 break;
2862             case SDLK_w:
2863                 toggle_audio_display(cur_stream);
2864                 cur_stream->force_refresh = 1;
2865                 break;
2866             case SDLK_PAGEUP:
2867                 incr = 600.0;
2868                 goto do_seek;
2869             case SDLK_PAGEDOWN:
2870                 incr = -600.0;
2871                 goto do_seek;
2872             case SDLK_LEFT:
2873                 incr = -10.0;
2874                 goto do_seek;
2875             case SDLK_RIGHT:
2876                 incr = 10.0;
2877                 goto do_seek;
2878             case SDLK_UP:
2879                 incr = 60.0;
2880                 goto do_seek;
2881             case SDLK_DOWN:
2882                 incr = -60.0;
2883             do_seek:
2884                     if (seek_by_bytes) {
2885                         if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos >= 0) {
2886                             pos = cur_stream->video_current_pos;
2887                         } else if (cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos >= 0) {
2888                             pos = cur_stream->audio_pkt.pos;
2889                         } else
2890                             pos = avio_tell(cur_stream->ic->pb);
2891                         if (cur_stream->ic->bit_rate)
2892                             incr *= cur_stream->ic->bit_rate / 8.0;
2893                         else
2894                             incr *= 180000.0;
2895                         pos += incr;
2896                         stream_seek(cur_stream, pos, incr, 1);
2897                     } else {
2898                         pos = get_master_clock(cur_stream);
2899                         pos += incr;
2900                         stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
2901                     }
2902                 break;
2903             default:
2904                 break;
2905             }
2906             break;
2907         case SDL_VIDEOEXPOSE:
2908             cur_stream->force_refresh = 1;
2909             break;
2910         case SDL_MOUSEBUTTONDOWN:
2911             if (exit_on_mousedown) {
2912                 do_exit(cur_stream);
2913                 break;
2914             }
2915         case SDL_MOUSEMOTION:
2916             if (event.type == SDL_MOUSEBUTTONDOWN) {
2917                 x = event.button.x;
2918             } else {
2919                 if (event.motion.state != SDL_PRESSED)
2920                     break;
2921                 x = event.motion.x;
2922             }
2923                 if (seek_by_bytes || cur_stream->ic->duration <= 0) {
2924                     uint64_t size =  avio_size(cur_stream->ic->pb);
2925                     stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
2926                 } else {
2927                     int64_t ts;
2928                     int ns, hh, mm, ss;
2929                     int tns, thh, tmm, tss;
2930                     tns  = cur_stream->ic->duration / 1000000LL;
2931                     thh  = tns / 3600;
2932                     tmm  = (tns % 3600) / 60;
2933                     tss  = (tns % 60);
2934                     frac = x / cur_stream->width;
2935                     ns   = frac * tns;
2936                     hh   = ns / 3600;
2937                     mm   = (ns % 3600) / 60;
2938                     ss   = (ns % 60);
2939                     fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
2940                             hh, mm, ss, thh, tmm, tss);
2941                     ts = frac * cur_stream->ic->duration;
2942                     if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
2943                         ts += cur_stream->ic->start_time;
2944                     stream_seek(cur_stream, ts, 0, 0);
2945                 }
2946             break;
2947         case SDL_VIDEORESIZE:
2948                 screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
2949                                           SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
2950                 screen_width  = cur_stream->width  = event.resize.w;
2951                 screen_height = cur_stream->height = event.resize.h;
2952                 cur_stream->force_refresh = 1;
2953             break;
2954         case SDL_QUIT:
2955         case FF_QUIT_EVENT:
2956             do_exit(cur_stream);
2957             break;
2958         case FF_ALLOC_EVENT:
2959             alloc_picture(event.user.data1);
2960             break;
2961         case FF_REFRESH_EVENT:
2962             video_refresh(event.user.data1);
2963             cur_stream->refresh = 0;
2964             break;
2965         default:
2966             break;
2967         }
2968     }
2969 }
2970
2971 static int opt_frame_size(void *optctx, const char *opt, const char *arg)
2972 {
2973     av_log(NULL, AV_LOG_WARNING, "Option -s is deprecated, use -video_size.\n");
2974     return opt_default(NULL, "video_size", arg);
2975 }
2976
2977 static int opt_width(void *optctx, const char *opt, const char *arg)
2978 {
2979     screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2980     return 0;
2981 }
2982
2983 static int opt_height(void *optctx, const char *opt, const char *arg)
2984 {
2985     screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2986     return 0;
2987 }
2988
2989 static int opt_format(void *optctx, const char *opt, const char *arg)
2990 {
2991     file_iformat = av_find_input_format(arg);
2992     if (!file_iformat) {
2993         fprintf(stderr, "Unknown input format: %s\n", arg);
2994         return AVERROR(EINVAL);
2995     }
2996     return 0;
2997 }
2998
2999 static int opt_frame_pix_fmt(void *optctx, const char *opt, const char *arg)
3000 {
3001     av_log(NULL, AV_LOG_WARNING, "Option -pix_fmt is deprecated, use -pixel_format.\n");
3002     return opt_default(NULL, "pixel_format", arg);
3003 }
3004
3005 static int opt_sync(void *optctx, const char *opt, const char *arg)
3006 {
3007     if (!strcmp(arg, "audio"))
3008         av_sync_type = AV_SYNC_AUDIO_MASTER;
3009     else if (!strcmp(arg, "video"))
3010         av_sync_type = AV_SYNC_VIDEO_MASTER;
3011     else if (!strcmp(arg, "ext"))
3012         av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
3013     else {
3014         fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
3015         exit(1);
3016     }
3017     return 0;
3018 }
3019
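/* -ss and -t values go through parse_time_or_die(): plain seconds ("90",
 * "12.5") as well as [HH:]MM:SS[.ms] forms ("1:30", "0:02:15.3") are accepted. */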
3020 static int opt_seek(void *optctx, const char *opt, const char *arg)
3021 {
3022     start_time = parse_time_or_die(opt, arg, 1);
3023     return 0;
3024 }
3025
3026 static int opt_duration(void *optctx, const char *opt, const char *arg)
3027 {
3028     duration = parse_time_or_die(opt, arg, 1);
3029     return 0;
3030 }
3031
3032 static int opt_show_mode(void *optctx, const char *opt, const char *arg)
3033 {
3034     show_mode = !strcmp(arg, "video") ? SHOW_MODE_VIDEO :
3035                 !strcmp(arg, "waves") ? SHOW_MODE_WAVES :
3036                 !strcmp(arg, "rdft" ) ? SHOW_MODE_RDFT  :
3037                 parse_number_or_die(opt, arg, OPT_INT, 0, SHOW_MODE_NB-1);
3038     return 0;
3039 }
3040
3041 static void opt_input_file(void *optctx, const char *filename)
3042 {
3043     if (input_filename) {
3044         fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
3045                 filename, input_filename);
3046         exit(1);
3047     }
3048     if (!strcmp(filename, "-"))
3049         filename = "pipe:";
3050     input_filename = filename;
3051 }
3052
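/* Handler for the per-stream codec options (e.g. -codec:a, -codec:v, -codec:s):
 * cmdutils passes the matched option name through, so the trailing character
 * of the stream specifier selects which codec name to override. */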
3053 static int opt_codec(void *o, const char *opt, const char *arg)
3054 {
3055     switch (opt[strlen(opt)-1]) {
3056     case 'a' :    audio_codec_name = arg; break;
3057     case 's' : subtitle_codec_name = arg; break;
3058     case 'v' :    video_codec_name = arg; break;
3059     }
3060     return 0;
3061 }
3062
3063 static int dummy;
3064
3065 static const OptionDef options[] = {
3066 #include "cmdutils_common_opts.h"
3067     { "x", HAS_ARG, { .func_arg = opt_width }, "force displayed width", "width" },
3068     { "y", HAS_ARG, { .func_arg = opt_height }, "force displayed height", "height" },
3069     { "s", HAS_ARG | OPT_VIDEO, { .func_arg = opt_frame_size }, "set frame size (WxH or abbreviation)", "size" },
3070     { "fs", OPT_BOOL, { &is_full_screen }, "force full screen" },
3071     { "an", OPT_BOOL, { &audio_disable }, "disable audio" },
3072     { "vn", OPT_BOOL, { &video_disable }, "disable video" },
3073     { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_AUDIO] }, "select desired audio stream", "stream_number" },
3074     { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_VIDEO] }, "select desired video stream", "stream_number" },
3075     { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_SUBTITLE] }, "select desired subtitle stream", "stream_number" },
3076     { "ss", HAS_ARG, { .func_arg = opt_seek }, "seek to a given position in seconds", "pos" },
3077     { "t", HAS_ARG, { .func_arg = opt_duration }, "play  \"duration\" seconds of audio/video", "duration" },
3078     { "bytes", OPT_INT | HAS_ARG, { &seek_by_bytes }, "seek by bytes 0=off 1=on -1=auto", "val" },
3079     { "nodisp", OPT_BOOL, { &display_disable }, "disable graphical display" },
3080     { "f", HAS_ARG, { .func_arg = opt_format }, "force format", "fmt" },
3081     { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, { .func_arg = opt_frame_pix_fmt }, "set pixel format", "format" },
3082     { "stats", OPT_BOOL | OPT_EXPERT, { &show_status }, "show status", "" },
3083     { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, { &workaround_bugs }, "workaround bugs", "" },
3084     { "fast", OPT_BOOL | OPT_EXPERT, { &fast }, "non spec compliant optimizations", "" },
3085     { "genpts", OPT_BOOL | OPT_EXPERT, { &genpts }, "generate pts", "" },
3086     { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, { &decoder_reorder_pts }, "let decoder reorder pts 0=off 1=on -1=auto", ""},
3087     { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, { &lowres }, "", "" },
3088     { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, { &skip_loop_filter }, "", "" },
3089     { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, { &skip_frame }, "", "" },
3090     { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, { &skip_idct }, "", "" },
3091     { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, { &idct }, "set idct algo",  "algo" },
3092     { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, { &error_concealment }, "set error concealment options",  "bit_mask" },
3093     { "sync", HAS_ARG | OPT_EXPERT, { .func_arg = opt_sync }, "set audio-video sync. type (type=audio/video/ext)", "type" },
3094     { "autoexit", OPT_BOOL | OPT_EXPERT, { &autoexit }, "exit at the end", "" },
3095     { "exitonkeydown", OPT_BOOL | OPT_EXPERT, { &exit_on_keydown }, "exit on key down", "" },
3096     { "exitonmousedown", OPT_BOOL | OPT_EXPERT, { &exit_on_mousedown }, "exit on mouse down", "" },
3097     { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, { &loop }, "set number of times the playback shall be looped", "loop count" },
3098     { "framedrop", OPT_BOOL | OPT_EXPERT, { &framedrop }, "drop frames when cpu is too slow", "" },
3099     { "infbuf", OPT_BOOL | OPT_EXPERT, { &infinite_buffer }, "don't limit the input buffer size (useful with realtime streams)", "" },
3100     { "window_title", OPT_STRING | HAS_ARG, { &window_title }, "set window title", "window title" },
3101 #if CONFIG_AVFILTER
3102     { "vf", OPT_STRING | HAS_ARG, { &vfilters }, "video filters", "filter list" },
3103 #endif
3104     { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, { &rdftspeed }, "rdft speed", "msecs" },
3105     { "showmode", HAS_ARG, { .func_arg = opt_show_mode}, "select show mode (0 = video, 1 = waves, 2 = RDFT)", "mode" },
3106     { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, { .func_arg = opt_default }, "generic catch all option", "" },
3107     { "i", OPT_BOOL, { &dummy}, "read specified file", "input_file"},
3108     { "codec", HAS_ARG, { .func_arg = opt_codec}, "force decoder", "decoder" },
3109     { NULL, },
3110 };
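/* Illustrative invocations of the options above (file names are hypothetical):
 *   ffplay -ss 90 -t 20 clip.mkv            start 90 seconds in, play 20 seconds
 *   ffplay -an -vf "scale=640:-1" clip.mkv  drop audio, apply a video filter (CONFIG_AVFILTER builds)
 *   ffplay -sync ext -autoexit clip.ts      slave A/V sync to the external clock, quit at the end
 */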
3111
3112 static void show_usage(void)
3113 {
3114     av_log(NULL, AV_LOG_INFO, "Simple media player\n");
3115     av_log(NULL, AV_LOG_INFO, "usage: %s [options] input_file\n", program_name);
3116     av_log(NULL, AV_LOG_INFO, "\n");
3117 }
3118
3119 void show_help_default(const char *opt, const char *arg)
3120 {
3121     av_log_set_callback(log_callback_help);
3122     show_usage();
3123     show_help_options(options, "Main options:", 0, OPT_EXPERT, 0);
3124     show_help_options(options, "Advanced options:", OPT_EXPERT, 0, 0);
3125     printf("\n");
3126     show_help_children(avcodec_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3127     show_help_children(avformat_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3128 #if !CONFIG_AVFILTER
3129     show_help_children(sws_get_class(), AV_OPT_FLAG_ENCODING_PARAM);
3130 #else
3131     show_help_children(avfilter_get_class(), AV_OPT_FLAG_FILTERING_PARAM);
3132 #endif
3133     printf("\nWhile playing:\n"
3134            "q, ESC              quit\n"
3135            "f                   toggle full screen\n"
3136            "p, SPC              pause\n"
3137            "a                   cycle audio channel\n"
3138            "v                   cycle video channel\n"
3139            "t                   cycle subtitle channel\n"
3140            "w                   show audio waves\n"
3141            "s                   activate frame-step mode\n"
3142            "left/right          seek backward/forward 10 seconds\n"
3143            "down/up             seek backward/forward 1 minute\n"
3144            "page down/page up   seek backward/forward 10 minutes\n"
3145            "mouse click         seek to percentage in file corresponding to fraction of width\n"
3146            );
3147 }
3148
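/* libavcodec lock manager callback; av_lockmgr_register() expects 0 on
 * success and nonzero on failure for each operation. */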
3149 static int lockmgr(void **mtx, enum AVLockOp op)
3150 {
3151     switch (op) {
3152     case AV_LOCK_CREATE:
3153         *mtx = SDL_CreateMutex();
3154         if (!*mtx)
3155             return 1;
3156         return 0;
3157     case AV_LOCK_OBTAIN:
3158         return !!SDL_LockMutex(*mtx);
3159     case AV_LOCK_RELEASE:
3160         return !!SDL_UnlockMutex(*mtx);
3161     case AV_LOCK_DESTROY:
3162         SDL_DestroyMutex(*mtx);
3163         return 0;
3164     }
3165     return 1;
3166 }
3167
3168 /* Called from the main */
3169 int main(int argc, char **argv)
3170 {
3171     int flags;
3172     VideoState *is;
3173     char dummy_videodriver[] = "SDL_VIDEODRIVER=dummy";
3174
3175     av_log_set_flags(AV_LOG_SKIP_REPEATED);
3176     parse_loglevel(argc, argv, options);
3177
3178     /* register all codecs, demuxers and protocols */
3179     avcodec_register_all();
3180 #if CONFIG_AVDEVICE
3181     avdevice_register_all();
3182 #endif
3183 #if CONFIG_AVFILTER
3184     avfilter_register_all();
3185 #endif
3186     av_register_all();
3187     avformat_network_init();
3188
3189     init_opts();
3190
3191     signal(SIGINT , sigterm_handler); /* Interrupt (ANSI).    */
3192     signal(SIGTERM, sigterm_handler); /* Termination (ANSI).  */
3193
3194     show_banner(argc, argv, options);
3195
3196     parse_options(NULL, argc, argv, options, opt_input_file);
3197
3198     if (!input_filename) {
3199         show_usage();
3200         fprintf(stderr, "An input file must be specified\n");
3201         fprintf(stderr, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
3202         exit(1);
3203     }
3204
3205     if (display_disable) {
3206         video_disable = 1;
3207     }
3208     flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3209     if (audio_disable)
3210         flags &= ~SDL_INIT_AUDIO;
3211     if (display_disable)
3212         SDL_putenv(dummy_videodriver); /* For the event queue, we always need a video driver. */
3213 #if !defined(__MINGW32__) && !defined(__APPLE__)
3214     flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
3215 #endif
3216     if (SDL_Init (flags)) {
3217         fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
3218         fprintf(stderr, "(Did you set the DISPLAY variable?)\n");
3219         exit(1);
3220     }
3221
3222     if (!display_disable) {
3223 #if HAVE_SDL_VIDEO_SIZE
3224         const SDL_VideoInfo *vi = SDL_GetVideoInfo();
3225         fs_screen_width = vi->current_w;
3226         fs_screen_height = vi->current_h;
3227 #endif
3228     }
3229
3230     SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
3231     SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3232     SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3233
3234     if (av_lockmgr_register(lockmgr)) {
3235         fprintf(stderr, "Could not initialize lock manager!\n");
3236         do_exit(NULL);
3237     }
3238
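    /* The flush packet is a sentinel put into each PacketQueue after a seek;
     * packet_queue_put() recognizes it by address and, with the serial field
     * added in this change, advances the queue serial so stale (pre-seek)
     * entries can be told apart from fresh ones.  Roughly (a sketch, see
     * packet_queue_put() for the real code):
     *
     *     if (pkt == &flush_pkt)
     *         q->serial++;
     *     pkt1->serial = q->serial;
     */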
3239     av_init_packet(&flush_pkt);
3240     flush_pkt.data = (uint8_t *)(intptr_t)"FLUSH";
3241
3242     is = stream_open(input_filename, file_iformat);
3243     if (!is) {
3244         fprintf(stderr, "Failed to initialize VideoState!\n");
3245         do_exit(NULL);
3246     }
3247
3248     event_loop(is);
3249
3250     /* never returns */
3251
3252     return 0;
3253 }