1 /*
2  * ffplay : Simple Media Player based on the FFmpeg libraries
3  * Copyright (c) 2003 Fabrice Bellard
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21
22 #include "config.h"
23 #include <inttypes.h>
24 #include <math.h>
25 #include <limits.h>
26 #include "libavutil/avstring.h"
27 #include "libavutil/colorspace.h"
28 #include "libavutil/mathematics.h"
29 #include "libavutil/pixdesc.h"
30 #include "libavutil/imgutils.h"
31 #include "libavutil/dict.h"
32 #include "libavutil/parseutils.h"
33 #include "libavutil/samplefmt.h"
34 #include "libavutil/avassert.h"
35 #include "libavformat/avformat.h"
36 #include "libavdevice/avdevice.h"
37 #include "libswscale/swscale.h"
38 #include "libavcodec/audioconvert.h"
39 #include "libavutil/opt.h"
40 #include "libavcodec/avfft.h"
41 #include "libswresample/swresample.h"
42
43 #if CONFIG_AVFILTER
44 # include "libavfilter/avcodec.h"
45 # include "libavfilter/avfilter.h"
46 # include "libavfilter/avfiltergraph.h"
47 # include "libavfilter/buffersink.h"
48 #endif
49
50 #include <SDL.h>
51 #include <SDL_thread.h>
52
53 #include "cmdutils.h"
54
55 #include <unistd.h>
56 #include <assert.h>
57
58 const char program_name[] = "ffplay";
59 const int program_birth_year = 2003;
60
61 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
62 #define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
63 #define MIN_FRAMES 5
64
65 /* SDL audio buffer size, in samples. Should be small to have precise
66    A/V sync as SDL does not have hardware buffer fullness info. */
67 #define SDL_AUDIO_BUFFER_SIZE 1024
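/* e.g. at 44100 Hz, 1024 samples correspond to roughly 23 ms of audio */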
68
69 /* no AV sync correction is done if below the AV sync threshold */
70 #define AV_SYNC_THRESHOLD 0.01
71 /* no AV correction is done if too big error */
72 #define AV_NOSYNC_THRESHOLD 10.0
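/* both thresholds are expressed in seconds */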
73
74 /* maximum audio speed change to get correct sync */
75 #define SAMPLE_CORRECTION_PERCENT_MAX 10
76
77 /* we use about AUDIO_DIFF_AVG_NB A-V differences to compute the average */
78 #define AUDIO_DIFF_AVG_NB   20
79
80 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
81 #define SAMPLE_ARRAY_SIZE (2*65536)
82
83 static int sws_flags = SWS_BICUBIC;
84
85 typedef struct PacketQueue {
86     AVPacketList *first_pkt, *last_pkt;
87     int nb_packets;
88     int size;
89     int abort_request;
90     SDL_mutex *mutex;
91     SDL_cond *cond;
92 } PacketQueue;
93
94 #define VIDEO_PICTURE_QUEUE_SIZE 2
95 #define SUBPICTURE_QUEUE_SIZE 4
96
97 typedef struct VideoPicture {
98     double pts;                                  ///<presentation time stamp for this picture
99     double duration;                             ///<expected duration of the frame
100     int64_t pos;                                 ///<byte position in file
101     int skip;
102     SDL_Overlay *bmp;
103     int width, height; /* source height & width */
104     int allocated;
105     int reallocate;
106     enum PixelFormat pix_fmt;
107
108 #if CONFIG_AVFILTER
109     AVFilterBufferRef *picref;
110 #endif
111 } VideoPicture;
112
113 typedef struct SubPicture {
114     double pts; /* presentation time stamp for this picture */
115     AVSubtitle sub;
116 } SubPicture;
117
118 enum {
119     AV_SYNC_AUDIO_MASTER, /* default choice */
120     AV_SYNC_VIDEO_MASTER,
121     AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
122 };
123
124 typedef struct VideoState {
125     SDL_Thread *read_tid;
126     SDL_Thread *video_tid;
127     SDL_Thread *refresh_tid;
128     AVInputFormat *iformat;
129     int no_background;
130     int abort_request;
131     int paused;
132     int last_paused;
133     int seek_req;
134     int seek_flags;
135     int64_t seek_pos;
136     int64_t seek_rel;
137     int read_pause_return;
138     AVFormatContext *ic;
139
140     int audio_stream;
141
142     int av_sync_type;
143     double external_clock; /* external clock base */
144     int64_t external_clock_time;
145
146     double audio_clock;
147     double audio_diff_cum; /* used for AV difference average computation */
148     double audio_diff_avg_coef;
149     double audio_diff_threshold;
150     int audio_diff_avg_count;
151     AVStream *audio_st;
152     PacketQueue audioq;
153     int audio_hw_buf_size;
154     /* samples output by the codec. we reserve more space for avsync
155        compensation, resampling and format conversion */
156     DECLARE_ALIGNED(16,uint8_t,audio_buf1)[AVCODEC_MAX_AUDIO_FRAME_SIZE * 4];
157     DECLARE_ALIGNED(16,uint8_t,audio_buf2)[AVCODEC_MAX_AUDIO_FRAME_SIZE * 4];
158     uint8_t *audio_buf;
159     unsigned int audio_buf_size; /* in bytes */
160     int audio_buf_index; /* in bytes */
161     int audio_write_buf_size;
162     AVPacket audio_pkt_temp;
163     AVPacket audio_pkt;
164     enum AVSampleFormat audio_src_fmt;
165     enum AVSampleFormat audio_tgt_fmt;
166     int audio_src_channels;
167     int audio_tgt_channels;
168     int64_t audio_src_channel_layout;
169     int64_t audio_tgt_channel_layout;
170     int audio_src_freq;
171     int audio_tgt_freq;
172     struct SwrContext *swr_ctx;
173     double audio_current_pts;
174     double audio_current_pts_drift;
175     int frame_drops_early;
176     int frame_drops_late;
177
178     enum ShowMode {
179         SHOW_MODE_NONE = -1, SHOW_MODE_VIDEO = 0, SHOW_MODE_WAVES, SHOW_MODE_RDFT, SHOW_MODE_NB
180     } show_mode;
181     int16_t sample_array[SAMPLE_ARRAY_SIZE];
182     int sample_array_index;
183     int last_i_start;
184     RDFTContext *rdft;
185     int rdft_bits;
186     FFTSample *rdft_data;
187     int xpos;
188
189     SDL_Thread *subtitle_tid;
190     int subtitle_stream;
191     int subtitle_stream_changed;
192     AVStream *subtitle_st;
193     PacketQueue subtitleq;
194     SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
195     int subpq_size, subpq_rindex, subpq_windex;
196     SDL_mutex *subpq_mutex;
197     SDL_cond *subpq_cond;
198
199     double frame_timer;
200     double frame_last_pts;
201     double frame_last_duration;
202     double frame_last_dropped_pts;
203     double frame_last_returned_time;
204     double frame_last_filter_delay;
205     int64_t frame_last_dropped_pos;
206     double video_clock;                          ///<pts of last decoded frame / predicted pts of next decoded frame
207     int video_stream;
208     AVStream *video_st;
209     PacketQueue videoq;
210     double video_current_pts;                    ///<current displayed pts (different from video_clock if frame fifos are used)
211     double video_current_pts_drift;              ///<video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
212     int64_t video_current_pos;                   ///<current displayed file pos
213     VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
214     int pictq_size, pictq_rindex, pictq_windex;
215     SDL_mutex *pictq_mutex;
216     SDL_cond *pictq_cond;
217 #if !CONFIG_AVFILTER
218     struct SwsContext *img_convert_ctx;
219 #endif
220
221     char filename[1024];
222     int width, height, xleft, ytop;
223     int step;
224
225 #if CONFIG_AVFILTER
226     AVFilterContext *out_video_filter;          ///<the last filter in the video chain
227 #endif
228
229     int refresh;
230 } VideoState;
231
232 static int opt_help(const char *opt, const char *arg);
233
234 /* options specified by the user */
235 static AVInputFormat *file_iformat;
236 static const char *input_filename;
237 static const char *window_title;
238 static int fs_screen_width;
239 static int fs_screen_height;
240 static int screen_width = 0;
241 static int screen_height = 0;
242 static int audio_disable;
243 static int video_disable;
244 static int wanted_stream[AVMEDIA_TYPE_NB]={
245     [AVMEDIA_TYPE_AUDIO]=-1,
246     [AVMEDIA_TYPE_VIDEO]=-1,
247     [AVMEDIA_TYPE_SUBTITLE]=-1,
248 };
249 static int seek_by_bytes=-1;
250 static int display_disable;
251 static int show_status = 1;
252 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
253 static int64_t start_time = AV_NOPTS_VALUE;
254 static int64_t duration = AV_NOPTS_VALUE;
255 static int workaround_bugs = 1;
256 static int fast = 0;
257 static int genpts = 0;
258 static int lowres = 0;
259 static int idct = FF_IDCT_AUTO;
260 static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
261 static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
262 static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
263 static int error_recognition = FF_ER_CAREFUL;
264 static int error_concealment = 3;
265 static int decoder_reorder_pts= -1;
266 static int autoexit;
267 static int exit_on_keydown;
268 static int exit_on_mousedown;
269 static int loop=1;
270 static int framedrop=-1;
271 static enum ShowMode show_mode = SHOW_MODE_NONE;
272 static const char *audio_codec_name;
273 static const char *subtitle_codec_name;
274 static const char *video_codec_name;
275
276 static int rdftspeed=20;
277 #if CONFIG_AVFILTER
278 static char *vfilters = NULL;
279 #endif
280
281 /* current context */
282 static int is_full_screen;
283 static int64_t audio_callback_time;
284
285 static AVPacket flush_pkt;
286
287 #define FF_ALLOC_EVENT   (SDL_USEREVENT)
288 #define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
289 #define FF_QUIT_EVENT    (SDL_USEREVENT + 2)
290
291 static SDL_Surface *screen;
292
293 void exit_program(int ret)
294 {
295     exit(ret);
296 }
297
298 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
299 {
300     AVPacketList *pkt1;
301
302     /* duplicate the packet */
303     if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
304         return -1;
305
306     pkt1 = av_malloc(sizeof(AVPacketList));
307     if (!pkt1)
308         return -1;
309     pkt1->pkt = *pkt;
310     pkt1->next = NULL;
311
312
313     SDL_LockMutex(q->mutex);
314
315     if (!q->last_pkt)
316
317         q->first_pkt = pkt1;
318     else
319         q->last_pkt->next = pkt1;
320     q->last_pkt = pkt1;
321     q->nb_packets++;
322     q->size += pkt1->pkt.size + sizeof(*pkt1);
323     /* XXX: should duplicate packet data in DV case */
324     SDL_CondSignal(q->cond);
325
326     SDL_UnlockMutex(q->mutex);
327     return 0;
328 }
329
330 /* packet queue handling */
331 static void packet_queue_init(PacketQueue *q)
332 {
333     memset(q, 0, sizeof(PacketQueue));
334     q->mutex = SDL_CreateMutex();
335     q->cond = SDL_CreateCond();
336     packet_queue_put(q, &flush_pkt);
337 }
338
339 static void packet_queue_flush(PacketQueue *q)
340 {
341     AVPacketList *pkt, *pkt1;
342
343     SDL_LockMutex(q->mutex);
344     for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
345         pkt1 = pkt->next;
346         av_free_packet(&pkt->pkt);
347         av_freep(&pkt);
348     }
349     q->last_pkt = NULL;
350     q->first_pkt = NULL;
351     q->nb_packets = 0;
352     q->size = 0;
353     SDL_UnlockMutex(q->mutex);
354 }
355
356 static void packet_queue_end(PacketQueue *q)
357 {
358     packet_queue_flush(q);
359     SDL_DestroyMutex(q->mutex);
360     SDL_DestroyCond(q->cond);
361 }
362
363 static void packet_queue_abort(PacketQueue *q)
364 {
365     SDL_LockMutex(q->mutex);
366
367     q->abort_request = 1;
368
369     SDL_CondSignal(q->cond);
370
371     SDL_UnlockMutex(q->mutex);
372 }
373
374 /* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
375 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
376 {
377     AVPacketList *pkt1;
378     int ret;
379
380     SDL_LockMutex(q->mutex);
381
382     for(;;) {
383         if (q->abort_request) {
384             ret = -1;
385             break;
386         }
387
388         pkt1 = q->first_pkt;
389         if (pkt1) {
390             q->first_pkt = pkt1->next;
391             if (!q->first_pkt)
392                 q->last_pkt = NULL;
393             q->nb_packets--;
394             q->size -= pkt1->pkt.size + sizeof(*pkt1);
395             *pkt = pkt1->pkt;
396             av_free(pkt1);
397             ret = 1;
398             break;
399         } else if (!block) {
400             ret = 0;
401             break;
402         } else {
403             SDL_CondWait(q->cond, q->mutex);
404         }
405     }
406     SDL_UnlockMutex(q->mutex);
407     return ret;
408 }
409
410 static inline void fill_rectangle(SDL_Surface *screen,
411                                   int x, int y, int w, int h, int color)
412 {
413     SDL_Rect rect;
414     rect.x = x;
415     rect.y = y;
416     rect.w = w;
417     rect.h = h;
418     SDL_FillRect(screen, &rect, color);
419 }
420
421 #define ALPHA_BLEND(a, oldp, newp, s)\
422 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
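/* oldp is pre-scaled by 2^s so that newp may be the sum of 2^s source samples (used when blending averaged chroma) */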
423
424 #define RGBA_IN(r, g, b, a, s)\
425 {\
426     unsigned int v = ((const uint32_t *)(s))[0];\
427     a = (v >> 24) & 0xff;\
428     r = (v >> 16) & 0xff;\
429     g = (v >> 8) & 0xff;\
430     b = v & 0xff;\
431 }
432
433 #define YUVA_IN(y, u, v, a, s, pal)\
434 {\
435     unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
436     a = (val >> 24) & 0xff;\
437     y = (val >> 16) & 0xff;\
438     u = (val >> 8) & 0xff;\
439     v = val & 0xff;\
440 }
441
442 #define YUVA_OUT(d, y, u, v, a)\
443 {\
444     ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
445 }
446
447
448 #define BPP 1
449
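/* blend a palettized subtitle rectangle (PAL8 data with a YUVA palette) onto a YUV 4:2:0 destination picture, clipped to imgw x imgh */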
450 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
451 {
452     int wrap, wrap3, width2, skip2;
453     int y, u, v, a, u1, v1, a1, w, h;
454     uint8_t *lum, *cb, *cr;
455     const uint8_t *p;
456     const uint32_t *pal;
457     int dstx, dsty, dstw, dsth;
458
459     dstw = av_clip(rect->w, 0, imgw);
460     dsth = av_clip(rect->h, 0, imgh);
461     dstx = av_clip(rect->x, 0, imgw - dstw);
462     dsty = av_clip(rect->y, 0, imgh - dsth);
463     lum = dst->data[0] + dsty * dst->linesize[0];
464     cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
465     cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
466
467     width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
468     skip2 = dstx >> 1;
469     wrap = dst->linesize[0];
470     wrap3 = rect->pict.linesize[0];
471     p = rect->pict.data[0];
472     pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */
473
474     if (dsty & 1) {
475         lum += dstx;
476         cb += skip2;
477         cr += skip2;
478
479         if (dstx & 1) {
480             YUVA_IN(y, u, v, a, p, pal);
481             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
482             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
483             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
484             cb++;
485             cr++;
486             lum++;
487             p += BPP;
488         }
489         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
490             YUVA_IN(y, u, v, a, p, pal);
491             u1 = u;
492             v1 = v;
493             a1 = a;
494             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
495
496             YUVA_IN(y, u, v, a, p + BPP, pal);
497             u1 += u;
498             v1 += v;
499             a1 += a;
500             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
501             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
502             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
503             cb++;
504             cr++;
505             p += 2 * BPP;
506             lum += 2;
507         }
508         if (w) {
509             YUVA_IN(y, u, v, a, p, pal);
510             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
511             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
512             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
513             p++;
514             lum++;
515         }
516         p += wrap3 - dstw * BPP;
517         lum += wrap - dstw - dstx;
518         cb += dst->linesize[1] - width2 - skip2;
519         cr += dst->linesize[2] - width2 - skip2;
520     }
521     for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
522         lum += dstx;
523         cb += skip2;
524         cr += skip2;
525
526         if (dstx & 1) {
527             YUVA_IN(y, u, v, a, p, pal);
528             u1 = u;
529             v1 = v;
530             a1 = a;
531             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
532             p += wrap3;
533             lum += wrap;
534             YUVA_IN(y, u, v, a, p, pal);
535             u1 += u;
536             v1 += v;
537             a1 += a;
538             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
539             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
540             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
541             cb++;
542             cr++;
543             p += -wrap3 + BPP;
544             lum += -wrap + 1;
545         }
546         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
547             YUVA_IN(y, u, v, a, p, pal);
548             u1 = u;
549             v1 = v;
550             a1 = a;
551             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
552
553             YUVA_IN(y, u, v, a, p + BPP, pal);
554             u1 += u;
555             v1 += v;
556             a1 += a;
557             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
558             p += wrap3;
559             lum += wrap;
560
561             YUVA_IN(y, u, v, a, p, pal);
562             u1 += u;
563             v1 += v;
564             a1 += a;
565             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
566
567             YUVA_IN(y, u, v, a, p + BPP, pal);
568             u1 += u;
569             v1 += v;
570             a1 += a;
571             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
572
573             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
574             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
575
576             cb++;
577             cr++;
578             p += -wrap3 + 2 * BPP;
579             lum += -wrap + 2;
580         }
581         if (w) {
582             YUVA_IN(y, u, v, a, p, pal);
583             u1 = u;
584             v1 = v;
585             a1 = a;
586             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
587             p += wrap3;
588             lum += wrap;
589             YUVA_IN(y, u, v, a, p, pal);
590             u1 += u;
591             v1 += v;
592             a1 += a;
593             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
594             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
595             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
596             cb++;
597             cr++;
598             p += -wrap3 + BPP;
599             lum += -wrap + 1;
600         }
601         p += wrap3 + (wrap3 - dstw * BPP);
602         lum += wrap + (wrap - dstw - dstx);
603         cb += dst->linesize[1] - width2 - skip2;
604         cr += dst->linesize[2] - width2 - skip2;
605     }
606     /* handle odd height */
607     if (h) {
608         lum += dstx;
609         cb += skip2;
610         cr += skip2;
611
612         if (dstx & 1) {
613             YUVA_IN(y, u, v, a, p, pal);
614             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
615             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
616             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
617             cb++;
618             cr++;
619             lum++;
620             p += BPP;
621         }
622         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
623             YUVA_IN(y, u, v, a, p, pal);
624             u1 = u;
625             v1 = v;
626             a1 = a;
627             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
628
629             YUVA_IN(y, u, v, a, p + BPP, pal);
630             u1 += u;
631             v1 += v;
632             a1 += a;
633             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
634             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
635             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
636             cb++;
637             cr++;
638             p += 2 * BPP;
639             lum += 2;
640         }
641         if (w) {
642             YUVA_IN(y, u, v, a, p, pal);
643             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
644             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
645             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
646         }
647     }
648 }
649
650 static void free_subpicture(SubPicture *sp)
651 {
652     avsubtitle_free(&sp->sub);
653 }
654
655 static void video_image_display(VideoState *is)
656 {
657     VideoPicture *vp;
658     SubPicture *sp;
659     AVPicture pict;
660     float aspect_ratio;
661     int width, height, x, y;
662     SDL_Rect rect;
663     int i;
664
665     vp = &is->pictq[is->pictq_rindex];
666     if (vp->bmp) {
667 #if CONFIG_AVFILTER
668          if (vp->picref->video->sample_aspect_ratio.num == 0)
669              aspect_ratio = 0;
670          else
671              aspect_ratio = av_q2d(vp->picref->video->sample_aspect_ratio);
672 #else
673
674         /* XXX: use variable in the frame */
675         if (is->video_st->sample_aspect_ratio.num)
676             aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
677         else if (is->video_st->codec->sample_aspect_ratio.num)
678             aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
679         else
680             aspect_ratio = 0;
681 #endif
682         if (aspect_ratio <= 0.0)
683             aspect_ratio = 1.0;
684         aspect_ratio *= (float)vp->width / (float)vp->height;
685
686         if (is->subtitle_st) {
687             if (is->subpq_size > 0) {
688                 sp = &is->subpq[is->subpq_rindex];
689
690                 if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) {
691                     SDL_LockYUVOverlay (vp->bmp);
692
693                     pict.data[0] = vp->bmp->pixels[0];
694                     pict.data[1] = vp->bmp->pixels[2];
695                     pict.data[2] = vp->bmp->pixels[1];
696
697                     pict.linesize[0] = vp->bmp->pitches[0];
698                     pict.linesize[1] = vp->bmp->pitches[2];
699                     pict.linesize[2] = vp->bmp->pitches[1];
700
701                     for (i = 0; i < sp->sub.num_rects; i++)
702                         blend_subrect(&pict, sp->sub.rects[i],
703                                       vp->bmp->w, vp->bmp->h);
704
705                     SDL_UnlockYUVOverlay (vp->bmp);
706                 }
707             }
708         }
709
710
711         /* XXX: we assume the screen has a 1:1 pixel aspect ratio */
712         height = is->height;
713         width = ((int)rint(height * aspect_ratio)) & ~1;
714         if (width > is->width) {
715             width = is->width;
716             height = ((int)rint(width / aspect_ratio)) & ~1;
717         }
718         x = (is->width - width) / 2;
719         y = (is->height - height) / 2;
720         is->no_background = 0;
721         rect.x = is->xleft + x;
722         rect.y = is->ytop  + y;
723         rect.w = FFMAX(width,  1);
724         rect.h = FFMAX(height, 1);
725         SDL_DisplayYUVOverlay(vp->bmp, &rect);
726     }
727 }
728
729 static inline int compute_mod(int a, int b)
730 {
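    /* mathematically correct modulo: the result is always in [0, b) even when a is negative */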
731     return a < 0 ? a%b + b : a%b;
732 }
733
734 static void video_audio_display(VideoState *s)
735 {
736     int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
737     int ch, channels, h, h2, bgcolor, fgcolor;
738     int64_t time_diff;
739     int rdft_bits, nb_freq;
740
741     for(rdft_bits=1; (1<<rdft_bits)<2*s->height; rdft_bits++)
742         ;
743     nb_freq= 1<<(rdft_bits-1);
744
745     /* compute display index : center on currently output samples */
746     channels = s->audio_tgt_channels;
747     nb_display_channels = channels;
748     if (!s->paused) {
749         int data_used= s->show_mode == SHOW_MODE_WAVES ? s->width : (2*nb_freq);
750         n = 2 * channels;
751         delay = s->audio_write_buf_size;
752         delay /= n;
753
754         /* to be more precise, we take into account the time spent since
755            the last buffer computation */
756         if (audio_callback_time) {
757             time_diff = av_gettime() - audio_callback_time;
758             delay -= (time_diff * s->audio_tgt_freq) / 1000000;
759         }
760
761         delay += 2*data_used;
762         if (delay < data_used)
763             delay = data_used;
764
765         i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
766         if (s->show_mode == SHOW_MODE_WAVES) {
767             h= INT_MIN;
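            /* look for a zero crossing ((b ^ c) < 0 means b and c have opposite signs) where the
               signal drops the most over the following samples, so the waveform display stays
               stable between frames */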
768             for(i=0; i<1000; i+=channels){
769                 int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
770                 int a= s->sample_array[idx];
771                 int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
772                 int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
773                 int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
774                 int score= a-d;
775                 if(h<score && (b^c)<0){
776                     h= score;
777                     i_start= idx;
778                 }
779             }
780         }
781
782         s->last_i_start = i_start;
783     } else {
784         i_start = s->last_i_start;
785     }
786
787     bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
788     if (s->show_mode == SHOW_MODE_WAVES) {
789         fill_rectangle(screen,
790                        s->xleft, s->ytop, s->width, s->height,
791                        bgcolor);
792
793         fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
794
795         /* total height for one channel */
796         h = s->height / nb_display_channels;
797         /* graph height / 2 */
798         h2 = (h * 9) / 20;
799         for(ch = 0;ch < nb_display_channels; ch++) {
800             i = i_start + ch;
801             y1 = s->ytop + ch * h + (h / 2); /* position of center line */
802             for(x = 0; x < s->width; x++) {
803                 y = (s->sample_array[i] * h2) >> 15;
804                 if (y < 0) {
805                     y = -y;
806                     ys = y1 - y;
807                 } else {
808                     ys = y1;
809                 }
810                 fill_rectangle(screen,
811                                s->xleft + x, ys, 1, y,
812                                fgcolor);
813                 i += channels;
814                 if (i >= SAMPLE_ARRAY_SIZE)
815                     i -= SAMPLE_ARRAY_SIZE;
816             }
817         }
818
819         fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
820
821         for(ch = 1;ch < nb_display_channels; ch++) {
822             y = s->ytop + ch * h;
823             fill_rectangle(screen,
824                            s->xleft, y, s->width, 1,
825                            fgcolor);
826         }
827         SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
828     }else{
829         nb_display_channels= FFMIN(nb_display_channels, 2);
830         if(rdft_bits != s->rdft_bits){
831             av_rdft_end(s->rdft);
832             av_free(s->rdft_data);
833             s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
834             s->rdft_bits= rdft_bits;
835             s->rdft_data= av_malloc(4*nb_freq*sizeof(*s->rdft_data));
836         }
837         {
838             FFTSample *data[2];
839             for(ch = 0;ch < nb_display_channels; ch++) {
840                 data[ch] = s->rdft_data + 2*nb_freq*ch;
841                 i = i_start + ch;
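                /* fill the RDFT input with samples shaped by a Welch window (1 - w*w) */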
842                 for(x = 0; x < 2*nb_freq; x++) {
843                     double w= (x-nb_freq)*(1.0/nb_freq);
844                     data[ch][x]= s->sample_array[i]*(1.0-w*w);
845                     i += channels;
846                     if (i >= SAMPLE_ARRAY_SIZE)
847                         i -= SAMPLE_ARRAY_SIZE;
848                 }
849                 av_rdft_calc(s->rdft, data[ch]);
850             }
851             // least efficient way to do this; we should access the data directly, but it's more than fast enough
852             for(y=0; y<s->height; y++){
853                 double w= 1/sqrt(nb_freq);
854                 int a= sqrt(w*sqrt(data[0][2*y+0]*data[0][2*y+0] + data[0][2*y+1]*data[0][2*y+1]));
855                 int b= (nb_display_channels == 2 ) ? sqrt(w*sqrt(data[1][2*y+0]*data[1][2*y+0]
856                        + data[1][2*y+1]*data[1][2*y+1])) : a;
857                 a= FFMIN(a,255);
858                 b= FFMIN(b,255);
859                 fgcolor = SDL_MapRGB(screen->format, a, b, (a+b)/2);
860
861                 fill_rectangle(screen,
862                             s->xpos, s->height-y, 1, 1,
863                             fgcolor);
864             }
865         }
866         SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
867         s->xpos++;
868         if(s->xpos >= s->width)
869             s->xpos= s->xleft;
870     }
871 }
872
873 static void stream_close(VideoState *is)
874 {
875     VideoPicture *vp;
876     int i;
877     /* XXX: use a special url_shutdown call to abort parse cleanly */
878     is->abort_request = 1;
879     SDL_WaitThread(is->read_tid, NULL);
880     SDL_WaitThread(is->refresh_tid, NULL);
881
882     /* free all pictures */
883     for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
884         vp = &is->pictq[i];
885 #if CONFIG_AVFILTER
886         if (vp->picref) {
887             avfilter_unref_buffer(vp->picref);
888             vp->picref = NULL;
889         }
890 #endif
891         if (vp->bmp) {
892             SDL_FreeYUVOverlay(vp->bmp);
893             vp->bmp = NULL;
894         }
895     }
896     SDL_DestroyMutex(is->pictq_mutex);
897     SDL_DestroyCond(is->pictq_cond);
898     SDL_DestroyMutex(is->subpq_mutex);
899     SDL_DestroyCond(is->subpq_cond);
900 #if !CONFIG_AVFILTER
901     if (is->img_convert_ctx)
902         sws_freeContext(is->img_convert_ctx);
903 #endif
904     av_free(is);
905 }
906
907 static void do_exit(VideoState *is)
908 {
909     if (is) {
910         stream_close(is);
911     }
912     av_lockmgr_register(NULL);
913     uninit_opts();
914 #if CONFIG_AVFILTER
915     avfilter_uninit();
916 #endif
917     if (show_status)
918         printf("\n");
919     SDL_Quit();
920     av_log(NULL, AV_LOG_QUIET, "%s", "");
921     exit(0);
922 }
923
924 static int video_open(VideoState *is){
925     int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
926     int w,h;
927
928     if(is_full_screen) flags |= SDL_FULLSCREEN;
929     else               flags |= SDL_RESIZABLE;
930
931     if (is_full_screen && fs_screen_width) {
932         w = fs_screen_width;
933         h = fs_screen_height;
934     } else if(!is_full_screen && screen_width){
935         w = screen_width;
936         h = screen_height;
937 #if CONFIG_AVFILTER
938     }else if (is->out_video_filter && is->out_video_filter->inputs[0]){
939         w = is->out_video_filter->inputs[0]->w;
940         h = is->out_video_filter->inputs[0]->h;
941 #else
942     }else if (is->video_st && is->video_st->codec->width){
943         w = is->video_st->codec->width;
944         h = is->video_st->codec->height;
945 #endif
946     } else {
947         w = 640;
948         h = 480;
949     }
950     if(screen && is->width == screen->w && screen->w == w
951        && is->height== screen->h && screen->h == h)
952         return 0;
953     screen = SDL_SetVideoMode(w, h, 0, flags);
954     if (!screen) {
955         fprintf(stderr, "SDL: could not set video mode - exiting\n");
956         do_exit(is);
957     }
958     if (!window_title)
959         window_title = input_filename;
960     SDL_WM_SetCaption(window_title, window_title);
961
962     is->width = screen->w;
963     is->height = screen->h;
964
965     return 0;
966 }
967
968 /* display the current picture, if any */
969 static void video_display(VideoState *is)
970 {
971     if(!screen)
972         video_open(is);
973     if (is->audio_st && is->show_mode != SHOW_MODE_VIDEO)
974         video_audio_display(is);
975     else if (is->video_st)
976         video_image_display(is);
977 }
978
979 static int refresh_thread(void *opaque)
980 {
981     VideoState *is= opaque;
982     while(!is->abort_request){
983         SDL_Event event;
984         event.type = FF_REFRESH_EVENT;
985         event.user.data1 = opaque;
986         if(!is->refresh){
987             is->refresh=1;
988             SDL_PushEvent(&event);
989         }
990         // FIXME: ideally we should wait for the correct time, but SDL's event passing is so slow it would be silly
991         usleep(is->audio_st && is->show_mode != SHOW_MODE_VIDEO ? rdftspeed*1000 : 5000);
992     }
993     return 0;
994 }
995
996 /* get the current audio clock value */
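/* while playing, the clock is extrapolated: audio_current_pts_drift holds (pts - system time)
   measured at the last update, so adding the current system time gives the present position */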
997 static double get_audio_clock(VideoState *is)
998 {
999     if (is->paused) {
1000         return is->audio_current_pts;
1001     } else {
1002         return is->audio_current_pts_drift + av_gettime() / 1000000.0;
1003     }
1004 }
1005
1006 /* get the current video clock value */
1007 static double get_video_clock(VideoState *is)
1008 {
1009     if (is->paused) {
1010         return is->video_current_pts;
1011     } else {
1012         return is->video_current_pts_drift + av_gettime() / 1000000.0;
1013     }
1014 }
1015
1016 /* get the current external clock value */
1017 static double get_external_clock(VideoState *is)
1018 {
1019     int64_t ti;
1020     ti = av_gettime();
1021     return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
1022 }
1023
1024 /* get the current master clock value */
1025 static double get_master_clock(VideoState *is)
1026 {
1027     double val;
1028
1029     if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1030         if (is->video_st)
1031             val = get_video_clock(is);
1032         else
1033             val = get_audio_clock(is);
1034     } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1035         if (is->audio_st)
1036             val = get_audio_clock(is);
1037         else
1038             val = get_video_clock(is);
1039     } else {
1040         val = get_external_clock(is);
1041     }
1042     return val;
1043 }
1044
1045 /* seek in the stream */
1046 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1047 {
1048     if (!is->seek_req) {
1049         is->seek_pos = pos;
1050         is->seek_rel = rel;
1051         is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1052         if (seek_by_bytes)
1053             is->seek_flags |= AVSEEK_FLAG_BYTE;
1054         is->seek_req = 1;
1055     }
1056 }
1057
1058 /* pause or resume the video */
1059 static void stream_toggle_pause(VideoState *is)
1060 {
1061     if (is->paused) {
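        /* on resume, advance frame_timer by the wall-clock time elapsed since the last
           video pts update, i.e. the time spent paused */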
1062         is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
1063         if(is->read_pause_return != AVERROR(ENOSYS)){
1064             is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
1065         }
1066         is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
1067     }
1068     is->paused = !is->paused;
1069 }
1070
1071 static double compute_target_delay(double delay, VideoState *is)
1072 {
1073     double sync_threshold, diff = 0;
1074
1075     /* update delay to follow master synchronisation source */
1076     if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1077          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1078         /* if video is slave, we try to correct big delays by
1079            duplicating or deleting a frame */
1080         diff = get_video_clock(is) - get_master_clock(is);
1081
1082         /* skip or repeat frame. We take into account the
1083            delay to compute the threshold. I still don't know
1084            if it is the best guess */
1085         sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1086         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1087             if (diff <= -sync_threshold)
1088                 delay = 0;
1089             else if (diff >= sync_threshold)
1090                 delay = 2 * delay;
1091         }
1092     }
1093
1094     av_dlog(NULL, "video: delay=%0.3f A-V=%f\n",
1095             delay, -diff);
1096
1097     return delay;
1098 }
1099
1100 static void pictq_next_picture(VideoState *is) {
1101     /* update queue size and signal for next picture */
1102     if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1103         is->pictq_rindex = 0;
1104
1105     SDL_LockMutex(is->pictq_mutex);
1106     is->pictq_size--;
1107     SDL_CondSignal(is->pictq_cond);
1108     SDL_UnlockMutex(is->pictq_mutex);
1109 }
1110
1111 static void update_video_pts(VideoState *is, double pts, int64_t pos) {
1112     double time = av_gettime() / 1000000.0;
1113     /* update current video pts */
1114     is->video_current_pts = pts;
1115     is->video_current_pts_drift = is->video_current_pts - time;
1116     is->video_current_pos = pos;
1117     is->frame_last_pts = pts;
1118 }
1119
1120 /* called to display each frame */
1121 static void video_refresh(void *opaque)
1122 {
1123     VideoState *is = opaque;
1124     VideoPicture *vp;
1125     double time;
1126
1127     SubPicture *sp, *sp2;
1128
1129     if (is->video_st) {
1130 retry:
1131         if (is->pictq_size == 0) {
1132             SDL_LockMutex(is->pictq_mutex);
1133             if (is->frame_last_dropped_pts != AV_NOPTS_VALUE && is->frame_last_dropped_pts > is->frame_last_pts) {
1134                 update_video_pts(is, is->frame_last_dropped_pts, is->frame_last_dropped_pos);
1135                 is->frame_last_dropped_pts = AV_NOPTS_VALUE;
1136             }
1137             SDL_UnlockMutex(is->pictq_mutex);
1138             // nothing to do, no picture to display in the queue
1139         } else {
1140             double last_duration, duration, delay;
1141             /* dequeue the picture */
1142             vp = &is->pictq[is->pictq_rindex];
1143
1144             if (vp->skip) {
1145                 pictq_next_picture(is);
1146                 goto retry;
1147             }
1148
1149             /* compute nominal last_duration */
1150             last_duration = vp->pts - is->frame_last_pts;
1151             if (last_duration > 0 && last_duration < 10.0) {
1152                 /* if duration of the last frame was sane, update last_duration in video state */
1153                 is->frame_last_duration = last_duration;
1154             }
1155             delay = compute_target_delay(is->frame_last_duration, is);
1156
1157             time= av_gettime()/1000000.0;
1158             if(time < is->frame_timer + delay)
1159                 return;
1160
1161             if (delay > 0)
1162                 is->frame_timer += delay * FFMAX(1, floor((time-is->frame_timer) / delay));
1163
1164             SDL_LockMutex(is->pictq_mutex);
1165             update_video_pts(is, vp->pts, vp->pos);
1166             SDL_UnlockMutex(is->pictq_mutex);
1167
1168             if(is->pictq_size > 1) {
1169                  VideoPicture *nextvp= &is->pictq[(is->pictq_rindex+1)%VIDEO_PICTURE_QUEUE_SIZE];
1170                  duration = nextvp->pts - vp->pts; // more accurate this way; 1/time_base often does not reflect the real frame rate
1171             } else {
1172                  duration = vp->duration;
1173             }
1174
1175             if((framedrop>0 || (framedrop && is->audio_st)) && time > is->frame_timer + duration){
1176                 if(is->pictq_size > 1){
1177                     is->frame_drops_late++;
1178                     pictq_next_picture(is);
1179                     goto retry;
1180                 }
1181             }
1182
1183             if(is->subtitle_st) {
1184                 if (is->subtitle_stream_changed) {
1185                     SDL_LockMutex(is->subpq_mutex);
1186
1187                     while (is->subpq_size) {
1188                         free_subpicture(&is->subpq[is->subpq_rindex]);
1189
1190                         /* update queue size and signal for next picture */
1191                         if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1192                             is->subpq_rindex = 0;
1193
1194                         is->subpq_size--;
1195                     }
1196                     is->subtitle_stream_changed = 0;
1197
1198                     SDL_CondSignal(is->subpq_cond);
1199                     SDL_UnlockMutex(is->subpq_mutex);
1200                 } else {
1201                     if (is->subpq_size > 0) {
1202                         sp = &is->subpq[is->subpq_rindex];
1203
1204                         if (is->subpq_size > 1)
1205                             sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1206                         else
1207                             sp2 = NULL;
1208
1209                         if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1210                                 || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1211                         {
1212                             free_subpicture(sp);
1213
1214                             /* update queue size and signal for next picture */
1215                             if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1216                                 is->subpq_rindex = 0;
1217
1218                             SDL_LockMutex(is->subpq_mutex);
1219                             is->subpq_size--;
1220                             SDL_CondSignal(is->subpq_cond);
1221                             SDL_UnlockMutex(is->subpq_mutex);
1222                         }
1223                     }
1224                 }
1225             }
1226
1227             /* display picture */
1228             if (!display_disable)
1229                 video_display(is);
1230
1231             pictq_next_picture(is);
1232         }
1233     } else if (is->audio_st) {
1234         /* draw the next audio frame */
1235
1236         /* if there is only an audio stream, display the audio bars (better
1237            than nothing, just to test the implementation) */
1238
1239         /* display picture */
1240         if (!display_disable)
1241             video_display(is);
1242     }
1243     if (show_status) {
1244         static int64_t last_time;
1245         int64_t cur_time;
1246         int aqsize, vqsize, sqsize;
1247         double av_diff;
1248
1249         cur_time = av_gettime();
1250         if (!last_time || (cur_time - last_time) >= 30000) {
1251             aqsize = 0;
1252             vqsize = 0;
1253             sqsize = 0;
1254             if (is->audio_st)
1255                 aqsize = is->audioq.size;
1256             if (is->video_st)
1257                 vqsize = is->videoq.size;
1258             if (is->subtitle_st)
1259                 sqsize = is->subtitleq.size;
1260             av_diff = 0;
1261             if (is->audio_st && is->video_st)
1262                 av_diff = get_audio_clock(is) - get_video_clock(is);
1263             printf("%7.2f A-V:%7.3f fd=%4d aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
1264                    get_master_clock(is),
1265                    av_diff,
1266                    is->frame_drops_early + is->frame_drops_late,
1267                    aqsize / 1024,
1268                    vqsize / 1024,
1269                    sqsize,
1270                    is->video_st ? is->video_st->codec->pts_correction_num_faulty_dts : 0,
1271                    is->video_st ? is->video_st->codec->pts_correction_num_faulty_pts : 0);
1272             fflush(stdout);
1273             last_time = cur_time;
1274         }
1275     }
1276 }
1277
1278 /* allocate a picture (this needs to be done in the main thread to avoid
1279    potential locking problems) */
1280 static void alloc_picture(void *opaque)
1281 {
1282     VideoState *is = opaque;
1283     VideoPicture *vp;
1284
1285     vp = &is->pictq[is->pictq_windex];
1286
1287     if (vp->bmp)
1288         SDL_FreeYUVOverlay(vp->bmp);
1289
1290 #if CONFIG_AVFILTER
1291     if (vp->picref)
1292         avfilter_unref_buffer(vp->picref);
1293     vp->picref = NULL;
1294
1295     vp->width   = is->out_video_filter->inputs[0]->w;
1296     vp->height  = is->out_video_filter->inputs[0]->h;
1297     vp->pix_fmt = is->out_video_filter->inputs[0]->format;
1298 #else
1299     vp->width   = is->video_st->codec->width;
1300     vp->height  = is->video_st->codec->height;
1301     vp->pix_fmt = is->video_st->codec->pix_fmt;
1302 #endif
1303
1304     vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
1305                                    SDL_YV12_OVERLAY,
1306                                    screen);
1307     if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
1308         /* SDL allocates a buffer smaller than requested if the video
1309          * overlay hardware is unable to support the requested size. */
1310         fprintf(stderr, "Error: the video system does not support an image\n"
1311                         "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n"
1312                         "to reduce the image size.\n", vp->width, vp->height );
1313         do_exit(is);
1314     }
1315
1316     SDL_LockMutex(is->pictq_mutex);
1317     vp->allocated = 1;
1318     SDL_CondSignal(is->pictq_cond);
1319     SDL_UnlockMutex(is->pictq_mutex);
1320 }
1321
1322 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
1323 {
1324     VideoPicture *vp;
1325     double frame_delay, pts = pts1;
1326
1327     /* compute the exact PTS for the picture if it is omitted in the stream
1328      * pts1 is the dts of the pkt / pts of the frame */
1329     if (pts != 0) {
1330         /* update video clock with pts, if present */
1331         is->video_clock = pts;
1332     } else {
1333         pts = is->video_clock;
1334     }
1335     /* update video clock for next frame */
1336     frame_delay = av_q2d(is->video_st->codec->time_base);
1337     /* for MPEG2, the frame can be repeated, so we update the
1338        clock accordingly */
1339     frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
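    /* i.e. the frame is displayed for (1 + repeat_pict / 2) * time_base seconds */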
1340     is->video_clock += frame_delay;
1341
1342 #if defined(DEBUG_SYNC) && 0
1343     printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
1344            av_get_picture_type_char(src_frame->pict_type), pts, pts1);
1345 #endif
1346
1347     /* wait until we have space to put a new picture */
1348     SDL_LockMutex(is->pictq_mutex);
1349
1350     while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1351            !is->videoq.abort_request) {
1352         SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1353     }
1354     SDL_UnlockMutex(is->pictq_mutex);
1355
1356     if (is->videoq.abort_request)
1357         return -1;
1358
1359     vp = &is->pictq[is->pictq_windex];
1360
1361     vp->duration = frame_delay;
1362
1363     /* alloc or resize hardware picture buffer */
1364     if (!vp->bmp || vp->reallocate ||
1365 #if CONFIG_AVFILTER
1366         vp->width  != is->out_video_filter->inputs[0]->w ||
1367         vp->height != is->out_video_filter->inputs[0]->h) {
1368 #else
1369         vp->width != is->video_st->codec->width ||
1370         vp->height != is->video_st->codec->height) {
1371 #endif
1372         SDL_Event event;
1373
1374         vp->allocated  = 0;
1375         vp->reallocate = 0;
1376
1377         /* the allocation must be done in the main thread to avoid
1378            locking problems */
1379         event.type = FF_ALLOC_EVENT;
1380         event.user.data1 = is;
1381         SDL_PushEvent(&event);
1382
1383         /* wait until the picture is allocated */
1384         SDL_LockMutex(is->pictq_mutex);
1385         while (!vp->allocated && !is->videoq.abort_request) {
1386             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1387         }
1388         /* if the queue is aborted, we have to pop the pending ALLOC event or wait for the allocation to complete */
1389         if (is->videoq.abort_request && SDL_PeepEvents(&event, 1, SDL_GETEVENT, SDL_EVENTMASK(FF_ALLOC_EVENT)) != 1) {
1390             while (!vp->allocated) {
1391                 SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1392             }
1393         }
1394         SDL_UnlockMutex(is->pictq_mutex);
1395
1396         if (is->videoq.abort_request)
1397             return -1;
1398     }
1399
1400     /* if the frame is not skipped, then display it */
1401     if (vp->bmp) {
1402         AVPicture pict;
1403 #if CONFIG_AVFILTER
1404         if(vp->picref)
1405             avfilter_unref_buffer(vp->picref);
1406         vp->picref = src_frame->opaque;
1407 #endif
1408
1409         /* get a pointer to the bitmap */
1410         SDL_LockYUVOverlay (vp->bmp);
1411
1412         memset(&pict,0,sizeof(AVPicture));
1413         pict.data[0] = vp->bmp->pixels[0];
1414         pict.data[1] = vp->bmp->pixels[2];
1415         pict.data[2] = vp->bmp->pixels[1];
1416
1417         pict.linesize[0] = vp->bmp->pitches[0];
1418         pict.linesize[1] = vp->bmp->pitches[2];
1419         pict.linesize[2] = vp->bmp->pitches[1];
1420
1421 #if CONFIG_AVFILTER
1422         //FIXME use direct rendering
1423         av_picture_copy(&pict, (AVPicture *)src_frame,
1424                         vp->pix_fmt, vp->width, vp->height);
1425 #else
1426         sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
1427         is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
1428             vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
1429             PIX_FMT_YUV420P, sws_flags, NULL, NULL, NULL);
1430         if (is->img_convert_ctx == NULL) {
1431             fprintf(stderr, "Cannot initialize the conversion context\n");
1432             exit(1);
1433         }
1434         sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1435                   0, vp->height, pict.data, pict.linesize);
1436 #endif
1437         /* update the bitmap content */
1438         SDL_UnlockYUVOverlay(vp->bmp);
1439
1440         vp->pts = pts;
1441         vp->pos = pos;
1442         vp->skip = 0;
1443
1444         /* now we can update the picture count */
1445         if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1446             is->pictq_windex = 0;
1447         SDL_LockMutex(is->pictq_mutex);
1448         is->pictq_size++;
1449         SDL_UnlockMutex(is->pictq_mutex);
1450     }
1451     return 0;
1452 }
1453
1454 static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
1455 {
1456     int got_picture, i;
1457
1458     if (packet_queue_get(&is->videoq, pkt, 1) < 0)
1459         return -1;
1460
1461     if (pkt->data == flush_pkt.data) {
1462         avcodec_flush_buffers(is->video_st->codec);
1463
1464         SDL_LockMutex(is->pictq_mutex);
1465         // Make sure there are no long-delay timers (ideally we should just flush the queue, but that's harder)
1466         for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
1467             is->pictq[i].skip = 1;
1468         }
1469         while (is->pictq_size && !is->videoq.abort_request) {
1470             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1471         }
1472         is->video_current_pos = -1;
1473         is->frame_last_pts = AV_NOPTS_VALUE;
1474         is->frame_last_duration = 0;
1475         is->frame_timer = (double)av_gettime() / 1000000.0;
1476         is->frame_last_dropped_pts = AV_NOPTS_VALUE;
1477         SDL_UnlockMutex(is->pictq_mutex);
1478
1479         return 0;
1480     }
1481
1482     avcodec_decode_video2(is->video_st->codec, frame, &got_picture, pkt);
1483
1484     if (got_picture) {
1485         int ret = 1;
1486
1487         if (decoder_reorder_pts == -1) {
1488             *pts = frame->best_effort_timestamp;
1489         } else if (decoder_reorder_pts) {
1490             *pts = frame->pkt_pts;
1491         } else {
1492             *pts = frame->pkt_dts;
1493         }
1494
1495         if (*pts == AV_NOPTS_VALUE) {
1496             *pts = 0;
1497         }
1498
1499         if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) || is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK) &&
1500              (framedrop>0 || (framedrop && is->audio_st))) {
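            /* early frame drop: if, even after advancing by this frame's pts difference, the video
               clock would still trail the master clock (allowing for the last filter delay), skip the
               frame before conversion and queueing */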
1501             SDL_LockMutex(is->pictq_mutex);
1502             if (is->frame_last_pts != AV_NOPTS_VALUE && *pts) {
1503                 double clockdiff = get_video_clock(is) - get_master_clock(is);
1504                 double dpts = av_q2d(is->video_st->time_base) * *pts;
1505                 double ptsdiff = dpts - is->frame_last_pts;
1506                 if (fabs(clockdiff) < AV_NOSYNC_THRESHOLD &&
1507                      ptsdiff > 0 && ptsdiff < AV_NOSYNC_THRESHOLD &&
1508                      clockdiff + ptsdiff - is->frame_last_filter_delay < 0) {
1509                     is->frame_last_dropped_pos = pkt->pos;
1510                     is->frame_last_dropped_pts = dpts;
1511                     is->frame_drops_early++;
1512                     ret = 0;
1513                 }
1514             }
1515             SDL_UnlockMutex(is->pictq_mutex);
1516         }
1517
1518         if (ret)
1519             is->frame_last_returned_time = av_gettime() / 1000000.0;
1520
1521         return ret;
1522     }
1523     return 0;
1524 }
1525
1526 #if CONFIG_AVFILTER
1527 typedef struct {
1528     VideoState *is;
1529     AVFrame *frame;
1530     int use_dr1;
1531 } FilterPriv;
1532
1533 static int input_get_buffer(AVCodecContext *codec, AVFrame *pic)
1534 {
1535     AVFilterContext *ctx = codec->opaque;
1536     AVFilterBufferRef  *ref;
1537     int perms = AV_PERM_WRITE;
1538     int i, w, h, stride[4];
1539     unsigned edge;
1540     int pixel_size;
1541
1542     av_assert0(codec->flags & CODEC_FLAG_EMU_EDGE);
1543
1544     if (codec->codec->capabilities & CODEC_CAP_NEG_LINESIZES)
1545         perms |= AV_PERM_NEG_LINESIZES;
1546
1547     if(pic->buffer_hints & FF_BUFFER_HINTS_VALID) {
1548         if(pic->buffer_hints & FF_BUFFER_HINTS_READABLE) perms |= AV_PERM_READ;
1549         if(pic->buffer_hints & FF_BUFFER_HINTS_PRESERVE) perms |= AV_PERM_PRESERVE;
1550         if(pic->buffer_hints & FF_BUFFER_HINTS_REUSABLE) perms |= AV_PERM_REUSE2;
1551     }
1552     if(pic->reference) perms |= AV_PERM_READ | AV_PERM_PRESERVE;
1553
1554     w = codec->width;
1555     h = codec->height;
1556
1557     if(av_image_check_size(w, h, 0, codec))
1558         return -1;
1559
1560     avcodec_align_dimensions2(codec, &w, &h, stride);
1561     edge = codec->flags & CODEC_FLAG_EMU_EDGE ? 0 : avcodec_get_edge_width();
1562     w += edge << 1;
1563     h += edge << 1;
1564
1565     if(!(ref = avfilter_get_video_buffer(ctx->outputs[0], perms, w, h)))
1566         return -1;
1567
1568     pixel_size = av_pix_fmt_descriptors[ref->format].comp[0].step_minus1+1;
1569     ref->video->w = codec->width;
1570     ref->video->h = codec->height;
1571     for(i = 0; i < 4; i ++) {
1572         unsigned hshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_w : 0;
1573         unsigned vshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_h : 0;
1574
1575         if (ref->data[i]) {
1576             ref->data[i]    += ((edge * pixel_size) >> hshift) + ((edge * ref->linesize[i]) >> vshift);
1577         }
1578         pic->data[i]     = ref->data[i];
1579         pic->linesize[i] = ref->linesize[i];
1580     }
1581     pic->opaque = ref;
1582     pic->age    = INT_MAX;
1583     pic->type   = FF_BUFFER_TYPE_USER;
1584     pic->reordered_opaque = codec->reordered_opaque;
1585     if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
1586     else           pic->pkt_pts = AV_NOPTS_VALUE;
1587     return 0;
1588 }
1589
1590 static void input_release_buffer(AVCodecContext *codec, AVFrame *pic)
1591 {
1592     memset(pic->data, 0, sizeof(pic->data));
1593     avfilter_unref_buffer(pic->opaque);
1594 }
1595
1596 static int input_reget_buffer(AVCodecContext *codec, AVFrame *pic)
1597 {
1598     AVFilterBufferRef *ref = pic->opaque;
1599
1600     if (pic->data[0] == NULL) {
1601         pic->buffer_hints |= FF_BUFFER_HINTS_READABLE;
1602         return codec->get_buffer(codec, pic);
1603     }
1604
1605     if ((codec->width != ref->video->w) || (codec->height != ref->video->h) ||
1606         (codec->pix_fmt != ref->format)) {
1607         av_log(codec, AV_LOG_ERROR, "Picture properties changed.\n");
1608         return -1;
1609     }
1610
1611     pic->reordered_opaque = codec->reordered_opaque;
1612     if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
1613     else           pic->pkt_pts = AV_NOPTS_VALUE;
1614     return 0;
1615 }
1616
1617 static int input_init(AVFilterContext *ctx, const char *args, void *opaque)
1618 {
1619     FilterPriv *priv = ctx->priv;
1620     AVCodecContext *codec;
1621     if(!opaque) return -1;
1622
1623     priv->is = opaque;
1624     codec    = priv->is->video_st->codec;
1625     codec->opaque = ctx;
1626     if((codec->codec->capabilities & CODEC_CAP_DR1)
1627     ) {
1628         av_assert0(codec->flags & CODEC_FLAG_EMU_EDGE);
1629         priv->use_dr1 = 1;
1630         codec->get_buffer     = input_get_buffer;
1631         codec->release_buffer = input_release_buffer;
1632         codec->reget_buffer   = input_reget_buffer;
1633         codec->thread_safe_callbacks = 1;
1634     }
1635
1636     priv->frame = avcodec_alloc_frame();
1637
1638     return 0;
1639 }
1640
1641 static void input_uninit(AVFilterContext *ctx)
1642 {
1643     FilterPriv *priv = ctx->priv;
1644     av_free(priv->frame);
1645 }
1646
1647 static int input_request_frame(AVFilterLink *link)
1648 {
1649     FilterPriv *priv = link->src->priv;
1650     AVFilterBufferRef *picref;
1651     int64_t pts = 0;
1652     AVPacket pkt;
1653     int ret;
1654
1655     while (!(ret = get_video_frame(priv->is, priv->frame, &pts, &pkt)))
1656         av_free_packet(&pkt);
1657     if (ret < 0)
1658         return -1;
1659
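         /* with DR1 the decoded frame already lives in a filter buffer (stored in
            frame->opaque by input_get_buffer()), so only a new reference is taken;
            otherwise a buffer is allocated and the frame data is copied into it */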
1660     if(priv->use_dr1 && priv->frame->opaque) {
1661         picref = avfilter_ref_buffer(priv->frame->opaque, ~0);
1662     } else {
1663         picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, link->w, link->h);
1664         av_image_copy(picref->data, picref->linesize,
1665                       priv->frame->data, priv->frame->linesize,
1666                       picref->format, link->w, link->h);
1667     }
1668     av_free_packet(&pkt);
1669
1670     avfilter_copy_frame_props(picref, priv->frame);
1671     picref->pts = pts;
1672
1673     avfilter_start_frame(link, picref);
1674     avfilter_draw_slice(link, 0, link->h, 1);
1675     avfilter_end_frame(link);
1676
1677     return 0;
1678 }
1679
1680 static int input_query_formats(AVFilterContext *ctx)
1681 {
1682     FilterPriv *priv = ctx->priv;
1683     enum PixelFormat pix_fmts[] = {
1684         priv->is->video_st->codec->pix_fmt, PIX_FMT_NONE
1685     };
1686
1687     avfilter_set_common_pixel_formats(ctx, avfilter_make_format_list(pix_fmts));
1688     return 0;
1689 }
1690
1691 static int input_config_props(AVFilterLink *link)
1692 {
1693     FilterPriv *priv  = link->src->priv;
1694     AVStream *s = priv->is->video_st;
1695
1696     link->w = s->codec->width;
1697     link->h = s->codec->height;
1698     link->sample_aspect_ratio = s->sample_aspect_ratio.num ?
1699         s->sample_aspect_ratio : s->codec->sample_aspect_ratio;
1700     link->time_base = s->time_base;
1701
1702     return 0;
1703 }
1704
1705 static AVFilter input_filter =
1706 {
1707     .name      = "ffplay_input",
1708
1709     .priv_size = sizeof(FilterPriv),
1710
1711     .init      = input_init,
1712     .uninit    = input_uninit,
1713
1714     .query_formats = input_query_formats,
1715
1716     .inputs    = (AVFilterPad[]) {{ .name = NULL }},
1717     .outputs   = (AVFilterPad[]) {{ .name = "default",
1718                                     .type = AVMEDIA_TYPE_VIDEO,
1719                                     .request_frame = input_request_frame,
1720                                     .config_props  = input_config_props, },
1721                                   { .name = NULL }},
1722 };
1723
1724 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters)
1725 {
1726     char sws_flags_str[128];
1727     int ret;
1728     enum PixelFormat pix_fmts[] = { PIX_FMT_YUV420P, PIX_FMT_NONE };
1729     AVBufferSinkParams *buffersink_params = av_buffersink_params_alloc();
1730     AVFilterContext *filt_src = NULL, *filt_out = NULL;
1731     snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%d", sws_flags);
1732     graph->scale_sws_opts = av_strdup(sws_flags_str);
1733
1734     if ((ret = avfilter_graph_create_filter(&filt_src, &input_filter, "src",
1735                                             NULL, is, graph)) < 0)
1736         return ret;
1737 #if FF_API_OLD_VSINK_API
1738     ret = avfilter_graph_create_filter(&filt_out, avfilter_get_by_name("buffersink"), "out",
1739                                        NULL, pix_fmts, graph);
1740 #else
1741     buffersink_params->pixel_fmts = pix_fmts;
1742     ret = avfilter_graph_create_filter(&filt_out, avfilter_get_by_name("buffersink"), "out",
1743                                        NULL, buffersink_params, graph);
1744 #endif
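         /* with the old vsink API the pixel format list is passed directly as the
            init argument of "buffersink", while the newer API takes it through
            AVBufferSinkParams */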
1745     av_freep(&buffersink_params);
1746     if (ret < 0)
1747         return ret;
1748
1749     if(vfilters) {
1750         AVFilterInOut *outputs = avfilter_inout_alloc();
1751         AVFilterInOut *inputs  = avfilter_inout_alloc();
1752
1753         outputs->name    = av_strdup("in");
1754         outputs->filter_ctx = filt_src;
1755         outputs->pad_idx = 0;
1756         outputs->next    = NULL;
1757
1758         inputs->name    = av_strdup("out");
1759         inputs->filter_ctx = filt_out;
1760         inputs->pad_idx = 0;
1761         inputs->next    = NULL;
1762
1763         if ((ret = avfilter_graph_parse(graph, vfilters, &inputs, &outputs, NULL)) < 0)
1764             return ret;
1765     } else {
1766         if ((ret = avfilter_link(filt_src, 0, filt_out, 0)) < 0)
1767             return ret;
1768     }
1769
1770     if ((ret = avfilter_graph_config(graph, NULL)) < 0)
1771         return ret;
1772
1773     is->out_video_filter = filt_out;
1774
1775     return ret;
1776 }
1777
1778 #endif  /* CONFIG_AVFILTER */
1779
1780 static int video_thread(void *arg)
1781 {
1782     VideoState *is = arg;
1783     AVFrame *frame= avcodec_alloc_frame();
1784     int64_t pts_int = AV_NOPTS_VALUE, pos = -1;
1785     double pts;
1786     int ret;
1787
1788 #if CONFIG_AVFILTER
1789     AVFilterGraph *graph = avfilter_graph_alloc();
1790     AVFilterContext *filt_out = NULL;
1791     int last_w = is->video_st->codec->width;
1792     int last_h = is->video_st->codec->height;
1793
1794     if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
1795         goto the_end;
1796     filt_out = is->out_video_filter;
1797 #endif
1798
1799     for(;;) {
1800 #if !CONFIG_AVFILTER
1801         AVPacket pkt;
1802 #else
1803         AVFilterBufferRef *picref;
1804         AVRational tb = filt_out->inputs[0]->time_base;
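             /* pts coming out of the filter graph are expressed in the sink's input
                time base, which can differ from the stream time base, hence the
                rescaling done after the frame is pulled below */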
1805 #endif
1806         while (is->paused && !is->videoq.abort_request)
1807             SDL_Delay(10);
1808 #if CONFIG_AVFILTER
1809         if (   last_w != is->video_st->codec->width
1810             || last_h != is->video_st->codec->height) {
1811             av_log(NULL, AV_LOG_INFO, "Frame changed from size:%dx%d to size:%dx%d\n",
1812                    last_w, last_h, is->video_st->codec->width, is->video_st->codec->height);
1813             avfilter_graph_free(&graph);
1814             graph = avfilter_graph_alloc();
1815             if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
1816                 goto the_end;
1817             filt_out = is->out_video_filter;
1818             last_w = is->video_st->codec->width;
1819             last_h = is->video_st->codec->height;
1820         }
1821         ret = av_buffersink_get_buffer_ref(filt_out, &picref, 0);
1822         if (picref) {
1823             avfilter_fill_frame_from_video_buffer_ref(frame, picref);
1824             pts_int = picref->pts;
1825             pos     = picref->pos;
1826             frame->opaque = picref;
1827         }
1828
1829         if (av_cmp_q(tb, is->video_st->time_base)) {
1830             av_unused int64_t pts1 = pts_int;
1831             pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
1832             av_dlog(NULL, "video_thread(): "
1833                     "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
1834                     tb.num, tb.den, pts1,
1835                     is->video_st->time_base.num, is->video_st->time_base.den, pts_int);
1836         }
1837 #else
1838         ret = get_video_frame(is, frame, &pts_int, &pkt);
1839         pos = pkt.pos;
1840         av_free_packet(&pkt);
1841 #endif
1842
1843         if (ret < 0) goto the_end;
1844
1845         is->frame_last_filter_delay = av_gettime() / 1000000.0 - is->frame_last_returned_time;
1846         if (fabs(is->frame_last_filter_delay) > AV_NOSYNC_THRESHOLD / 10.0)
1847             is->frame_last_filter_delay = 0;
1848
1849 #if CONFIG_AVFILTER
1850         if (!picref)
1851             continue;
1852 #endif
1853
1854         pts = pts_int*av_q2d(is->video_st->time_base);
1855
1856         ret = queue_picture(is, frame, pts, pos);
1857
1858         if (ret < 0)
1859             goto the_end;
1860
1861         if (is->step)
1862             stream_toggle_pause(is);
1863     }
1864  the_end:
1865 #if CONFIG_AVFILTER
1866     avfilter_graph_free(&graph);
1867 #endif
1868     av_free(frame);
1869     return 0;
1870 }
1871
1872 static int subtitle_thread(void *arg)
1873 {
1874     VideoState *is = arg;
1875     SubPicture *sp;
1876     AVPacket pkt1, *pkt = &pkt1;
1877     int got_subtitle;
1878     double pts;
1879     int i, j;
1880     int r, g, b, y, u, v, a;
1881
1882     for(;;) {
1883         while (is->paused && !is->subtitleq.abort_request) {
1884             SDL_Delay(10);
1885         }
1886         if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
1887             break;
1888
1889         if(pkt->data == flush_pkt.data){
1890             avcodec_flush_buffers(is->subtitle_st->codec);
1891             continue;
1892         }
1893         SDL_LockMutex(is->subpq_mutex);
1894         while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
1895                !is->subtitleq.abort_request) {
1896             SDL_CondWait(is->subpq_cond, is->subpq_mutex);
1897         }
1898         SDL_UnlockMutex(is->subpq_mutex);
1899
1900         if (is->subtitleq.abort_request)
1901             return 0;
1902
1903         sp = &is->subpq[is->subpq_windex];
1904
1905         /* NOTE: pts is the PTS of the _first_ subtitle picture
1906            beginning in this packet, if any */
1907         pts = 0;
1908         if (pkt->pts != AV_NOPTS_VALUE)
1909             pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;
1910
1911         avcodec_decode_subtitle2(is->subtitle_st->codec, &sp->sub,
1912                                  &got_subtitle, pkt);
1913
1914         if (got_subtitle && sp->sub.format == 0) {
1915             sp->pts = pts;
1916
1917             for (i = 0; i < sp->sub.num_rects; i++)
1918             {
1919                 for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
1920                 {
1921                     RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
1922                     y = RGB_TO_Y_CCIR(r, g, b);
1923                     u = RGB_TO_U_CCIR(r, g, b, 0);
1924                     v = RGB_TO_V_CCIR(r, g, b, 0);
1925                     YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
1926                 }
1927             }
1928
1929             /* now we can update the picture count */
1930             if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
1931                 is->subpq_windex = 0;
1932             SDL_LockMutex(is->subpq_mutex);
1933             is->subpq_size++;
1934             SDL_UnlockMutex(is->subpq_mutex);
1935         }
1936         av_free_packet(pkt);
1937     }
1938     return 0;
1939 }
1940
1941 /* copy samples for display in the audio waveform/spectrum view */
1942 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1943 {
1944     int size, len;
1945
1946     size = samples_size / sizeof(short);
1947     while (size > 0) {
1948         len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1949         if (len > size)
1950             len = size;
1951         memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1952         samples += len;
1953         is->sample_array_index += len;
1954         if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1955             is->sample_array_index = 0;
1956         size -= len;
1957     }
1958 }
1959
1960 /* return the new audio buffer size (samples can be added or deleted
1961    to get better sync if video or the external clock is the master) */
1962 static int synchronize_audio(VideoState *is, short *samples,
1963                              int samples_size1, double pts)
1964 {
1965     int n, samples_size;
1966     double ref_clock;
1967
1968     n = av_get_bytes_per_sample(is->audio_tgt_fmt) * is->audio_tgt_channels;
1969     samples_size = samples_size1;
1970
1971     /* if not master, then we try to remove or add samples to correct the clock */
1972     if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1973          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1974         double diff, avg_diff;
1975         int wanted_size, min_size, max_size, nb_samples;
1976
1977         ref_clock = get_master_clock(is);
1978         diff = get_audio_clock(is) - ref_clock;
1979
1980         if (diff < AV_NOSYNC_THRESHOLD) {
1981             is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1982             if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1983                 /* not enough measurements yet for a reliable estimate */
1984                 is->audio_diff_avg_count++;
1985             } else {
1986                 /* estimate the A-V difference */
1987                 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1988
1989                 if (fabs(avg_diff) >= is->audio_diff_threshold) {
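                         /* diff is in seconds, so diff * sample rate gives the number
                            of sample frames to add or drop, and multiplying by n
                            (bytes per sample frame of the target format) converts it
                            to bytes */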
1990                     wanted_size = samples_size + ((int)(diff * is->audio_tgt_freq) * n);
1991                     nb_samples = samples_size / n;
1992
1993                     min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1994                     max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1995                     if (wanted_size < min_size)
1996                         wanted_size = min_size;
1997                     else if (wanted_size > FFMIN3(max_size, sizeof(is->audio_buf1), sizeof(is->audio_buf2)))
1998                         wanted_size = FFMIN3(max_size, sizeof(is->audio_buf1), sizeof(is->audio_buf2));
1999
2000                     /* add or remove samples to correct the synchronization */
2001                     if (wanted_size < samples_size) {
2002                         /* remove samples */
2003                         samples_size = wanted_size;
2004                     } else if (wanted_size > samples_size) {
2005                         uint8_t *samples_end, *q;
2006                         int nb;
2007
2008                         /* add samples by duplicating the last sample frame */
2009                         nb = (wanted_size - samples_size);
2010                         samples_end = (uint8_t *)samples + samples_size - n;
2011                         q = samples_end + n;
2012                         while (nb > 0) {
2013                             memcpy(q, samples_end, n);
2014                             q += n;
2015                             nb -= n;
2016                         }
2017                         samples_size = wanted_size;
2018                     }
2019                 }
2020                 av_dlog(NULL, "diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
2021                         diff, avg_diff, samples_size - samples_size1,
2022                         is->audio_clock, is->video_clock, is->audio_diff_threshold);
2023             }
2024         } else {
2025             /* the difference is too large: it is probably caused by initial
2026                PTS errors, so reset the A-V filter */
2027             is->audio_diff_avg_count = 0;
2028             is->audio_diff_cum = 0;
2029         }
2030     }
2031
2032     return samples_size;
2033 }
2034
2035 /* decode one audio frame and return its uncompressed size */
2036 static int audio_decode_frame(VideoState *is, double *pts_ptr)
2037 {
2038     AVPacket *pkt_temp = &is->audio_pkt_temp;
2039     AVPacket *pkt = &is->audio_pkt;
2040     AVCodecContext *dec= is->audio_st->codec;
2041     int len1, len2, data_size, resampled_data_size;
2042     int64_t dec_channel_layout;
2043     double pts;
2044     int new_packet = 0;
2045     int flush_complete = 0;
2046
2047     for(;;) {
2048         /* NOTE: the audio packet can contain several frames */
2049         while (pkt_temp->size > 0 || (!pkt_temp->data && new_packet)) {
2050             if (flush_complete)
2051                 break;
2052             new_packet = 0;
2053             data_size = sizeof(is->audio_buf1);
2054             len1 = avcodec_decode_audio3(dec,
2055                                         (int16_t *)is->audio_buf1, &data_size,
2056                                         pkt_temp);
2057             if (len1 < 0) {
2058                 /* if error, we skip the frame */
2059                 pkt_temp->size = 0;
2060                 break;
2061             }
2062
2063             pkt_temp->data += len1;
2064             pkt_temp->size -= len1;
2065
2066             if (data_size <= 0) {
2067                 /* stop sending empty packets if the decoder is finished */
2068                 if (!pkt_temp->data && dec->codec->capabilities & CODEC_CAP_DELAY)
2069                     flush_complete = 1;
2070                 continue;
2071             }
2072
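                 /* trust the decoder-reported channel layout only when it is consistent
                    with the reported channel count, otherwise guess a default layout
                    from the channel count */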
2073             dec_channel_layout = (dec->channel_layout && dec->channels == av_get_channel_layout_nb_channels(dec->channel_layout)) ? dec->channel_layout : av_get_default_channel_layout(dec->channels);
2074
2075             if (dec->sample_fmt != is->audio_src_fmt || dec_channel_layout != is->audio_src_channel_layout || dec->sample_rate != is->audio_src_freq) {
2076                 if (is->swr_ctx)
2077                     swr_free(&is->swr_ctx);
2078                 is->swr_ctx = swr_alloc2(NULL, is->audio_tgt_channel_layout, is->audio_tgt_fmt, is->audio_tgt_freq,
2079                                                dec_channel_layout,          dec->sample_fmt,   dec->sample_rate,
2080                                                0, NULL);
2081                 if (!is->swr_ctx || swr_init(is->swr_ctx) < 0) {
2082                     fprintf(stderr, "Cannot create sample rate converter for conversion of %d Hz %s %d channels to %d Hz %s %d channels!\n",
2083                         dec->sample_rate,
2084                         av_get_sample_fmt_name(dec->sample_fmt),
2085                         dec->channels,
2086                         is->audio_tgt_freq,
2087                         av_get_sample_fmt_name(is->audio_tgt_fmt),
2088                         is->audio_tgt_channels);
2089                     break;
2090                 }
2091                 is->audio_src_channel_layout = dec_channel_layout;
2092                 is->audio_src_channels = dec->channels;
2093                 is->audio_src_freq = dec->sample_rate;
2094                 is->audio_src_fmt = dec->sample_fmt;
2095             }
2096
2097             resampled_data_size = data_size;
2098             if (is->swr_ctx) {
2099                 const uint8_t *in[] = {is->audio_buf1};
2100                 uint8_t *out[] = {is->audio_buf2};
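                     /* both the output capacity and the input count given to
                        swr_convert() are in samples per channel: buffer bytes divided
                        by the channel count and the bytes per sample */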
2101                 len2 = swr_convert(is->swr_ctx, out, sizeof(is->audio_buf2) / is->audio_tgt_channels / av_get_bytes_per_sample(is->audio_tgt_fmt),
2102                                                 in, data_size / dec->channels / av_get_bytes_per_sample(dec->sample_fmt));
2103                 if (len2 < 0) {
2104                     fprintf(stderr, "swr_convert() failed\n");
2105                     break;
2106                 }
2107                 if (len2 == sizeof(is->audio_buf2) / is->audio_tgt_channels / av_get_bytes_per_sample(is->audio_tgt_fmt)) {
2108                     fprintf(stderr, "warning: audio buffer is probably too small\n");
2109                     swr_init(is->swr_ctx);
2110                 }
2111                 is->audio_buf = is->audio_buf2;
2112                 resampled_data_size = len2 * is->audio_tgt_channels * av_get_bytes_per_sample(is->audio_tgt_fmt);
2113             } else {
2114                 is->audio_buf= is->audio_buf1;
2115             }
2116
2117             /* take the pts from the running audio clock, then advance the clock by the decoded duration */
2118             pts = is->audio_clock;
2119             *pts_ptr = pts;
2120             is->audio_clock += (double)data_size / (dec->channels * dec->sample_rate * av_get_bytes_per_sample(dec->sample_fmt));
2121 #ifdef DEBUG
2122             {
2123                 static double last_clock;
2124                 printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
2125                        is->audio_clock - last_clock,
2126                        is->audio_clock, pts);
2127                 last_clock = is->audio_clock;
2128             }
2129 #endif
2130             return resampled_data_size;
2131         }
2132
2133         /* free the current packet */
2134         if (pkt->data)
2135             av_free_packet(pkt);
2136
2137         if (is->paused || is->audioq.abort_request) {
2138             return -1;
2139         }
2140
2141         /* read next packet */
2142         if ((new_packet = packet_queue_get(&is->audioq, pkt, 1)) < 0)
2143             return -1;
2144
2145         if (pkt->data == flush_pkt.data)
2146             avcodec_flush_buffers(dec);
2147
2148         pkt_temp->data = pkt->data;
2149         pkt_temp->size = pkt->size;
2150
2151         /* update the audio clock with the packet pts, if available */
2152         if (pkt->pts != AV_NOPTS_VALUE) {
2153             is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
2154         }
2155     }
2156 }
2157
2158 /* prepare a new audio buffer */
2159 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2160 {
2161     VideoState *is = opaque;
2162     int audio_size, len1;
2163     int bytes_per_sec;
2164     double pts;
2165
2166     audio_callback_time = av_gettime();
2167
2168     while (len > 0) {
2169         if (is->audio_buf_index >= is->audio_buf_size) {
2170            audio_size = audio_decode_frame(is, &pts);
2171            if (audio_size < 0) {
2172                 /* if error, just output silence */
2173                is->audio_buf = is->audio_buf1;
2174                is->audio_buf_size = 256 * is->audio_tgt_channels * av_get_bytes_per_sample(is->audio_tgt_fmt);
2175                memset(is->audio_buf, 0, is->audio_buf_size);
2176            } else {
2177                if (is->show_mode != SHOW_MODE_VIDEO)
2178                    update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2179                audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
2180                                               pts);
2181                is->audio_buf_size = audio_size;
2182            }
2183            is->audio_buf_index = 0;
2184         }
2185         len1 = is->audio_buf_size - is->audio_buf_index;
2186         if (len1 > len)
2187             len1 = len;
2188         memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2189         len -= len1;
2190         stream += len1;
2191         is->audio_buf_index += len1;
2192     }
2193     bytes_per_sec = is->audio_tgt_freq * is->audio_tgt_channels * av_get_bytes_per_sample(is->audio_tgt_fmt);
2194     is->audio_write_buf_size = is->audio_buf_size - is->audio_buf_index;
2195     /* Let's assume the audio driver that is used by SDL has two periods. */
2196     is->audio_current_pts = is->audio_clock - (double)(2 * is->audio_hw_buf_size + is->audio_write_buf_size) / bytes_per_sec;
2197     is->audio_current_pts_drift = is->audio_current_pts - audio_callback_time / 1000000.0;
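     /* audio_current_pts approximates the pts of the sample being played right
        now (the decoded clock minus the data still queued in the SDL/hardware
        buffers); the drift against the system time lets the audio clock be
        extrapolated between callbacks */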
2198 }
2199
2200 /* open a given stream. Return 0 if OK */
2201 static int stream_component_open(VideoState *is, int stream_index)
2202 {
2203     AVFormatContext *ic = is->ic;
2204     AVCodecContext *avctx;
2205     AVCodec *codec;
2206     SDL_AudioSpec wanted_spec, spec;
2207     AVDictionary *opts;
2208     AVDictionaryEntry *t = NULL;
2209     int64_t wanted_channel_layout = 0;
2210
2211     if (stream_index < 0 || stream_index >= ic->nb_streams)
2212         return -1;
2213     avctx = ic->streams[stream_index]->codec;
2214
2215     opts = filter_codec_opts(codec_opts, avctx->codec_id, ic, ic->streams[stream_index]);
2216
2217     codec = avcodec_find_decoder(avctx->codec_id);
2218     switch(avctx->codec_type){
2219         case AVMEDIA_TYPE_AUDIO   : if(audio_codec_name   ) codec= avcodec_find_decoder_by_name(   audio_codec_name); break;
2220         case AVMEDIA_TYPE_SUBTITLE: if(subtitle_codec_name) codec= avcodec_find_decoder_by_name(subtitle_codec_name); break;
2221         case AVMEDIA_TYPE_VIDEO   : if(video_codec_name   ) codec= avcodec_find_decoder_by_name(   video_codec_name); break;
2222     }
2223     if (!codec)
2224         return -1;
2225
2226     avctx->workaround_bugs = workaround_bugs;
2227     avctx->lowres = lowres;
2228     if(lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
2229     avctx->idct_algo= idct;
2230     if(fast) avctx->flags2 |= CODEC_FLAG2_FAST;
2231     avctx->skip_frame= skip_frame;
2232     avctx->skip_idct= skip_idct;
2233     avctx->skip_loop_filter= skip_loop_filter;
2234     avctx->error_recognition= error_recognition;
2235     avctx->error_concealment= error_concealment;
2236
2237     if(codec->capabilities & CODEC_CAP_DR1)
2238         avctx->flags |= CODEC_FLAG_EMU_EDGE;
2239
2240     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2241         wanted_channel_layout = (avctx->channel_layout && avctx->channels == av_get_channel_layout_nb_channels(avctx->channel_layout)) ? avctx->channel_layout : av_get_default_channel_layout(avctx->channels);
2242         wanted_channel_layout &= ~AV_CH_LAYOUT_STEREO_DOWNMIX;
2243         wanted_spec.channels = av_get_channel_layout_nb_channels(wanted_channel_layout);
2244         wanted_spec.freq = avctx->sample_rate;
2245         if (wanted_spec.freq <= 0 || wanted_spec.channels <= 0) {
2246             fprintf(stderr, "Invalid sample rate or channel count!\n");
2247             return -1;
2248         }
2249     }
2250
2251     if (!codec ||
2252         avcodec_open2(avctx, codec, &opts) < 0)
2253         return -1;
2254     if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2255         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2256         return AVERROR_OPTION_NOT_FOUND;
2257     }
2258
2259     /* prepare audio output */
2260     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2261         wanted_spec.format = AUDIO_S16SYS;
2262         wanted_spec.silence = 0;
2263         wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
2264         wanted_spec.callback = sdl_audio_callback;
2265         wanted_spec.userdata = is;
2266         if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2267             fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
2268             return -1;
2269         }
2270         is->audio_hw_buf_size = spec.size;
2271         if (spec.format != AUDIO_S16SYS) {
2272             fprintf(stderr, "SDL advised audio format %d is not supported!\n", spec.format);
2273             return -1;
2274         }
2275         if (spec.channels != wanted_spec.channels) {
2276             wanted_channel_layout = av_get_default_channel_layout(spec.channels);
2277             if (!wanted_channel_layout) {
2278                 fprintf(stderr, "SDL advised channel count %d is not supported!\n", spec.channels);
2279                 return -1;
2280             }
2281         }
2282         is->audio_src_fmt = is->audio_tgt_fmt = AV_SAMPLE_FMT_S16;
2283         is->audio_src_freq = is->audio_tgt_freq = spec.freq;
2284         is->audio_src_channel_layout = is->audio_tgt_channel_layout = wanted_channel_layout;
2285         is->audio_src_channels = is->audio_tgt_channels = spec.channels;
2286     }
2287
2288     ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2289     switch(avctx->codec_type) {
2290     case AVMEDIA_TYPE_AUDIO:
2291         is->audio_stream = stream_index;
2292         is->audio_st = ic->streams[stream_index];
2293         is->audio_buf_size = 0;
2294         is->audio_buf_index = 0;
2295
2296         /* init averaging filter */
2297         is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
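             /* coefficient of an exponential moving average: the weight of a
                measurement decays to 1% after AUDIO_DIFF_AVG_NB updates, and
                synchronize_audio() recovers the average as
                audio_diff_cum * (1 - audio_diff_avg_coef) */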
2298         is->audio_diff_avg_count = 0;
2299         /* since we do not have a precise enough measure of the audio FIFO
2300            fullness, we correct audio sync only if the error is larger than this threshold */
2301         is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / wanted_spec.freq;
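             /* i.e. the duration, in seconds, of two SDL audio callback buffers */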
2302
2303         memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
2304         packet_queue_init(&is->audioq);
2305         SDL_PauseAudio(0);
2306         break;
2307     case AVMEDIA_TYPE_VIDEO:
2308         is->video_stream = stream_index;
2309         is->video_st = ic->streams[stream_index];
2310
2311         packet_queue_init(&is->videoq);
2312         is->video_tid = SDL_CreateThread(video_thread, is);
2313         break;
2314     case AVMEDIA_TYPE_SUBTITLE:
2315         is->subtitle_stream = stream_index;
2316         is->subtitle_st = ic->streams[stream_index];
2317         packet_queue_init(&is->subtitleq);
2318
2319         is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
2320         break;
2321     default:
2322         break;
2323     }
2324     return 0;
2325 }
2326
2327 static void stream_component_close(VideoState *is, int stream_index)
2328 {
2329     AVFormatContext *ic = is->ic;
2330     AVCodecContext *avctx;
2331
2332     if (stream_index < 0 || stream_index >= ic->nb_streams)
2333         return;
2334     avctx = ic->streams[stream_index]->codec;
2335
2336     switch(avctx->codec_type) {
2337     case AVMEDIA_TYPE_AUDIO:
2338         packet_queue_abort(&is->audioq);
2339
2340         SDL_CloseAudio();
2341
2342         packet_queue_end(&is->audioq);
2343         if (is->swr_ctx)
2344             swr_free(&is->swr_ctx);
2345         av_free_packet(&is->audio_pkt);
2346
2347         if (is->rdft) {
2348             av_rdft_end(is->rdft);
2349             av_freep(&is->rdft_data);
2350         }
2351         break;
2352     case AVMEDIA_TYPE_VIDEO:
2353         packet_queue_abort(&is->videoq);
2354
2355         /* note: we also signal this mutex to make sure we unblock the
2356            video thread in all cases */
2357         SDL_LockMutex(is->pictq_mutex);
2358         SDL_CondSignal(is->pictq_cond);
2359         SDL_UnlockMutex(is->pictq_mutex);
2360
2361         SDL_WaitThread(is->video_tid, NULL);
2362
2363         packet_queue_end(&is->videoq);
2364         break;
2365     case AVMEDIA_TYPE_SUBTITLE:
2366         packet_queue_abort(&is->subtitleq);
2367
2368         /* note: we also signal this mutex to make sure we unblock the
2369            subtitle thread in all cases */
2370         SDL_LockMutex(is->subpq_mutex);
2371         is->subtitle_stream_changed = 1;
2372
2373         SDL_CondSignal(is->subpq_cond);
2374         SDL_UnlockMutex(is->subpq_mutex);
2375
2376         SDL_WaitThread(is->subtitle_tid, NULL);
2377
2378         packet_queue_end(&is->subtitleq);
2379         break;
2380     default:
2381         break;
2382     }
2383
2384     ic->streams[stream_index]->discard = AVDISCARD_ALL;
2385     avcodec_close(avctx);
2386     switch(avctx->codec_type) {
2387     case AVMEDIA_TYPE_AUDIO:
2388         is->audio_st = NULL;
2389         is->audio_stream = -1;
2390         break;
2391     case AVMEDIA_TYPE_VIDEO:
2392         is->video_st = NULL;
2393         is->video_stream = -1;
2394         break;
2395     case AVMEDIA_TYPE_SUBTITLE:
2396         is->subtitle_st = NULL;
2397         is->subtitle_stream = -1;
2398         break;
2399     default:
2400         break;
2401     }
2402 }
2403
2404 /* since we have only one decoding thread, we can use a global
2405    variable instead of a thread local variable */
2406 static VideoState *global_video_state;
2407
2408 static int decode_interrupt_cb(void)
2409 {
2410     return (global_video_state && global_video_state->abort_request);
2411 }
2412
2413 /* this thread gets the stream from the disk or the network */
2414 static int read_thread(void *arg)
2415 {
2416     VideoState *is = arg;
2417     AVFormatContext *ic = NULL;
2418     int err, i, ret;
2419     int st_index[AVMEDIA_TYPE_NB];
2420     AVPacket pkt1, *pkt = &pkt1;
2421     int eof=0;
2422     int pkt_in_play_range = 0;
2423     AVDictionaryEntry *t;
2424     AVDictionary **opts;
2425     int orig_nb_streams;
2426
2427     memset(st_index, -1, sizeof(st_index));
2428     is->video_stream = -1;
2429     is->audio_stream = -1;
2430     is->subtitle_stream = -1;
2431
2432     global_video_state = is;
2433     avio_set_interrupt_cb(decode_interrupt_cb);
2434
2435     err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
2436     if (err < 0) {
2437         print_error(is->filename, err);
2438         ret = -1;
2439         goto fail;
2440     }
2441     if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2442         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2443         ret = AVERROR_OPTION_NOT_FOUND;
2444         goto fail;
2445     }
2446     is->ic = ic;
2447
2448     if(genpts)
2449         ic->flags |= AVFMT_FLAG_GENPTS;
2450
2451     av_dict_set(&codec_opts, "request_channels", "2", 0);
2452
2453     opts = setup_find_stream_info_opts(ic, codec_opts);
2454     orig_nb_streams = ic->nb_streams;
2455
2456     err = avformat_find_stream_info(ic, opts);
2457     if (err < 0) {
2458         fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
2459         ret = -1;
2460         goto fail;
2461     }
2462     for (i = 0; i < orig_nb_streams; i++)
2463         av_dict_free(&opts[i]);
2464     av_freep(&opts);
2465
2466     if(ic->pb)
2467         ic->pb->eof_reached= 0; //FIXME hack, ffplay maybe should not use url_feof() to test for the end
2468
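         /* when -bytes is left at auto (-1), seek by bytes only for formats with
            discontinuous timestamps (AVFMT_TS_DISCONT, e.g. MPEG-TS), where
            timestamp-based seeking is unreliable */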
2469     if(seek_by_bytes<0)
2470         seek_by_bytes= !!(ic->iformat->flags & AVFMT_TS_DISCONT);
2471
2472     /* if a start-time seek was requested, execute it now */
2473     if (start_time != AV_NOPTS_VALUE) {
2474         int64_t timestamp;
2475
2476         timestamp = start_time;
2477         /* add the stream start time */
2478         if (ic->start_time != AV_NOPTS_VALUE)
2479             timestamp += ic->start_time;
2480         ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2481         if (ret < 0) {
2482             fprintf(stderr, "%s: could not seek to position %0.3f\n",
2483                     is->filename, (double)timestamp / AV_TIME_BASE);
2484         }
2485     }
2486
2487     for (i = 0; i < ic->nb_streams; i++)
2488         ic->streams[i]->discard = AVDISCARD_ALL;
2489     if (!video_disable)
2490         st_index[AVMEDIA_TYPE_VIDEO] =
2491             av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2492                                 wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2493     if (!audio_disable)
2494         st_index[AVMEDIA_TYPE_AUDIO] =
2495             av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2496                                 wanted_stream[AVMEDIA_TYPE_AUDIO],
2497                                 st_index[AVMEDIA_TYPE_VIDEO],
2498                                 NULL, 0);
2499     if (!video_disable)
2500         st_index[AVMEDIA_TYPE_SUBTITLE] =
2501             av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2502                                 wanted_stream[AVMEDIA_TYPE_SUBTITLE],
2503                                 (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2504                                  st_index[AVMEDIA_TYPE_AUDIO] :
2505                                  st_index[AVMEDIA_TYPE_VIDEO]),
2506                                 NULL, 0);
2507     if (show_status) {
2508         av_dump_format(ic, 0, is->filename, 0);
2509     }
2510
2511     is->show_mode = show_mode;
2512
2513     /* open the streams */
2514     if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2515         stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2516     }
2517
2518     ret=-1;
2519     if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2520         ret= stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2521     }
2522     is->refresh_tid = SDL_CreateThread(refresh_thread, is);
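         /* if no display mode was forced on the command line, show the video when a
            video stream was opened successfully, otherwise fall back to the RDFT
            spectrum display */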
2523     if (is->show_mode == SHOW_MODE_NONE)
2524         is->show_mode = ret >= 0 ? SHOW_MODE_VIDEO : SHOW_MODE_RDFT;
2525
2526     if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2527         stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2528     }
2529
2530     if (is->video_stream < 0 && is->audio_stream < 0) {
2531         fprintf(stderr, "%s: could not open codecs\n", is->filename);
2532         ret = -1;
2533         goto fail;
2534     }
2535
2536     for(;;) {
2537         if (is->abort_request)
2538             break;
2539         if (is->paused != is->last_paused) {
2540             is->last_paused = is->paused;
2541             if (is->paused)
2542                 is->read_pause_return= av_read_pause(ic);
2543             else
2544                 av_read_play(ic);
2545         }
2546 #if CONFIG_RTSP_DEMUXER
2547         if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
2548             /* wait 10 ms to avoid trying to get another packet */
2549             /* XXX: horrible */
2550             SDL_Delay(10);
2551             continue;
2552         }
2553 #endif
2554         if (is->seek_req) {
2555             int64_t seek_target= is->seek_pos;
2556             int64_t seek_min= is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2557             int64_t seek_max= is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2558 //FIXME the +-2 is because rounding is not done in the correct direction when
2559 //      the seek_pos/seek_rel variables are generated
2560
2561             ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2562             if (ret < 0) {
2563                 fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
2564             }else{
2565                 if (is->audio_stream >= 0) {
2566                     packet_queue_flush(&is->audioq);
2567                     packet_queue_put(&is->audioq, &flush_pkt);
2568                 }
2569                 if (is->subtitle_stream >= 0) {
2570                     packet_queue_flush(&is->subtitleq);
2571                     packet_queue_put(&is->subtitleq, &flush_pkt);
2572                 }
2573                 if (is->video_stream >= 0) {
2574                     packet_queue_flush(&is->videoq);
2575                     packet_queue_put(&is->videoq, &flush_pkt);
2576                 }
2577             }
2578             is->seek_req = 0;
2579             eof= 0;
2580         }
2581
2582         /* if the queues are full, no need to read more */
2583         if (   is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2584             || (   (is->audioq   .size  > MIN_AUDIOQ_SIZE || is->audio_stream<0)
2585                 && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream<0)
2586                 && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream<0))) {
2587             /* wait 10 ms */
2588             SDL_Delay(10);
2589             continue;
2590         }
2591         if(eof) {
2592             if(is->video_stream >= 0){
2593                 av_init_packet(pkt);
2594                 pkt->data=NULL;
2595                 pkt->size=0;
2596                 pkt->stream_index= is->video_stream;
2597                 packet_queue_put(&is->videoq, pkt);
2598             }
2599             if (is->audio_stream >= 0 &&
2600                 is->audio_st->codec->codec->capabilities & CODEC_CAP_DELAY) {
2601                 av_init_packet(pkt);
2602                 pkt->data = NULL;
2603                 pkt->size = 0;
2604                 pkt->stream_index = is->audio_stream;
2605                 packet_queue_put(&is->audioq, pkt);
2606             }
2607             SDL_Delay(10);
2608             if(is->audioq.size + is->videoq.size + is->subtitleq.size ==0){
2609                 if(loop!=1 && (!loop || --loop)){
2610                     stream_seek(is, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
2611                 }else if(autoexit){
2612                     ret=AVERROR_EOF;
2613                     goto fail;
2614                 }
2615             }
2616             eof=0;
2617             continue;
2618         }
2619         ret = av_read_frame(ic, pkt);
2620         if (ret < 0) {
2621             if (ret == AVERROR_EOF || url_feof(ic->pb))
2622                 eof=1;
2623             if (ic->pb && ic->pb->error)
2624                 break;
2625             SDL_Delay(100); /* wait for user event */
2626             continue;
2627         }
2628         /* check if packet is in play range specified by user, then queue, otherwise discard */
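             /* i.e. the packet pts, made relative to the stream start and converted
                to seconds, must not exceed the requested -t duration measured from
                the -ss start time (start_time and duration are in AV_TIME_BASE
                microseconds, hence the division by 1000000) */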
2629         pkt_in_play_range = duration == AV_NOPTS_VALUE ||
2630                 (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
2631                 av_q2d(ic->streams[pkt->stream_index]->time_base) -
2632                 (double)(start_time != AV_NOPTS_VALUE ? start_time : 0)/1000000
2633                 <= ((double)duration/1000000);
2634         if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
2635             packet_queue_put(&is->audioq, pkt);
2636         } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
2637             packet_queue_put(&is->videoq, pkt);
2638         } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
2639             packet_queue_put(&is->subtitleq, pkt);
2640         } else {
2641             av_free_packet(pkt);
2642         }
2643     }
2644     /* wait until the end */
2645     while (!is->abort_request) {
2646         SDL_Delay(100);
2647     }
2648
2649     ret = 0;
2650  fail:
2651     /* disable interrupting */
2652     global_video_state = NULL;
2653
2654     /* close each stream */
2655     if (is->audio_stream >= 0)
2656         stream_component_close(is, is->audio_stream);
2657     if (is->video_stream >= 0)
2658         stream_component_close(is, is->video_stream);
2659     if (is->subtitle_stream >= 0)
2660         stream_component_close(is, is->subtitle_stream);
2661     if (is->ic) {
2662         av_close_input_file(is->ic);
2663         is->ic = NULL; /* safety */
2664     }
2665     avio_set_interrupt_cb(NULL);
2666
2667     if (ret != 0) {
2668         SDL_Event event;
2669
2670         event.type = FF_QUIT_EVENT;
2671         event.user.data1 = is;
2672         SDL_PushEvent(&event);
2673     }
2674     return 0;
2675 }
2676
2677 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2678 {
2679     VideoState *is;
2680
2681     is = av_mallocz(sizeof(VideoState));
2682     if (!is)
2683         return NULL;
2684     av_strlcpy(is->filename, filename, sizeof(is->filename));
2685     is->iformat = iformat;
2686     is->ytop = 0;
2687     is->xleft = 0;
2688
2689     /* create the synchronization primitives used by the picture and subtitle queues */
2690     is->pictq_mutex = SDL_CreateMutex();
2691     is->pictq_cond = SDL_CreateCond();
2692
2693     is->subpq_mutex = SDL_CreateMutex();
2694     is->subpq_cond = SDL_CreateCond();
2695
2696     is->av_sync_type = av_sync_type;
2697     is->read_tid = SDL_CreateThread(read_thread, is);
2698     if (!is->read_tid) {
2699         av_free(is);
2700         return NULL;
2701     }
2702     return is;
2703 }
2704
2705 static void stream_cycle_channel(VideoState *is, int codec_type)
2706 {
2707     AVFormatContext *ic = is->ic;
2708     int start_index, stream_index;
2709     AVStream *st;
2710
2711     if (codec_type == AVMEDIA_TYPE_VIDEO)
2712         start_index = is->video_stream;
2713     else if (codec_type == AVMEDIA_TYPE_AUDIO)
2714         start_index = is->audio_stream;
2715     else
2716         start_index = is->subtitle_stream;
2717     if (start_index < (codec_type == AVMEDIA_TYPE_SUBTITLE ? -1 : 0))
2718         return;
2719     stream_index = start_index;
2720     for(;;) {
2721         if (++stream_index >= is->ic->nb_streams)
2722         {
2723             if (codec_type == AVMEDIA_TYPE_SUBTITLE)
2724             {
2725                 stream_index = -1;
2726                 goto the_end;
2727             } else
2728                 stream_index = 0;
2729         }
2730         if (stream_index == start_index)
2731             return;
2732         st = ic->streams[stream_index];
2733         if (st->codec->codec_type == codec_type) {
2734             /* check that parameters are OK */
2735             switch(codec_type) {
2736             case AVMEDIA_TYPE_AUDIO:
2737                 if (st->codec->sample_rate != 0 &&
2738                     st->codec->channels != 0)
2739                     goto the_end;
2740                 break;
2741             case AVMEDIA_TYPE_VIDEO:
2742             case AVMEDIA_TYPE_SUBTITLE:
2743                 goto the_end;
2744             default:
2745                 break;
2746             }
2747         }
2748     }
2749  the_end:
2750     stream_component_close(is, start_index);
2751     stream_component_open(is, stream_index);
2752 }
2753
2754
2755 static void toggle_full_screen(VideoState *is)
2756 {
2757     is_full_screen = !is_full_screen;
2758 #if defined(__APPLE__) && SDL_VERSION_ATLEAST(1, 2, 14)
2759     /* OSX needs to reallocate the SDL overlays */
2760     for (int i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
2761         is->pictq[i].reallocate = 1;
2762     }
2763 #endif
2764     video_open(is);
2765 }
2766
2767 static void toggle_pause(VideoState *is)
2768 {
2769     stream_toggle_pause(is);
2770     is->step = 0;
2771 }
2772
2773 static void step_to_next_frame(VideoState *is)
2774 {
2775     /* if the stream is paused, unpause it, then step */
2776     if (is->paused)
2777         stream_toggle_pause(is);
2778     is->step = 1;
2779 }
2780
2781 static void toggle_audio_display(VideoState *is)
2782 {
2783     int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2784     is->show_mode = (is->show_mode + 1) % SHOW_MODE_NB;
2785     fill_rectangle(screen,
2786                 is->xleft, is->ytop, is->width, is->height,
2787                 bgcolor);
2788     SDL_UpdateRect(screen, is->xleft, is->ytop, is->width, is->height);
2789 }
2790
2791 /* handle an event sent by the GUI */
2792 static void event_loop(VideoState *cur_stream)
2793 {
2794     SDL_Event event;
2795     double incr, pos, frac;
2796
2797     for(;;) {
2798         double x;
2799         SDL_WaitEvent(&event);
2800         switch(event.type) {
2801         case SDL_KEYDOWN:
2802             if (exit_on_keydown) {
2803                 do_exit(cur_stream);
2804                 break;
2805             }
2806             switch(event.key.keysym.sym) {
2807             case SDLK_ESCAPE:
2808             case SDLK_q:
2809                 do_exit(cur_stream);
2810                 break;
2811             case SDLK_f:
2812                 toggle_full_screen(cur_stream);
2813                 break;
2814             case SDLK_p:
2815             case SDLK_SPACE:
2816                 toggle_pause(cur_stream);
2817                 break;
2818             case SDLK_s: //S: Step to next frame
2819                 step_to_next_frame(cur_stream);
2820                 break;
2821             case SDLK_a:
2822                 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
2823                 break;
2824             case SDLK_v:
2825                 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
2826                 break;
2827             case SDLK_t:
2828                 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
2829                 break;
2830             case SDLK_w:
2831                 toggle_audio_display(cur_stream);
2832                 break;
2833             case SDLK_LEFT:
2834                 incr = -10.0;
2835                 goto do_seek;
2836             case SDLK_RIGHT:
2837                 incr = 10.0;
2838                 goto do_seek;
2839             case SDLK_UP:
2840                 incr = 60.0;
2841                 goto do_seek;
2842             case SDLK_DOWN:
2843                 incr = -60.0;
2844             do_seek:
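                     /* two seek modes: by bytes, where the time increment is turned
                        into a byte offset using the stream bitrate (with a rough
                        fallback of 180000 bytes/s), or by timestamp, where the
                        increment is added to the master clock and converted to
                        AV_TIME_BASE units */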
2845                 if (seek_by_bytes) {
2846                     if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos>=0){
2847                         pos= cur_stream->video_current_pos;
2848                     }else if(cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos>=0){
2849                         pos= cur_stream->audio_pkt.pos;
2850                     }else
2851                         pos = avio_tell(cur_stream->ic->pb);
2852                     if (cur_stream->ic->bit_rate)
2853                         incr *= cur_stream->ic->bit_rate / 8.0;
2854                     else
2855                         incr *= 180000.0;
2856                     pos += incr;
2857                     stream_seek(cur_stream, pos, incr, 1);
2858                 } else {
2859                     pos = get_master_clock(cur_stream);
2860                     pos += incr;
2861                     stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
2862                 }
2863                 break;
2864             default:
2865                 break;
2866             }
2867             break;
2868         case SDL_MOUSEBUTTONDOWN:
2869             if (exit_on_mousedown) {
2870                 do_exit(cur_stream);
2871                 break;
2872             }
2873         case SDL_MOUSEMOTION:
2874             if(event.type ==SDL_MOUSEBUTTONDOWN){
2875                 x= event.button.x;
2876             }else{
2877                 if(event.motion.state != SDL_PRESSED)
2878                     break;
2879                 x= event.motion.x;
2880             }
2881             if(seek_by_bytes || cur_stream->ic->duration<=0){
2882                 uint64_t size=  avio_size(cur_stream->ic->pb);
2883                 stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
2884             }else{
2885                 int64_t ts;
2886                 int ns, hh, mm, ss;
2887                 int tns, thh, tmm, tss;
2888                 tns = cur_stream->ic->duration/1000000LL;
2889                 thh = tns/3600;
2890                 tmm = (tns%3600)/60;
2891                 tss = (tns%60);
2892                 frac = x/cur_stream->width;
2893                 ns = frac*tns;
2894                 hh = ns/3600;
2895                 mm = (ns%3600)/60;
2896                 ss = (ns%60);
2897                 fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
2898                         hh, mm, ss, thh, tmm, tss);
2899                 ts = frac*cur_stream->ic->duration;
2900                 if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
2901                     ts += cur_stream->ic->start_time;
2902                 stream_seek(cur_stream, ts, 0, 0);
2903             }
2904             break;
2905         case SDL_VIDEORESIZE:
2906             screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
2907                                       SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
2908             screen_width = cur_stream->width = event.resize.w;
2909             screen_height= cur_stream->height= event.resize.h;
2910             break;
2911         case SDL_QUIT:
2912         case FF_QUIT_EVENT:
2913             do_exit(cur_stream);
2914             break;
2915         case FF_ALLOC_EVENT:
2916             video_open(event.user.data1);
2917             alloc_picture(event.user.data1);
2918             break;
2919         case FF_REFRESH_EVENT:
2920             video_refresh(event.user.data1);
2921             cur_stream->refresh=0;
2922             break;
2923         default:
2924             break;
2925         }
2926     }
2927 }
2928
2929 static int opt_frame_size(const char *opt, const char *arg)
2930 {
2931     av_log(NULL, AV_LOG_WARNING, "Option -s is deprecated, use -video_size.\n");
2932     return opt_default("video_size", arg);
2933 }
2934
2935 static int opt_width(const char *opt, const char *arg)
2936 {
2937     screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2938     return 0;
2939 }
2940
2941 static int opt_height(const char *opt, const char *arg)
2942 {
2943     screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2944     return 0;
2945 }
2946
2947 static int opt_format(const char *opt, const char *arg)
2948 {
2949     file_iformat = av_find_input_format(arg);
2950     if (!file_iformat) {
2951         fprintf(stderr, "Unknown input format: %s\n", arg);
2952         return AVERROR(EINVAL);
2953     }
2954     return 0;
2955 }
2956
2957 static int opt_frame_pix_fmt(const char *opt, const char *arg)
2958 {
2959     av_log(NULL, AV_LOG_WARNING, "Option -pix_fmt is deprecated, use -pixel_format.\n");
2960     return opt_default("pixel_format", arg);
2961 }
2962
2963 static int opt_sync(const char *opt, const char *arg)
2964 {
2965     if (!strcmp(arg, "audio"))
2966         av_sync_type = AV_SYNC_AUDIO_MASTER;
2967     else if (!strcmp(arg, "video"))
2968         av_sync_type = AV_SYNC_VIDEO_MASTER;
2969     else if (!strcmp(arg, "ext"))
2970         av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2971     else {
2972         fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
2973         exit(1);
2974     }
2975     return 0;
2976 }
2977
2978 static int opt_seek(const char *opt, const char *arg)
2979 {
2980     start_time = parse_time_or_die(opt, arg, 1);
2981     return 0;
2982 }
2983
2984 static int opt_duration(const char *opt, const char *arg)
2985 {
2986     duration = parse_time_or_die(opt, arg, 1);
2987     return 0;
2988 }
2989
2990 static int opt_show_mode(const char *opt, const char *arg)
2991 {
2992     show_mode = !strcmp(arg, "video") ? SHOW_MODE_VIDEO :
2993                 !strcmp(arg, "waves") ? SHOW_MODE_WAVES :
2994                 !strcmp(arg, "rdft" ) ? SHOW_MODE_RDFT  :
2995                 parse_number_or_die(opt, arg, OPT_INT, 0, SHOW_MODE_NB-1);
2996     return 0;
2997 }
2998
2999 static void opt_input_file(void *optctx, const char *filename)
3000 {
3001     if (input_filename) {
3002         fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
3003                 filename, input_filename);
3004         exit_program(1);
3005     }
3006     if (!strcmp(filename, "-"))
3007         filename = "pipe:";
3008     input_filename = filename;
3009 }
3010
3011 static int opt_codec(void *o, const char *opt, const char *arg)
3012 {
3013     switch(opt[strlen(opt)-1]){
3014     case 'a' :    audio_codec_name = arg; break;
3015     case 's' : subtitle_codec_name = arg; break;
3016     case 'v' :    video_codec_name = arg; break;
3017     }
3018     return 0;
3019 }
3020
3021 static int dummy;
3022
3023 static const OptionDef options[] = {
3024 #include "cmdutils_common_opts.h"
3025     { "x", HAS_ARG, {(void*)opt_width}, "force displayed width", "width" },
3026     { "y", HAS_ARG, {(void*)opt_height}, "force displayed height", "height" },
3027     { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
3028     { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
3029     { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
3030     { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
3031     { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_AUDIO]}, "select desired audio stream", "stream_number" },
3032     { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_VIDEO]}, "select desired video stream", "stream_number" },
3033     { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_SUBTITLE]}, "select desired subtitle stream", "stream_number" },
3034     { "ss", HAS_ARG, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
3035     { "t", HAS_ARG, {(void*)&opt_duration}, "play \"duration\" seconds of audio/video", "duration" },
3036     { "bytes", OPT_INT | HAS_ARG, {(void*)&seek_by_bytes}, "seek by bytes 0=off 1=on -1=auto", "val" },
3037     { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
3038     { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
3039     { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
3040     { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
3041     { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "work around bugs", "" },
3042     { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "enable non-spec-compliant optimizations", "" },
3043     { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
3044     { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts 0=off 1=on -1=auto", ""},
3045     { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
3046     { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
3047     { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
3048     { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
3049     { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algorithm",  "algo" },
3050     { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_recognition}, "set error detection threshold (0-4)",  "threshold" },
3051     { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options",  "bit_mask" },
3052     { "sync", HAS_ARG | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync type (type=audio/video/ext)", "type" },
3053     { "autoexit", OPT_BOOL | OPT_EXPERT, {(void*)&autoexit}, "exit at the end", "" },
3054     { "exitonkeydown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_keydown}, "exit on key down", "" },
3055     { "exitonmousedown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_mousedown}, "exit on mouse down", "" },
3056     { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&loop}, "set number of times the playback shall be looped", "loop count" },
3057     { "framedrop", OPT_BOOL | OPT_EXPERT, {(void*)&framedrop}, "drop frames when the CPU is too slow", "" },
3058     { "window_title", OPT_STRING | HAS_ARG, {(void*)&window_title}, "set window title", "window title" },
3059 #if CONFIG_AVFILTER
3060     { "vf", OPT_STRING | HAS_ARG, {(void*)&vfilters}, "video filters", "filter list" },
3061 #endif
3062     { "rdftspeed", OPT_INT | HAS_ARG | OPT_AUDIO | OPT_EXPERT, {(void*)&rdftspeed}, "rdft speed", "msecs" },
3063     { "showmode", HAS_ARG, {(void*)opt_show_mode}, "select show mode (0 = video, 1 = waves, 2 = RDFT)", "mode" },
3064     { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch-all option", "" },
3065     { "i", OPT_BOOL, {(void*)&dummy}, "read specified file", "input_file" },
3066     { "codec", HAS_ARG | OPT_FUNC2, {(void*)opt_codec}, "force decoder", "decoder" },
3067     { NULL, },
3068 };
3069
3070 static void show_usage(void)
3071 {
3072     printf("Simple media player\n");
3073     printf("usage: %s [options] input_file\n", program_name);
3074     printf("\n");
3075 }
3076
3077 static int opt_help(const char *opt, const char *arg)
3078 {
3079     av_log_set_callback(log_callback_help);
3080     show_usage();
3081     show_help_options(options, "Main options:\n",
3082                       OPT_EXPERT, 0);
3083     show_help_options(options, "\nAdvanced options:\n",
3084                       OPT_EXPERT, OPT_EXPERT);
3085     printf("\n");
3086     show_help_children(avcodec_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3087     show_help_children(avformat_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3088 #if !CONFIG_AVFILTER
3089     show_help_children(sws_get_class(), AV_OPT_FLAG_ENCODING_PARAM);
3090 #endif
3091     printf("\nWhile playing:\n"
3092            "q, ESC              quit\n"
3093            "f                   toggle full screen\n"
3094            "p, SPC              pause\n"
3095            "a                   cycle audio channel\n"
3096            "v                   cycle video channel\n"
3097            "t                   cycle subtitle channel\n"
3098            "w                   show audio waves\n"
3099            "s                   activate frame-step mode\n"
3100            "left/right          seek backward/forward 10 seconds\n"
3101            "down/up             seek backward/forward 1 minute\n"
3102            "mouse click         seek to percentage in file corresponding to fraction of width\n"
3103            );
3104     return 0;
3105 }
3106
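/* lock manager callback registered with av_lockmgr_register(): wraps libavcodec's
 * locking requests around SDL mutexes; must return 0 on success, nonzero on failure */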
3107 static int lockmgr(void **mtx, enum AVLockOp op)
3108 {
3109     switch (op) {
3110     case AV_LOCK_CREATE:
3111         *mtx = SDL_CreateMutex();
3112         if (!*mtx)
3113             return 1;
3114         return 0;
3115     case AV_LOCK_OBTAIN:
3116         return !!SDL_LockMutex(*mtx);
3117     case AV_LOCK_RELEASE:
3118         return !!SDL_UnlockMutex(*mtx);
3119     case AV_LOCK_DESTROY:
3120         SDL_DestroyMutex(*mtx);
3121         return 0;
3122     }
3123     return 1;
3124 }
3125
3126 /* program entry point */
3127 int main(int argc, char **argv)
3128 {
3129     int flags;
3130     VideoState *is;
3131
3132     av_log_set_flags(AV_LOG_SKIP_REPEATED);
3133     parse_loglevel(argc, argv, options);
3134
3135     /* register all codecs, demuxers and protocols */
3136     avcodec_register_all();
3137 #if CONFIG_AVDEVICE
3138     avdevice_register_all();
3139 #endif
3140 #if CONFIG_AVFILTER
3141     avfilter_register_all();
3142 #endif
3143     av_register_all();
3144
3145     init_opts();
3146
3147     show_banner();
3148
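    /* non-option (positional) arguments are handed to opt_input_file() */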
3149     parse_options(NULL, argc, argv, options, opt_input_file);
3150
3151     if (!input_filename) {
3152         show_usage();
3153         fprintf(stderr, "An input file must be specified\n");
3154         fprintf(stderr, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
3155         exit(1);
3156     }
3157
3158     if (display_disable) {
3159         video_disable = 1;
3160     }
3161     flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3162     if (audio_disable)
3163         flags &= ~SDL_INIT_AUDIO;
3164 #if !defined(__MINGW32__) && !defined(__APPLE__)
3165     flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
3166 #endif
3167     if (SDL_Init(flags)) {
3168         fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
3169         fprintf(stderr, "(Did you set the DISPLAY variable?)\n");
3170         exit(1);
3171     }
3172
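    /* remember the desktop resolution so full-screen mode can use it */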
3173     if (!display_disable) {
3174 #if HAVE_SDL_VIDEO_SIZE
3175         const SDL_VideoInfo *vi = SDL_GetVideoInfo();
3176         fs_screen_width = vi->current_w;
3177         fs_screen_height = vi->current_h;
3178 #endif
3179     }
3180
3181     SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
3182     SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3183     SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3184
3185     if (av_lockmgr_register(lockmgr)) {
3186         fprintf(stderr, "Could not initialize lock manager!\n");
3187         do_exit(NULL);
3188     }
3189
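    /* flush_pkt is a sentinel packet: queueing it (e.g. after a seek) tells the
       decoder threads to flush their codec buffers */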
3190     av_init_packet(&flush_pkt);
3191     flush_pkt.data = (uint8_t *)"FLUSH";
3192
3193     is = stream_open(input_filename, file_iformat);
3194     if (!is) {
3195         fprintf(stderr, "Failed to initialize VideoState!\n");
3196         do_exit(NULL);
3197     }
3198
3199     event_loop(is);
3200
3201     /* event_loop() above never returns */
3202
3203     return 0;
3204 }