ffplay: skip return value of avcodec_decode_video2 / avcodec_decode_subtitle2
ffplay.c
1 /*
2  * ffplay: Simple Media Player based on the Libav libraries
3  * Copyright (c) 2003 Fabrice Bellard
4  *
5  * This file is part of Libav.
6  *
7  * Libav is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * Libav is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with Libav; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21
22 #include "config.h"
23 #include <inttypes.h>
24 #include <math.h>
25 #include <limits.h>
26 #include "libavutil/avstring.h"
27 #include "libavutil/colorspace.h"
28 #include "libavutil/mathematics.h"
29 #include "libavutil/pixdesc.h"
30 #include "libavutil/imgutils.h"
31 #include "libavutil/dict.h"
32 #include "libavutil/parseutils.h"
33 #include "libavutil/samplefmt.h"
34 #include "libavformat/avformat.h"
35 #include "libavdevice/avdevice.h"
36 #include "libswscale/swscale.h"
37 #include "libavcodec/audioconvert.h"
38 #include "libavutil/opt.h"
39 #include "libavcodec/avfft.h"
40
41 #if CONFIG_AVFILTER
42 # include "libavfilter/avfilter.h"
43 # include "libavfilter/avfiltergraph.h"
44 #endif
45
46 #include "cmdutils.h"
47
48 #include <SDL.h>
49 #include <SDL_thread.h>
50
51 #ifdef __MINGW32__
52 #undef main /* We don't want SDL to override our main() */
53 #endif
54
55 #include <unistd.h>
56 #include <assert.h>
57
58 const char program_name[] = "ffplay";
59 const int program_birth_year = 2003;
60
61 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
62 #define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
63 #define MIN_FRAMES 5
64
65 /* SDL audio buffer size, in samples. Should be small to have precise
66    A/V sync as SDL does not have hardware buffer fullness info. */
67 #define SDL_AUDIO_BUFFER_SIZE 1024
68
69 /* no AV sync correction is done if below the AV sync threshold */
70 #define AV_SYNC_THRESHOLD 0.01
71 /* no AV correction is done if too big error */
72 #define AV_NOSYNC_THRESHOLD 10.0
73
74 #define FRAME_SKIP_FACTOR 0.05
75
76 /* maximum audio speed change to get correct sync */
77 #define SAMPLE_CORRECTION_PERCENT_MAX 10
78
79 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
80 #define AUDIO_DIFF_AVG_NB   20
81
82 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
83 #define SAMPLE_ARRAY_SIZE (2*65536)
84
85 static int sws_flags = SWS_BICUBIC;
86
87 typedef struct PacketQueue {
88     AVPacketList *first_pkt, *last_pkt;
89     int nb_packets;
90     int size;
91     int abort_request;
92     SDL_mutex *mutex;
93     SDL_cond *cond;
94 } PacketQueue;
95
96 #define VIDEO_PICTURE_QUEUE_SIZE 2
97 #define SUBPICTURE_QUEUE_SIZE 4
98
99 typedef struct VideoPicture {
100     double pts;                                  ///<presentation time stamp for this picture
101     double target_clock;                         ///<av_gettime() time at which this should be displayed ideally
102     int64_t pos;                                 ///<byte position in file
103     SDL_Overlay *bmp;
104     int width, height; /* source height & width */
105     int allocated;
106     enum PixelFormat pix_fmt;
107
108 #if CONFIG_AVFILTER
109     AVFilterBufferRef *picref;
110 #endif
111 } VideoPicture;
112
113 typedef struct SubPicture {
114     double pts; /* presentation time stamp for this picture */
115     AVSubtitle sub;
116 } SubPicture;
117
118 enum {
119     AV_SYNC_AUDIO_MASTER, /* default choice */
120     AV_SYNC_VIDEO_MASTER,
121     AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
122 };
123
124 typedef struct VideoState {
125     SDL_Thread *parse_tid;
126     SDL_Thread *video_tid;
127     SDL_Thread *refresh_tid;
128     AVInputFormat *iformat;
129     int no_background;
130     int abort_request;
131     int paused;
132     int last_paused;
133     int seek_req;
134     int seek_flags;
135     int64_t seek_pos;
136     int64_t seek_rel;
137     int read_pause_return;
138     AVFormatContext *ic;
139     int dtg_active_format;
140
141     int audio_stream;
142
143     int av_sync_type;
144     double external_clock; /* external clock base */
145     int64_t external_clock_time;
146
147     double audio_clock;
148     double audio_diff_cum; /* used for AV difference average computation */
149     double audio_diff_avg_coef;
150     double audio_diff_threshold;
151     int audio_diff_avg_count;
152     AVStream *audio_st;
153     PacketQueue audioq;
154     int audio_hw_buf_size;
155     /* samples output by the codec. we reserve more space for avsync
156        compensation */
157     DECLARE_ALIGNED(16,uint8_t,audio_buf1)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
158     DECLARE_ALIGNED(16,uint8_t,audio_buf2)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
159     uint8_t *audio_buf;
160     unsigned int audio_buf_size; /* in bytes */
161     int audio_buf_index; /* in bytes */
162     AVPacket audio_pkt_temp;
163     AVPacket audio_pkt;
164     enum AVSampleFormat audio_src_fmt;
165     AVAudioConvert *reformat_ctx;
166
167     int show_audio; /* if true, display audio samples */
168     int16_t sample_array[SAMPLE_ARRAY_SIZE];
169     int sample_array_index;
170     int last_i_start;
171     RDFTContext *rdft;
172     int rdft_bits;
173     FFTSample *rdft_data;
174     int xpos;
175
176     SDL_Thread *subtitle_tid;
177     int subtitle_stream;
178     int subtitle_stream_changed;
179     AVStream *subtitle_st;
180     PacketQueue subtitleq;
181     SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
182     int subpq_size, subpq_rindex, subpq_windex;
183     SDL_mutex *subpq_mutex;
184     SDL_cond *subpq_cond;
185
186     double frame_timer;
187     double frame_last_pts;
188     double frame_last_delay;
189     double video_clock;                          ///<pts of last decoded frame / predicted pts of next decoded frame
190     int video_stream;
191     AVStream *video_st;
192     PacketQueue videoq;
193     double video_current_pts;                    ///<current displayed pts (different from video_clock if frame fifos are used)
194     double video_current_pts_drift;              ///<video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
195     int64_t video_current_pos;                   ///<current displayed file pos
196     VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
197     int pictq_size, pictq_rindex, pictq_windex;
198     SDL_mutex *pictq_mutex;
199     SDL_cond *pictq_cond;
200 #if !CONFIG_AVFILTER
201     struct SwsContext *img_convert_ctx;
202 #endif
203
204     //    QETimer *video_timer;
205     char filename[1024];
206     int width, height, xleft, ytop;
207
208     PtsCorrectionContext pts_ctx;
209
210 #if CONFIG_AVFILTER
211     AVFilterContext *out_video_filter;          ///<the last filter in the video chain
212 #endif
213
214     float skip_frames;
215     float skip_frames_index;
216     int refresh;
217 } VideoState;
218
219 static void show_help(void);
220
221 /* options specified by the user */
222 static AVInputFormat *file_iformat;
223 static const char *input_filename;
224 static const char *window_title;
225 static int fs_screen_width;
226 static int fs_screen_height;
227 static int screen_width = 0;
228 static int screen_height = 0;
229 static int frame_width = 0;
230 static int frame_height = 0;
231 static enum PixelFormat frame_pix_fmt = PIX_FMT_NONE;
232 static int audio_disable;
233 static int video_disable;
234 static int wanted_stream[AVMEDIA_TYPE_NB]={
235     [AVMEDIA_TYPE_AUDIO]=-1,
236     [AVMEDIA_TYPE_VIDEO]=-1,
237     [AVMEDIA_TYPE_SUBTITLE]=-1,
238 };
239 static int seek_by_bytes=-1;
240 static int display_disable;
241 static int show_status = 1;
242 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
243 static int64_t start_time = AV_NOPTS_VALUE;
244 static int64_t duration = AV_NOPTS_VALUE;
245 static int debug = 0;
246 static int debug_mv = 0;
247 static int step = 0;
248 static int thread_count = 1;
249 static int workaround_bugs = 1;
250 static int fast = 0;
251 static int genpts = 0;
252 static int lowres = 0;
253 static int idct = FF_IDCT_AUTO;
254 static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
255 static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
256 static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
257 static int error_recognition = FF_ER_CAREFUL;
258 static int error_concealment = 3;
259 static int decoder_reorder_pts= -1;
260 static int autoexit;
261 static int exit_on_keydown;
262 static int exit_on_mousedown;
263 static int loop=1;
264 static int framedrop=1;
265
266 static int rdftspeed=20;
267 #if CONFIG_AVFILTER
268 static char *vfilters = NULL;
269 #endif
270
271 /* current context */
272 static int is_full_screen;
273 static VideoState *cur_stream;
274 static int64_t audio_callback_time;
275
276 static AVPacket flush_pkt;
277
278 #define FF_ALLOC_EVENT   (SDL_USEREVENT)
279 #define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
280 #define FF_QUIT_EVENT    (SDL_USEREVENT + 2)
281
282 static SDL_Surface *screen;
283
284 static int packet_queue_put(PacketQueue *q, AVPacket *pkt);
285
286 /* packet queue handling */
287 static void packet_queue_init(PacketQueue *q)
288 {
289     memset(q, 0, sizeof(PacketQueue));
290     q->mutex = SDL_CreateMutex();
291     q->cond = SDL_CreateCond();
292     packet_queue_put(q, &flush_pkt);
293 }
294
295 static void packet_queue_flush(PacketQueue *q)
296 {
297     AVPacketList *pkt, *pkt1;
298
299     SDL_LockMutex(q->mutex);
300     for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
301         pkt1 = pkt->next;
302         av_free_packet(&pkt->pkt);
303         av_freep(&pkt);
304     }
305     q->last_pkt = NULL;
306     q->first_pkt = NULL;
307     q->nb_packets = 0;
308     q->size = 0;
309     SDL_UnlockMutex(q->mutex);
310 }
311
312 static void packet_queue_end(PacketQueue *q)
313 {
314     packet_queue_flush(q);
315     SDL_DestroyMutex(q->mutex);
316     SDL_DestroyCond(q->cond);
317 }
318
319 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
320 {
321     AVPacketList *pkt1;
322
323     /* duplicate the packet */
324     if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
325         return -1;
326
327     pkt1 = av_malloc(sizeof(AVPacketList));
328     if (!pkt1)
329         return -1;
330     pkt1->pkt = *pkt;
331     pkt1->next = NULL;
332
333
334     SDL_LockMutex(q->mutex);
335
336     if (!q->last_pkt)
337
338         q->first_pkt = pkt1;
339     else
340         q->last_pkt->next = pkt1;
341     q->last_pkt = pkt1;
342     q->nb_packets++;
343     q->size += pkt1->pkt.size + sizeof(*pkt1);
344     /* XXX: should duplicate packet data in DV case */
345     SDL_CondSignal(q->cond);
346
347     SDL_UnlockMutex(q->mutex);
348     return 0;
349 }
350
351 static void packet_queue_abort(PacketQueue *q)
352 {
353     SDL_LockMutex(q->mutex);
354
355     q->abort_request = 1;
356
357     SDL_CondSignal(q->cond);
358
359     SDL_UnlockMutex(q->mutex);
360 }
361
362 /* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
363 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
364 {
365     AVPacketList *pkt1;
366     int ret;
367
368     SDL_LockMutex(q->mutex);
369
370     for(;;) {
371         if (q->abort_request) {
372             ret = -1;
373             break;
374         }
375
376         pkt1 = q->first_pkt;
377         if (pkt1) {
378             q->first_pkt = pkt1->next;
379             if (!q->first_pkt)
380                 q->last_pkt = NULL;
381             q->nb_packets--;
382             q->size -= pkt1->pkt.size + sizeof(*pkt1);
383             *pkt = pkt1->pkt;
384             av_free(pkt1);
385             ret = 1;
386             break;
387         } else if (!block) {
388             ret = 0;
389             break;
390         } else {
391             SDL_CondWait(q->cond, q->mutex);
392         }
393     }
394     SDL_UnlockMutex(q->mutex);
395     return ret;
396 }
397
398 static inline void fill_rectangle(SDL_Surface *screen,
399                                   int x, int y, int w, int h, int color)
400 {
401     SDL_Rect rect;
402     rect.x = x;
403     rect.y = y;
404     rect.w = w;
405     rect.h = h;
406     SDL_FillRect(screen, &rect, color);
407 }
408
409 #define ALPHA_BLEND(a, oldp, newp, s)\
410 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
411
412 #define RGBA_IN(r, g, b, a, s)\
413 {\
414     unsigned int v = ((const uint32_t *)(s))[0];\
415     a = (v >> 24) & 0xff;\
416     r = (v >> 16) & 0xff;\
417     g = (v >> 8) & 0xff;\
418     b = v & 0xff;\
419 }
420
421 #define YUVA_IN(y, u, v, a, s, pal)\
422 {\
423     unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
424     a = (val >> 24) & 0xff;\
425     y = (val >> 16) & 0xff;\
426     u = (val >> 8) & 0xff;\
427     v = val & 0xff;\
428 }
429
430 #define YUVA_OUT(d, y, u, v, a)\
431 {\
432     ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
433 }
434
435
436 #define BPP 1
437
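/* blend a palettized subtitle rectangle onto the YUV 4:2:0 destination picture */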
438 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
439 {
440     int wrap, wrap3, width2, skip2;
441     int y, u, v, a, u1, v1, a1, w, h;
442     uint8_t *lum, *cb, *cr;
443     const uint8_t *p;
444     const uint32_t *pal;
445     int dstx, dsty, dstw, dsth;
446
447     dstw = av_clip(rect->w, 0, imgw);
448     dsth = av_clip(rect->h, 0, imgh);
449     dstx = av_clip(rect->x, 0, imgw - dstw);
450     dsty = av_clip(rect->y, 0, imgh - dsth);
451     lum = dst->data[0] + dsty * dst->linesize[0];
452     cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
453     cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
454
455     width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
456     skip2 = dstx >> 1;
457     wrap = dst->linesize[0];
458     wrap3 = rect->pict.linesize[0];
459     p = rect->pict.data[0];
460     pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */
461
462     if (dsty & 1) {
463         lum += dstx;
464         cb += skip2;
465         cr += skip2;
466
467         if (dstx & 1) {
468             YUVA_IN(y, u, v, a, p, pal);
469             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
470             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
471             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
472             cb++;
473             cr++;
474             lum++;
475             p += BPP;
476         }
477         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
478             YUVA_IN(y, u, v, a, p, pal);
479             u1 = u;
480             v1 = v;
481             a1 = a;
482             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
483
484             YUVA_IN(y, u, v, a, p + BPP, pal);
485             u1 += u;
486             v1 += v;
487             a1 += a;
488             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
489             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
490             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
491             cb++;
492             cr++;
493             p += 2 * BPP;
494             lum += 2;
495         }
496         if (w) {
497             YUVA_IN(y, u, v, a, p, pal);
498             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
499             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
500             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
501             p++;
502             lum++;
503         }
504         p += wrap3 - dstw * BPP;
505         lum += wrap - dstw - dstx;
506         cb += dst->linesize[1] - width2 - skip2;
507         cr += dst->linesize[2] - width2 - skip2;
508     }
509     for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
510         lum += dstx;
511         cb += skip2;
512         cr += skip2;
513
514         if (dstx & 1) {
515             YUVA_IN(y, u, v, a, p, pal);
516             u1 = u;
517             v1 = v;
518             a1 = a;
519             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
520             p += wrap3;
521             lum += wrap;
522             YUVA_IN(y, u, v, a, p, pal);
523             u1 += u;
524             v1 += v;
525             a1 += a;
526             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
527             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
528             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
529             cb++;
530             cr++;
531             p += -wrap3 + BPP;
532             lum += -wrap + 1;
533         }
534         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
535             YUVA_IN(y, u, v, a, p, pal);
536             u1 = u;
537             v1 = v;
538             a1 = a;
539             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
540
541             YUVA_IN(y, u, v, a, p + BPP, pal);
542             u1 += u;
543             v1 += v;
544             a1 += a;
545             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
546             p += wrap3;
547             lum += wrap;
548
549             YUVA_IN(y, u, v, a, p, pal);
550             u1 += u;
551             v1 += v;
552             a1 += a;
553             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
554
555             YUVA_IN(y, u, v, a, p + BPP, pal);
556             u1 += u;
557             v1 += v;
558             a1 += a;
559             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
560
561             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
562             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
563
564             cb++;
565             cr++;
566             p += -wrap3 + 2 * BPP;
567             lum += -wrap + 2;
568         }
569         if (w) {
570             YUVA_IN(y, u, v, a, p, pal);
571             u1 = u;
572             v1 = v;
573             a1 = a;
574             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
575             p += wrap3;
576             lum += wrap;
577             YUVA_IN(y, u, v, a, p, pal);
578             u1 += u;
579             v1 += v;
580             a1 += a;
581             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
582             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
583             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
584             cb++;
585             cr++;
586             p += -wrap3 + BPP;
587             lum += -wrap + 1;
588         }
589         p += wrap3 + (wrap3 - dstw * BPP);
590         lum += wrap + (wrap - dstw - dstx);
591         cb += dst->linesize[1] - width2 - skip2;
592         cr += dst->linesize[2] - width2 - skip2;
593     }
594     /* handle odd height */
595     if (h) {
596         lum += dstx;
597         cb += skip2;
598         cr += skip2;
599
600         if (dstx & 1) {
601             YUVA_IN(y, u, v, a, p, pal);
602             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
603             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
604             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
605             cb++;
606             cr++;
607             lum++;
608             p += BPP;
609         }
610         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
611             YUVA_IN(y, u, v, a, p, pal);
612             u1 = u;
613             v1 = v;
614             a1 = a;
615             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
616
617             YUVA_IN(y, u, v, a, p + BPP, pal);
618             u1 += u;
619             v1 += v;
620             a1 += a;
621             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
622             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
623             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
624             cb++;
625             cr++;
626             p += 2 * BPP;
627             lum += 2;
628         }
629         if (w) {
630             YUVA_IN(y, u, v, a, p, pal);
631             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
632             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
633             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
634         }
635     }
636 }
637
638 static void free_subpicture(SubPicture *sp)
639 {
640     avsubtitle_free(&sp->sub);
641 }
642
643 static void video_image_display(VideoState *is)
644 {
645     VideoPicture *vp;
646     SubPicture *sp;
647     AVPicture pict;
648     float aspect_ratio;
649     int width, height, x, y;
650     SDL_Rect rect;
651     int i;
652
653     vp = &is->pictq[is->pictq_rindex];
654     if (vp->bmp) {
655 #if CONFIG_AVFILTER
656          if (vp->picref->video->pixel_aspect.num == 0)
657              aspect_ratio = 0;
658          else
659              aspect_ratio = av_q2d(vp->picref->video->pixel_aspect);
660 #else
661
662         /* XXX: use variable in the frame */
663         if (is->video_st->sample_aspect_ratio.num)
664             aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
665         else if (is->video_st->codec->sample_aspect_ratio.num)
666             aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
667         else
668             aspect_ratio = 0;
669 #endif
670         if (aspect_ratio <= 0.0)
671             aspect_ratio = 1.0;
672         aspect_ratio *= (float)vp->width / (float)vp->height;
673
674         if (is->subtitle_st)
675         {
676             if (is->subpq_size > 0)
677             {
678                 sp = &is->subpq[is->subpq_rindex];
679
680                 if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
681                 {
682                     SDL_LockYUVOverlay (vp->bmp);
683
684                     pict.data[0] = vp->bmp->pixels[0];
685                     pict.data[1] = vp->bmp->pixels[2];
686                     pict.data[2] = vp->bmp->pixels[1];
687
688                     pict.linesize[0] = vp->bmp->pitches[0];
689                     pict.linesize[1] = vp->bmp->pitches[2];
690                     pict.linesize[2] = vp->bmp->pitches[1];
691
692                     for (i = 0; i < sp->sub.num_rects; i++)
693                         blend_subrect(&pict, sp->sub.rects[i],
694                                       vp->bmp->w, vp->bmp->h);
695
696                     SDL_UnlockYUVOverlay (vp->bmp);
697                 }
698             }
699         }
700
701
702         /* XXX: we assume the screen has a 1.0 pixel aspect ratio */
703         height = is->height;
704         width = ((int)rint(height * aspect_ratio)) & ~1;
705         if (width > is->width) {
706             width = is->width;
707             height = ((int)rint(width / aspect_ratio)) & ~1;
708         }
709         x = (is->width - width) / 2;
710         y = (is->height - height) / 2;
711         is->no_background = 0;
712         rect.x = is->xleft + x;
713         rect.y = is->ytop  + y;
714         rect.w = width;
715         rect.h = height;
716         SDL_DisplayYUVOverlay(vp->bmp, &rect);
717     }
718 }
719
720 /* get the current audio output buffer size, in samples. With SDL, we
721    cannot get precise information */
722 static int audio_write_get_buf_size(VideoState *is)
723 {
724     return is->audio_buf_size - is->audio_buf_index;
725 }
726
727 static inline int compute_mod(int a, int b)
728 {
729     a = a % b;
730     if (a >= 0)
731         return a;
732     else
733         return a + b;
734 }
735
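/* draw the audio either as per-channel waveforms or as a scrolling RDFT spectrum */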
736 static void video_audio_display(VideoState *s)
737 {
738     int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
739     int ch, channels, h, h2, bgcolor, fgcolor;
740     int16_t time_diff;
741     int rdft_bits, nb_freq;
742
743     for(rdft_bits=1; (1<<rdft_bits)<2*s->height; rdft_bits++)
744         ;
745     nb_freq= 1<<(rdft_bits-1);
746
747     /* compute display index: center on currently output samples */
748     channels = s->audio_st->codec->channels;
749     nb_display_channels = channels;
750     if (!s->paused) {
751         int data_used= s->show_audio==1 ? s->width : (2*nb_freq);
752         n = 2 * channels;
753         delay = audio_write_get_buf_size(s);
754         delay /= n;
755
756         /* to be more precise, we take into account the time spent since
757            the last buffer computation */
758         if (audio_callback_time) {
759             time_diff = av_gettime() - audio_callback_time;
760             delay -= (time_diff * s->audio_st->codec->sample_rate) / 1000000;
761         }
762
763         delay += 2*data_used;
764         if (delay < data_used)
765             delay = data_used;
766
767         i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
768         if(s->show_audio==1){
769             h= INT_MIN;
770             for(i=0; i<1000; i+=channels){
771                 int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
772                 int a= s->sample_array[idx];
773                 int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
774                 int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
775                 int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
776                 int score= a-d;
777                 if(h<score && (b^c)<0){
778                     h= score;
779                     i_start= idx;
780                 }
781             }
782         }
783
784         s->last_i_start = i_start;
785     } else {
786         i_start = s->last_i_start;
787     }
788
789     bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
790     if(s->show_audio==1){
791         fill_rectangle(screen,
792                        s->xleft, s->ytop, s->width, s->height,
793                        bgcolor);
794
795         fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
796
797         /* total height for one channel */
798         h = s->height / nb_display_channels;
799         /* graph height / 2 */
800         h2 = (h * 9) / 20;
801         for(ch = 0;ch < nb_display_channels; ch++) {
802             i = i_start + ch;
803             y1 = s->ytop + ch * h + (h / 2); /* position of center line */
804             for(x = 0; x < s->width; x++) {
805                 y = (s->sample_array[i] * h2) >> 15;
806                 if (y < 0) {
807                     y = -y;
808                     ys = y1 - y;
809                 } else {
810                     ys = y1;
811                 }
812                 fill_rectangle(screen,
813                                s->xleft + x, ys, 1, y,
814                                fgcolor);
815                 i += channels;
816                 if (i >= SAMPLE_ARRAY_SIZE)
817                     i -= SAMPLE_ARRAY_SIZE;
818             }
819         }
820
821         fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
822
823         for(ch = 1;ch < nb_display_channels; ch++) {
824             y = s->ytop + ch * h;
825             fill_rectangle(screen,
826                            s->xleft, y, s->width, 1,
827                            fgcolor);
828         }
829         SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
830     }else{
831         nb_display_channels= FFMIN(nb_display_channels, 2);
832         if(rdft_bits != s->rdft_bits){
833             av_rdft_end(s->rdft);
834             av_free(s->rdft_data);
835             s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
836             s->rdft_bits= rdft_bits;
837             s->rdft_data= av_malloc(4*nb_freq*sizeof(*s->rdft_data));
838         }
839         {
840             FFTSample *data[2];
841             for(ch = 0;ch < nb_display_channels; ch++) {
842                 data[ch] = s->rdft_data + 2*nb_freq*ch;
843                 i = i_start + ch;
844                 for(x = 0; x < 2*nb_freq; x++) {
845                     double w= (x-nb_freq)*(1.0/nb_freq);
846                     data[ch][x]= s->sample_array[i]*(1.0-w*w);
847                     i += channels;
848                     if (i >= SAMPLE_ARRAY_SIZE)
849                         i -= SAMPLE_ARRAY_SIZE;
850                 }
851                 av_rdft_calc(s->rdft, data[ch]);
852             }
853             // Least efficient way to do this; we should of course access the data directly, but it's more than fast enough
854             for(y=0; y<s->height; y++){
855                 double w= 1/sqrt(nb_freq);
856                 int a= sqrt(w*sqrt(data[0][2*y+0]*data[0][2*y+0] + data[0][2*y+1]*data[0][2*y+1]));
857                 int b= (nb_display_channels == 2 ) ? sqrt(w*sqrt(data[1][2*y+0]*data[1][2*y+0]
858                        + data[1][2*y+1]*data[1][2*y+1])) : a;
859                 a= FFMIN(a,255);
860                 b= FFMIN(b,255);
861                 fgcolor = SDL_MapRGB(screen->format, a, b, (a+b)/2);
862
863                 fill_rectangle(screen,
864                             s->xpos, s->height-y, 1, 1,
865                             fgcolor);
866             }
867         }
868         SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
869         s->xpos++;
870         if(s->xpos >= s->width)
871             s->xpos= s->xleft;
872     }
873 }
874
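/* (re)open the SDL video surface, picking the size from the fullscreen mode,
   the user options, the filter output or the codec dimensions */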
875 static int video_open(VideoState *is){
876     int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
877     int w,h;
878
879     if(is_full_screen) flags |= SDL_FULLSCREEN;
880     else               flags |= SDL_RESIZABLE;
881
882     if (is_full_screen && fs_screen_width) {
883         w = fs_screen_width;
884         h = fs_screen_height;
885     } else if(!is_full_screen && screen_width){
886         w = screen_width;
887         h = screen_height;
888 #if CONFIG_AVFILTER
889     }else if (is->out_video_filter && is->out_video_filter->inputs[0]){
890         w = is->out_video_filter->inputs[0]->w;
891         h = is->out_video_filter->inputs[0]->h;
892 #else
893     }else if (is->video_st && is->video_st->codec->width){
894         w = is->video_st->codec->width;
895         h = is->video_st->codec->height;
896 #endif
897     } else {
898         w = 640;
899         h = 480;
900     }
901     if(screen && is->width == screen->w && screen->w == w
902        && is->height== screen->h && screen->h == h)
903         return 0;
904
905 #ifndef __APPLE__
906     screen = SDL_SetVideoMode(w, h, 0, flags);
907 #else
908     /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
909     screen = SDL_SetVideoMode(w, h, 24, flags);
910 #endif
911     if (!screen) {
912         fprintf(stderr, "SDL: could not set video mode - exiting\n");
913         return -1;
914     }
915     if (!window_title)
916         window_title = input_filename;
917     SDL_WM_SetCaption(window_title, window_title);
918
919     is->width = screen->w;
920     is->height = screen->h;
921
922     return 0;
923 }
924
925 /* display the current picture, if any */
926 static void video_display(VideoState *is)
927 {
928     if(!screen)
929         video_open(cur_stream);
930     if (is->audio_st && is->show_audio)
931         video_audio_display(is);
932     else if (is->video_st)
933         video_image_display(is);
934 }
935
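/* periodically push FF_REFRESH_EVENT so the main event loop calls video_refresh_timer() */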
936 static int refresh_thread(void *opaque)
937 {
938     VideoState *is= opaque;
939     while(!is->abort_request){
940         SDL_Event event;
941         event.type = FF_REFRESH_EVENT;
942         event.user.data1 = opaque;
943         if(!is->refresh){
944             is->refresh=1;
945             SDL_PushEvent(&event);
946         }
947         usleep(is->audio_st && is->show_audio ? rdftspeed*1000 : 5000); //FIXME ideally we should wait the correct time, but SDL's event passing is so slow it would be silly
948     }
949     return 0;
950 }
951
952 /* get the current audio clock value */
953 static double get_audio_clock(VideoState *is)
954 {
955     double pts;
956     int hw_buf_size, bytes_per_sec;
957     pts = is->audio_clock;
958     hw_buf_size = audio_write_get_buf_size(is);
959     bytes_per_sec = 0;
960     if (is->audio_st) {
961         bytes_per_sec = is->audio_st->codec->sample_rate *
962             2 * is->audio_st->codec->channels;
963     }
964     if (bytes_per_sec)
965         pts -= (double)hw_buf_size / bytes_per_sec;
966     return pts;
967 }
968
969 /* get the current video clock value */
970 static double get_video_clock(VideoState *is)
971 {
972     if (is->paused) {
973         return is->video_current_pts;
974     } else {
975         return is->video_current_pts_drift + av_gettime() / 1000000.0;
976     }
977 }
978
979 /* get the current external clock value */
980 static double get_external_clock(VideoState *is)
981 {
982     int64_t ti;
983     ti = av_gettime();
984     return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
985 }
986
987 /* get the current master clock value */
988 static double get_master_clock(VideoState *is)
989 {
990     double val;
991
992     if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
993         if (is->video_st)
994             val = get_video_clock(is);
995         else
996             val = get_audio_clock(is);
997     } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
998         if (is->audio_st)
999             val = get_audio_clock(is);
1000         else
1001             val = get_video_clock(is);
1002     } else {
1003         val = get_external_clock(is);
1004     }
1005     return val;
1006 }
1007
1008 /* seek in the stream */
1009 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1010 {
1011     if (!is->seek_req) {
1012         is->seek_pos = pos;
1013         is->seek_rel = rel;
1014         is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1015         if (seek_by_bytes)
1016             is->seek_flags |= AVSEEK_FLAG_BYTE;
1017         is->seek_req = 1;
1018     }
1019 }
1020
1021 /* pause or resume the video */
1022 static void stream_pause(VideoState *is)
1023 {
1024     if (is->paused) {
1025         is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
1026         if(is->read_pause_return != AVERROR(ENOSYS)){
1027             is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
1028         }
1029         is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
1030     }
1031     is->paused = !is->paused;
1032 }
1033
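/* compute the time at which the given frame should be displayed, adjusting the
   nominal frame delay to follow the master clock when video is not the master */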
1034 static double compute_target_time(double frame_current_pts, VideoState *is)
1035 {
1036     double delay, sync_threshold, diff;
1037
1038     /* compute nominal delay */
1039     delay = frame_current_pts - is->frame_last_pts;
1040     if (delay <= 0 || delay >= 10.0) {
1041         /* if incorrect delay, use previous one */
1042         delay = is->frame_last_delay;
1043     } else {
1044         is->frame_last_delay = delay;
1045     }
1046     is->frame_last_pts = frame_current_pts;
1047
1048     /* update delay to follow master synchronisation source */
1049     if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1050          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1051         /* if video is slave, we try to correct big delays by
1052            duplicating or deleting a frame */
1053         diff = get_video_clock(is) - get_master_clock(is);
1054
1055         /* skip or repeat frame. We take into account the
1056            delay to compute the threshold. I still don't know
1057            if it is the best guess */
1058         sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1059         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1060             if (diff <= -sync_threshold)
1061                 delay = 0;
1062             else if (diff >= sync_threshold)
1063                 delay = 2 * delay;
1064         }
1065     }
1066     is->frame_timer += delay;
1067
1068     av_dlog(NULL, "video: delay=%0.3f pts=%0.3f A-V=%f\n",
1069             delay, frame_current_pts, -diff);
1070
1071     return is->frame_timer;
1072 }
1073
1074 /* called to display each frame */
1075 static void video_refresh_timer(void *opaque)
1076 {
1077     VideoState *is = opaque;
1078     VideoPicture *vp;
1079
1080     SubPicture *sp, *sp2;
1081
1082     if (is->video_st) {
1083 retry:
1084         if (is->pictq_size == 0) {
1085             // nothing to do, no picture to display in the queue
1086         } else {
1087             double time= av_gettime()/1000000.0;
1088             double next_target;
1089             /* dequeue the picture */
1090             vp = &is->pictq[is->pictq_rindex];
1091
1092             if(time < vp->target_clock)
1093                 return;
1094             /* update current video pts */
1095             is->video_current_pts = vp->pts;
1096             is->video_current_pts_drift = is->video_current_pts - time;
1097             is->video_current_pos = vp->pos;
1098             if(is->pictq_size > 1){
1099                 VideoPicture *nextvp= &is->pictq[(is->pictq_rindex+1)%VIDEO_PICTURE_QUEUE_SIZE];
1100                 assert(nextvp->target_clock >= vp->target_clock);
1101                 next_target= nextvp->target_clock;
1102             }else{
1103                 next_target= vp->target_clock + is->video_clock - vp->pts; //FIXME pass durations cleanly
1104             }
1105             if(framedrop && time > next_target){
1106                 is->skip_frames *= 1.0 + FRAME_SKIP_FACTOR;
1107                 if(is->pictq_size > 1 || time > next_target + 0.5){
1108                     /* update queue size and signal for next picture */
1109                     if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1110                         is->pictq_rindex = 0;
1111
1112                     SDL_LockMutex(is->pictq_mutex);
1113                     is->pictq_size--;
1114                     SDL_CondSignal(is->pictq_cond);
1115                     SDL_UnlockMutex(is->pictq_mutex);
1116                     goto retry;
1117                 }
1118             }
1119
1120             if(is->subtitle_st) {
1121                 if (is->subtitle_stream_changed) {
1122                     SDL_LockMutex(is->subpq_mutex);
1123
1124                     while (is->subpq_size) {
1125                         free_subpicture(&is->subpq[is->subpq_rindex]);
1126
1127                         /* update queue size and signal for next picture */
1128                         if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1129                             is->subpq_rindex = 0;
1130
1131                         is->subpq_size--;
1132                     }
1133                     is->subtitle_stream_changed = 0;
1134
1135                     SDL_CondSignal(is->subpq_cond);
1136                     SDL_UnlockMutex(is->subpq_mutex);
1137                 } else {
1138                     if (is->subpq_size > 0) {
1139                         sp = &is->subpq[is->subpq_rindex];
1140
1141                         if (is->subpq_size > 1)
1142                             sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1143                         else
1144                             sp2 = NULL;
1145
1146                         if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1147                                 || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1148                         {
1149                             free_subpicture(sp);
1150
1151                             /* update queue size and signal for next picture */
1152                             if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1153                                 is->subpq_rindex = 0;
1154
1155                             SDL_LockMutex(is->subpq_mutex);
1156                             is->subpq_size--;
1157                             SDL_CondSignal(is->subpq_cond);
1158                             SDL_UnlockMutex(is->subpq_mutex);
1159                         }
1160                     }
1161                 }
1162             }
1163
1164             /* display picture */
1165             if (!display_disable)
1166                 video_display(is);
1167
1168             /* update queue size and signal for next picture */
1169             if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1170                 is->pictq_rindex = 0;
1171
1172             SDL_LockMutex(is->pictq_mutex);
1173             is->pictq_size--;
1174             SDL_CondSignal(is->pictq_cond);
1175             SDL_UnlockMutex(is->pictq_mutex);
1176         }
1177     } else if (is->audio_st) {
1178         /* draw the next audio frame */
1179
1180         /* if only audio stream, then display the audio bars (better
1181            than nothing, just to test the implementation) */
1182
1183         /* display picture */
1184         if (!display_disable)
1185             video_display(is);
1186     }
1187     if (show_status) {
1188         static int64_t last_time;
1189         int64_t cur_time;
1190         int aqsize, vqsize, sqsize;
1191         double av_diff;
1192
1193         cur_time = av_gettime();
1194         if (!last_time || (cur_time - last_time) >= 30000) {
1195             aqsize = 0;
1196             vqsize = 0;
1197             sqsize = 0;
1198             if (is->audio_st)
1199                 aqsize = is->audioq.size;
1200             if (is->video_st)
1201                 vqsize = is->videoq.size;
1202             if (is->subtitle_st)
1203                 sqsize = is->subtitleq.size;
1204             av_diff = 0;
1205             if (is->audio_st && is->video_st)
1206                 av_diff = get_audio_clock(is) - get_video_clock(is);
1207             printf("%7.2f A-V:%7.3f s:%3.1f aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
1208                    get_master_clock(is), av_diff, FFMAX(is->skip_frames-1, 0), aqsize / 1024, vqsize / 1024, sqsize, is->pts_ctx.num_faulty_dts, is->pts_ctx.num_faulty_pts);
1209             fflush(stdout);
1210             last_time = cur_time;
1211         }
1212     }
1213 }
1214
1215 static void stream_close(VideoState *is)
1216 {
1217     VideoPicture *vp;
1218     int i;
1219     /* XXX: use a special url_shutdown call to abort parse cleanly */
1220     is->abort_request = 1;
1221     SDL_WaitThread(is->parse_tid, NULL);
1222     SDL_WaitThread(is->refresh_tid, NULL);
1223
1224     /* free all pictures */
1225     for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
1226         vp = &is->pictq[i];
1227 #if CONFIG_AVFILTER
1228         if (vp->picref) {
1229             avfilter_unref_buffer(vp->picref);
1230             vp->picref = NULL;
1231         }
1232 #endif
1233         if (vp->bmp) {
1234             SDL_FreeYUVOverlay(vp->bmp);
1235             vp->bmp = NULL;
1236         }
1237     }
1238     SDL_DestroyMutex(is->pictq_mutex);
1239     SDL_DestroyCond(is->pictq_cond);
1240     SDL_DestroyMutex(is->subpq_mutex);
1241     SDL_DestroyCond(is->subpq_cond);
1242 #if !CONFIG_AVFILTER
1243     if (is->img_convert_ctx)
1244         sws_freeContext(is->img_convert_ctx);
1245 #endif
1246     av_free(is);
1247 }
1248
1249 static void do_exit(void)
1250 {
1251     if (cur_stream) {
1252         stream_close(cur_stream);
1253         cur_stream = NULL;
1254     }
1255     uninit_opts();
1256 #if CONFIG_AVFILTER
1257     avfilter_uninit();
1258 #endif
1259     if (show_status)
1260         printf("\n");
1261     SDL_Quit();
1262     av_log(NULL, AV_LOG_QUIET, "");
1263     exit(0);
1264 }
1265
1266 /* allocate a picture (needs to be done in the main thread to avoid
1267    potential locking problems) */
1268 static void alloc_picture(void *opaque)
1269 {
1270     VideoState *is = opaque;
1271     VideoPicture *vp;
1272
1273     vp = &is->pictq[is->pictq_windex];
1274
1275     if (vp->bmp)
1276         SDL_FreeYUVOverlay(vp->bmp);
1277
1278 #if CONFIG_AVFILTER
1279     if (vp->picref)
1280         avfilter_unref_buffer(vp->picref);
1281     vp->picref = NULL;
1282
1283     vp->width   = is->out_video_filter->inputs[0]->w;
1284     vp->height  = is->out_video_filter->inputs[0]->h;
1285     vp->pix_fmt = is->out_video_filter->inputs[0]->format;
1286 #else
1287     vp->width   = is->video_st->codec->width;
1288     vp->height  = is->video_st->codec->height;
1289     vp->pix_fmt = is->video_st->codec->pix_fmt;
1290 #endif
1291
1292     vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
1293                                    SDL_YV12_OVERLAY,
1294                                    screen);
1295     if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
1296         /* SDL allocates a buffer smaller than requested if the video
1297          * overlay hardware is unable to support the requested size. */
1298         fprintf(stderr, "Error: the video system does not support an image\n"
1299                         "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n"
1300                         "to reduce the image size.\n", vp->width, vp->height );
1301         do_exit();
1302     }
1303
1304     SDL_LockMutex(is->pictq_mutex);
1305     vp->allocated = 1;
1306     SDL_CondSignal(is->pictq_cond);
1307     SDL_UnlockMutex(is->pictq_mutex);
1308 }
1309
1310 /**
1311  *
1312  * @param pts the dts of the pkt / pts of the frame, guessed if not known
1313  */
1314 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, int64_t pos)
1315 {
1316     VideoPicture *vp;
1317 #if CONFIG_AVFILTER
1318     AVPicture pict_src;
1319 #else
1320     int dst_pix_fmt = PIX_FMT_YUV420P;
1321 #endif
1322     /* wait until we have space to put a new picture */
1323     SDL_LockMutex(is->pictq_mutex);
1324
1325     if(is->pictq_size>=VIDEO_PICTURE_QUEUE_SIZE && !is->refresh)
1326         is->skip_frames= FFMAX(1.0 - FRAME_SKIP_FACTOR, is->skip_frames * (1.0-FRAME_SKIP_FACTOR));
1327
1328     while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1329            !is->videoq.abort_request) {
1330         SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1331     }
1332     SDL_UnlockMutex(is->pictq_mutex);
1333
1334     if (is->videoq.abort_request)
1335         return -1;
1336
1337     vp = &is->pictq[is->pictq_windex];
1338
1339     /* alloc or resize hardware picture buffer */
1340     if (!vp->bmp ||
1341 #if CONFIG_AVFILTER
1342         vp->width  != is->out_video_filter->inputs[0]->w ||
1343         vp->height != is->out_video_filter->inputs[0]->h) {
1344 #else
1345         vp->width != is->video_st->codec->width ||
1346         vp->height != is->video_st->codec->height) {
1347 #endif
1348         SDL_Event event;
1349
1350         vp->allocated = 0;
1351
1352         /* the allocation must be done in the main thread to avoid
1353            locking problems */
1354         event.type = FF_ALLOC_EVENT;
1355         event.user.data1 = is;
1356         SDL_PushEvent(&event);
1357
1358         /* wait until the picture is allocated */
1359         SDL_LockMutex(is->pictq_mutex);
1360         while (!vp->allocated && !is->videoq.abort_request) {
1361             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1362         }
1363         SDL_UnlockMutex(is->pictq_mutex);
1364
1365         if (is->videoq.abort_request)
1366             return -1;
1367     }
1368
1369     /* if the frame is not skipped, then display it */
1370     if (vp->bmp) {
1371         AVPicture pict;
1372 #if CONFIG_AVFILTER
1373         if(vp->picref)
1374             avfilter_unref_buffer(vp->picref);
1375         vp->picref = src_frame->opaque;
1376 #endif
1377
1378         /* get a pointer to the bitmap */
1379         SDL_LockYUVOverlay (vp->bmp);
1380
1381         memset(&pict,0,sizeof(AVPicture));
1382         pict.data[0] = vp->bmp->pixels[0];
1383         pict.data[1] = vp->bmp->pixels[2];
1384         pict.data[2] = vp->bmp->pixels[1];
1385
1386         pict.linesize[0] = vp->bmp->pitches[0];
1387         pict.linesize[1] = vp->bmp->pitches[2];
1388         pict.linesize[2] = vp->bmp->pitches[1];
1389
1390 #if CONFIG_AVFILTER
1391         pict_src.data[0] = src_frame->data[0];
1392         pict_src.data[1] = src_frame->data[1];
1393         pict_src.data[2] = src_frame->data[2];
1394
1395         pict_src.linesize[0] = src_frame->linesize[0];
1396         pict_src.linesize[1] = src_frame->linesize[1];
1397         pict_src.linesize[2] = src_frame->linesize[2];
1398
1399         //FIXME use direct rendering
1400         av_picture_copy(&pict, &pict_src,
1401                         vp->pix_fmt, vp->width, vp->height);
1402 #else
1403         sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
1404         is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
1405             vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
1406             dst_pix_fmt, sws_flags, NULL, NULL, NULL);
1407         if (is->img_convert_ctx == NULL) {
1408             fprintf(stderr, "Cannot initialize the conversion context\n");
1409             exit(1);
1410         }
1411         sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1412                   0, vp->height, pict.data, pict.linesize);
1413 #endif
1414         /* update the bitmap content */
1415         SDL_UnlockYUVOverlay(vp->bmp);
1416
1417         vp->pts = pts;
1418         vp->pos = pos;
1419
1420         /* now we can update the picture count */
1421         if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1422             is->pictq_windex = 0;
1423         SDL_LockMutex(is->pictq_mutex);
1424         vp->target_clock= compute_target_time(vp->pts, is);
1425
1426         is->pictq_size++;
1427         SDL_UnlockMutex(is->pictq_mutex);
1428     }
1429     return 0;
1430 }
1431
1432 /**
1433  * compute the exact PTS for the picture if it is omitted in the stream
1434  * @param pts1 the dts of the pkt / pts of the frame
1435  */
1436 static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
1437 {
1438     double frame_delay, pts;
1439
1440     pts = pts1;
1441
1442     if (pts != 0) {
1443         /* update video clock with pts, if present */
1444         is->video_clock = pts;
1445     } else {
1446         pts = is->video_clock;
1447     }
1448     /* update video clock for next frame */
1449     frame_delay = av_q2d(is->video_st->codec->time_base);
1450     /* for MPEG2, the frame can be repeated, so we update the
1451        clock accordingly */
1452     frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1453     is->video_clock += frame_delay;
1454
1455     return queue_picture(is, src_frame, pts, pos);
1456 }
1457
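/* get a packet from the video queue and decode it; handles the flush packet by
   resetting decoder and timing state. Returns 1 if a frame should be displayed,
   0 otherwise, < 0 on abort */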
1458 static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
1459 {
1460     int got_picture, i;
1461
1462     if (packet_queue_get(&is->videoq, pkt, 1) < 0)
1463         return -1;
1464
1465     if (pkt->data == flush_pkt.data) {
1466         avcodec_flush_buffers(is->video_st->codec);
1467
1468         SDL_LockMutex(is->pictq_mutex);
1469         // Make sure there are no long-delay timers (ideally we should just flush the queue, but that's harder)
1470         for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
1471             is->pictq[i].target_clock= 0;
1472         }
1473         while (is->pictq_size && !is->videoq.abort_request) {
1474             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1475         }
1476         is->video_current_pos = -1;
1477         SDL_UnlockMutex(is->pictq_mutex);
1478
1479         init_pts_correction(&is->pts_ctx);
1480         is->frame_last_pts = AV_NOPTS_VALUE;
1481         is->frame_last_delay = 0;
1482         is->frame_timer = (double)av_gettime() / 1000000.0;
1483         is->skip_frames = 1;
1484         is->skip_frames_index = 0;
1485         return 0;
1486     }
1487
1488     avcodec_decode_video2(is->video_st->codec, frame, &got_picture, pkt);
1489
1490     if (got_picture) {
1491         if (decoder_reorder_pts == -1) {
1492             *pts = guess_correct_pts(&is->pts_ctx, frame->pkt_pts, frame->pkt_dts);
1493         } else if (decoder_reorder_pts) {
1494             *pts = frame->pkt_pts;
1495         } else {
1496             *pts = frame->pkt_dts;
1497         }
1498
1499         if (*pts == AV_NOPTS_VALUE) {
1500             *pts = 0;
1501         }
1502
1503         is->skip_frames_index += 1;
1504         if(is->skip_frames_index >= is->skip_frames){
1505             is->skip_frames_index -= FFMAX(is->skip_frames, 1.0);
1506             return 1;
1507         }
1508
1509     }
1510     return 0;
1511 }
1512
1513 #if CONFIG_AVFILTER
1514 typedef struct {
1515     VideoState *is;
1516     AVFrame *frame;
1517     int use_dr1;
1518 } FilterPriv;
1519
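/* get_buffer() callback used for direct rendering: the decoder writes into a
   buffer obtained from the filter link, avoiding an extra copy */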
1520 static int input_get_buffer(AVCodecContext *codec, AVFrame *pic)
1521 {
1522     AVFilterContext *ctx = codec->opaque;
1523     AVFilterBufferRef  *ref;
1524     int perms = AV_PERM_WRITE;
1525     int i, w, h, stride[4];
1526     unsigned edge;
1527     int pixel_size;
1528
1529     if (codec->codec->capabilities & CODEC_CAP_NEG_LINESIZES)
1530         perms |= AV_PERM_NEG_LINESIZES;
1531
1532     if(pic->buffer_hints & FF_BUFFER_HINTS_VALID) {
1533         if(pic->buffer_hints & FF_BUFFER_HINTS_READABLE) perms |= AV_PERM_READ;
1534         if(pic->buffer_hints & FF_BUFFER_HINTS_PRESERVE) perms |= AV_PERM_PRESERVE;
1535         if(pic->buffer_hints & FF_BUFFER_HINTS_REUSABLE) perms |= AV_PERM_REUSE2;
1536     }
1537     if(pic->reference) perms |= AV_PERM_READ | AV_PERM_PRESERVE;
1538
1539     w = codec->width;
1540     h = codec->height;
1541     avcodec_align_dimensions2(codec, &w, &h, stride);
1542     edge = codec->flags & CODEC_FLAG_EMU_EDGE ? 0 : avcodec_get_edge_width();
1543     w += edge << 1;
1544     h += edge << 1;
1545
1546     if(!(ref = avfilter_get_video_buffer(ctx->outputs[0], perms, w, h)))
1547         return -1;
1548
1549     pixel_size = av_pix_fmt_descriptors[ref->format].comp[0].step_minus1+1;
1550     ref->video->w = codec->width;
1551     ref->video->h = codec->height;
1552     for(i = 0; i < 4; i ++) {
1553         unsigned hshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_w : 0;
1554         unsigned vshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_h : 0;
1555
1556         if (ref->data[i]) {
1557             ref->data[i]    += ((edge * pixel_size) >> hshift) + ((edge * ref->linesize[i]) >> vshift);
1558         }
1559         pic->data[i]     = ref->data[i];
1560         pic->linesize[i] = ref->linesize[i];
1561     }
1562     pic->opaque = ref;
1563     pic->age    = INT_MAX;
1564     pic->type   = FF_BUFFER_TYPE_USER;
1565     pic->reordered_opaque = codec->reordered_opaque;
1566     if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
1567     else           pic->pkt_pts = AV_NOPTS_VALUE;
1568     return 0;
1569 }
1570
1571 static void input_release_buffer(AVCodecContext *codec, AVFrame *pic)
1572 {
1573     memset(pic->data, 0, sizeof(pic->data));
1574     avfilter_unref_buffer(pic->opaque);
1575 }
1576
1577 static int input_reget_buffer(AVCodecContext *codec, AVFrame *pic)
1578 {
1579     AVFilterBufferRef *ref = pic->opaque;
1580
1581     if (pic->data[0] == NULL) {
1582         pic->buffer_hints |= FF_BUFFER_HINTS_READABLE;
1583         return codec->get_buffer(codec, pic);
1584     }
1585
1586     if ((codec->width != ref->video->w) || (codec->height != ref->video->h) ||
1587         (codec->pix_fmt != ref->format)) {
1588         av_log(codec, AV_LOG_ERROR, "Picture properties changed.\n");
1589         return -1;
1590     }
1591
1592     pic->reordered_opaque = codec->reordered_opaque;
1593     if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
1594     else           pic->pkt_pts = AV_NOPTS_VALUE;
1595     return 0;
1596 }
1597
1598 static int input_init(AVFilterContext *ctx, const char *args, void *opaque)
1599 {
1600     FilterPriv *priv = ctx->priv;
1601     AVCodecContext *codec;
1602     if(!opaque) return -1;
1603
1604     priv->is = opaque;
1605     codec    = priv->is->video_st->codec;
1606     codec->opaque = ctx;
1607     if(codec->codec->capabilities & CODEC_CAP_DR1) {
1608         priv->use_dr1 = 1;
1609         codec->get_buffer     = input_get_buffer;
1610         codec->release_buffer = input_release_buffer;
1611         codec->reget_buffer   = input_reget_buffer;
1612         codec->thread_safe_callbacks = 1;
1613     }
1614
1615     priv->frame = avcodec_alloc_frame();
1616
1617     return 0;
1618 }
1619
1620 static void input_uninit(AVFilterContext *ctx)
1621 {
1622     FilterPriv *priv = ctx->priv;
1623     av_free(priv->frame);
1624 }
1625
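/* decode the next video frame and push it into the filter chain, reusing the
   direct-rendering buffer when possible */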
1626 static int input_request_frame(AVFilterLink *link)
1627 {
1628     FilterPriv *priv = link->src->priv;
1629     AVFilterBufferRef *picref;
1630     int64_t pts = 0;
1631     AVPacket pkt;
1632     int ret;
1633
1634     while (!(ret = get_video_frame(priv->is, priv->frame, &pts, &pkt)))
1635         av_free_packet(&pkt);
1636     if (ret < 0)
1637         return -1;
1638
1639     if(priv->use_dr1) {
1640         picref = avfilter_ref_buffer(priv->frame->opaque, ~0);
1641     } else {
1642         picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, link->w, link->h);
1643         av_image_copy(picref->data, picref->linesize,
1644                       priv->frame->data, priv->frame->linesize,
1645                       picref->format, link->w, link->h);
1646     }
1647     av_free_packet(&pkt);
1648
1649     picref->pts = pts;
1650     picref->pos = pkt.pos;
1651     picref->video->pixel_aspect = priv->is->video_st->codec->sample_aspect_ratio;
1652     avfilter_start_frame(link, picref);
1653     avfilter_draw_slice(link, 0, link->h, 1);
1654     avfilter_end_frame(link);
1655
1656     return 0;
1657 }
1658
1659 static int input_query_formats(AVFilterContext *ctx)
1660 {
1661     FilterPriv *priv = ctx->priv;
1662     enum PixelFormat pix_fmts[] = {
1663         priv->is->video_st->codec->pix_fmt, PIX_FMT_NONE
1664     };
1665
1666     avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts));
1667     return 0;
1668 }
1669
1670 static int input_config_props(AVFilterLink *link)
1671 {
1672     FilterPriv *priv  = link->src->priv;
1673     AVCodecContext *c = priv->is->video_st->codec;
1674
1675     link->w = c->width;
1676     link->h = c->height;
1677     link->time_base = priv->is->video_st->time_base;
1678
1679     return 0;
1680 }
1681
1682 static AVFilter input_filter =
1683 {
1684     .name      = "ffplay_input",
1685
1686     .priv_size = sizeof(FilterPriv),
1687
1688     .init      = input_init,
1689     .uninit    = input_uninit,
1690
1691     .query_formats = input_query_formats,
1692
1693     .inputs    = (AVFilterPad[]) {{ .name = NULL }},
1694     .outputs   = (AVFilterPad[]) {{ .name = "default",
1695                                     .type = AVMEDIA_TYPE_VIDEO,
1696                                     .request_frame = input_request_frame,
1697                                     .config_props  = input_config_props, },
1698                                   { .name = NULL }},
1699 };
1700
1701 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters)
1702 {
1703     char sws_flags_str[128];
1704     int ret;
1705     FFSinkContext ffsink_ctx = { .pix_fmt = PIX_FMT_YUV420P };
1706     AVFilterContext *filt_src = NULL, *filt_out = NULL;
1707     snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%d", sws_flags);
1708     graph->scale_sws_opts = av_strdup(sws_flags_str);
1709
1710     if ((ret = avfilter_graph_create_filter(&filt_src, &input_filter, "src",
1711                                             NULL, is, graph)) < 0)
1712         goto the_end;
1713     if ((ret = avfilter_graph_create_filter(&filt_out, &ffsink, "out",
1714                                             NULL, &ffsink_ctx, graph)) < 0)
1715         goto the_end;
1716
1717     if(vfilters) {
1718         AVFilterInOut *outputs = av_malloc(sizeof(AVFilterInOut));
1719         AVFilterInOut *inputs  = av_malloc(sizeof(AVFilterInOut));
1720
1721         outputs->name    = av_strdup("in");
1722         outputs->filter_ctx = filt_src;
1723         outputs->pad_idx = 0;
1724         outputs->next    = NULL;
1725
1726         inputs->name    = av_strdup("out");
1727         inputs->filter_ctx = filt_out;
1728         inputs->pad_idx = 0;
1729         inputs->next    = NULL;
1730
1731         if ((ret = avfilter_graph_parse(graph, vfilters, inputs, outputs, NULL)) < 0)
1732             goto the_end;
1733         av_freep(&vfilters);
1734     } else {
1735         if ((ret = avfilter_link(filt_src, 0, filt_out, 0)) < 0)
1736             goto the_end;
1737     }
1738
1739     if ((ret = avfilter_graph_config(graph, NULL)) < 0)
1740         goto the_end;
1741
1742     is->out_video_filter = filt_out;
1743 the_end:
1744     return ret;
1745 }
1746
1747 #endif  /* CONFIG_AVFILTER */
1748
1749 static int video_thread(void *arg)
1750 {
1751     VideoState *is = arg;
1752     AVFrame *frame= avcodec_alloc_frame();
1753     int64_t pts_int;
1754     double pts;
1755     int ret;
1756
1757 #if CONFIG_AVFILTER
1758     AVFilterGraph *graph = avfilter_graph_alloc();
1759     AVFilterContext *filt_out = NULL;
1760     int64_t pos;
1761
1762     if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
1763         goto the_end;
1764     filt_out = is->out_video_filter;
1765 #endif
1766
1767     for(;;) {
1768 #if !CONFIG_AVFILTER
1769         AVPacket pkt;
1770 #else
1771         AVFilterBufferRef *picref;
1772         AVRational tb;
1773 #endif
1774         while (is->paused && !is->videoq.abort_request)
1775             SDL_Delay(10);
1776 #if CONFIG_AVFILTER
1777         ret = get_filtered_video_frame(filt_out, frame, &picref, &tb);
1778         if (picref) {
1779             pts_int = picref->pts;
1780             pos     = picref->pos;
1781             frame->opaque = picref;
1782         }
1783
1784         if (av_cmp_q(tb, is->video_st->time_base)) {
1785             av_unused int64_t pts1 = pts_int;
1786             pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
1787             av_dlog(NULL, "video_thread(): "
1788                     "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
1789                     tb.num, tb.den, pts1,
1790                     is->video_st->time_base.num, is->video_st->time_base.den, pts_int);
1791         }
1792 #else
1793         ret = get_video_frame(is, frame, &pts_int, &pkt);
1794 #endif
1795
1796         if (ret < 0) goto the_end;
1797
1798         if (!ret)
1799             continue;
1800
1801         pts = pts_int*av_q2d(is->video_st->time_base);
1802
1803 #if CONFIG_AVFILTER
1804         ret = output_picture2(is, frame, pts, pos);
1805 #else
1806         ret = output_picture2(is, frame, pts,  pkt.pos);
1807         av_free_packet(&pkt);
1808 #endif
1809         if (ret < 0)
1810             goto the_end;
1811
1812         if (step)
1813             if (cur_stream)
1814                 stream_pause(cur_stream);
1815     }
1816  the_end:
1817 #if CONFIG_AVFILTER
1818     avfilter_graph_free(&graph);
1819 #endif
1820     av_free(frame);
1821     return 0;
1822 }
1823
1824 static int subtitle_thread(void *arg)
1825 {
1826     VideoState *is = arg;
1827     SubPicture *sp;
1828     AVPacket pkt1, *pkt = &pkt1;
1829     int got_subtitle;
1830     double pts;
1831     int i, j;
1832     int r, g, b, y, u, v, a;
1833
1834     for(;;) {
1835         while (is->paused && !is->subtitleq.abort_request) {
1836             SDL_Delay(10);
1837         }
1838         if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
1839             break;
1840
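         /* a flush packet is queued after a seek: reset the decoder state
            and wait for fresh data */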
1841         if(pkt->data == flush_pkt.data){
1842             avcodec_flush_buffers(is->subtitle_st->codec);
1843             continue;
1844         }
1845         SDL_LockMutex(is->subpq_mutex);
1846         while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
1847                !is->subtitleq.abort_request) {
1848             SDL_CondWait(is->subpq_cond, is->subpq_mutex);
1849         }
1850         SDL_UnlockMutex(is->subpq_mutex);
1851
1852         if (is->subtitleq.abort_request)
1853             goto the_end;
1854
1855         sp = &is->subpq[is->subpq_windex];
1856
1857         /* NOTE: pts is the PTS of the _first_ subtitle beginning in
1858            this packet, if any */
1859         pts = 0;
1860         if (pkt->pts != AV_NOPTS_VALUE)
1861             pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;
1862
1863         avcodec_decode_subtitle2(is->subtitle_st->codec, &sp->sub,
1864                                  &got_subtitle, pkt);
1865
1866         if (got_subtitle && sp->sub.format == 0) {
1867             sp->pts = pts;
1868
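             /* convert the palette of each bitmap subtitle rect from RGBA
                to YUVA in place, using the RGB_TO_*_CCIR macros */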
1869             for (i = 0; i < sp->sub.num_rects; i++)
1870             {
1871                 for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
1872                 {
1873                     RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
1874                     y = RGB_TO_Y_CCIR(r, g, b);
1875                     u = RGB_TO_U_CCIR(r, g, b, 0);
1876                     v = RGB_TO_V_CCIR(r, g, b, 0);
1877                     YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
1878                 }
1879             }
1880
1881             /* now we can update the picture count */
1882             if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
1883                 is->subpq_windex = 0;
1884             SDL_LockMutex(is->subpq_mutex);
1885             is->subpq_size++;
1886             SDL_UnlockMutex(is->subpq_mutex);
1887         }
1888         av_free_packet(pkt);
1889     }
1890  the_end:
1891     return 0;
1892 }
1893
1894 /* copy samples for display in the audio visualization window */
1895 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1896 {
1897     int size, len;
1898
1899     size = samples_size / sizeof(short);
1900     while (size > 0) {
1901         len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1902         if (len > size)
1903             len = size;
1904         memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1905         samples += len;
1906         is->sample_array_index += len;
1907         if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1908             is->sample_array_index = 0;
1909         size -= len;
1910     }
1911 }
1912
1913 /* return the new audio buffer size (samples can be added or deleted
1914    to get better sync if the video or external clock is the master) */
1915 static int synchronize_audio(VideoState *is, short *samples,
1916                              int samples_size1, double pts)
1917 {
1918     int n, samples_size;
1919     double ref_clock;
1920
1921     n = 2 * is->audio_st->codec->channels;
1922     samples_size = samples_size1;
1923
1924     /* if not master, then we try to remove or add samples to correct the clock */
1925     if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1926          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1927         double diff, avg_diff;
1928         int wanted_size, min_size, max_size, nb_samples;
1929
1930         ref_clock = get_master_clock(is);
1931         diff = get_audio_clock(is) - ref_clock;
1932
1933         if (diff < AV_NOSYNC_THRESHOLD) {
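             /* audio_diff_cum is an exponentially weighted sum of past A-V
                differences; multiplying by (1 - audio_diff_avg_coef) below
                turns it into a weighted average */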
1934             is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1935             if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1936                 /* not enough measurements to have a correct estimate */
1937                 is->audio_diff_avg_count++;
1938             } else {
1939                 /* estimate the A-V difference */
1940                 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1941
1942                 if (fabs(avg_diff) >= is->audio_diff_threshold) {
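                         /* convert the clock error to a byte count:
                            diff seconds * sample_rate * n bytes per sample
                            frame, then clamp the correction to at most
                            SAMPLE_CORRECTION_PERCENT_MAX percent */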
1943                     wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
1944                     nb_samples = samples_size / n;
1945
1946                     min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1947                     max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1948                     if (wanted_size < min_size)
1949                         wanted_size = min_size;
1950                     else if (wanted_size > max_size)
1951                         wanted_size = max_size;
1952
1953                     /* add or remove samples to correct the synchronization */
1954                     if (wanted_size < samples_size) {
1955                         /* remove samples */
1956                         samples_size = wanted_size;
1957                     } else if (wanted_size > samples_size) {
1958                         uint8_t *samples_end, *q;
1959                         int nb;
1960
1961                         /* add samples by repeating the last ones */
1962                         nb = (wanted_size - samples_size);
1963                         samples_end = (uint8_t *)samples + samples_size - n;
1964                         q = samples_end + n;
1965                         while (nb > 0) {
1966                             memcpy(q, samples_end, n);
1967                             q += n;
1968                             nb -= n;
1969                         }
1970                         samples_size = wanted_size;
1971                     }
1972                 }
1973                 av_dlog(NULL, "diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
1974                         diff, avg_diff, samples_size - samples_size1,
1975                         is->audio_clock, is->video_clock, is->audio_diff_threshold);
1976             }
1977         } else {
1978             /* too big a difference: probably initial PTS errors, so
1979                reset the A-V filter */
1980             is->audio_diff_avg_count = 0;
1981             is->audio_diff_cum = 0;
1982         }
1983     }
1984
1985     return samples_size;
1986 }
1987
1988 /* decode one audio frame and return its uncompressed size */
1989 static int audio_decode_frame(VideoState *is, double *pts_ptr)
1990 {
1991     AVPacket *pkt_temp = &is->audio_pkt_temp;
1992     AVPacket *pkt = &is->audio_pkt;
1993     AVCodecContext *dec= is->audio_st->codec;
1994     int n, len1, data_size;
1995     double pts;
1996
1997     for(;;) {
1998         /* NOTE: the audio packet can contain several frames */
1999         while (pkt_temp->size > 0) {
2000             data_size = sizeof(is->audio_buf1);
2001             len1 = avcodec_decode_audio3(dec,
2002                                         (int16_t *)is->audio_buf1, &data_size,
2003                                         pkt_temp);
2004             if (len1 < 0) {
2005                 /* if error, we skip the frame */
2006                 pkt_temp->size = 0;
2007                 break;
2008             }
2009
2010             pkt_temp->data += len1;
2011             pkt_temp->size -= len1;
2012             if (data_size <= 0)
2013                 continue;
2014
2015             if (dec->sample_fmt != is->audio_src_fmt) {
2016                 if (is->reformat_ctx)
2017                     av_audio_convert_free(is->reformat_ctx);
2018                 is->reformat_ctx= av_audio_convert_alloc(AV_SAMPLE_FMT_S16, 1,
2019                                                          dec->sample_fmt, 1, NULL, 0);
2020                 if (!is->reformat_ctx) {
2021                     fprintf(stderr, "Cannot convert %s sample format to %s sample format\n",
2022                         av_get_sample_fmt_name(dec->sample_fmt),
2023                         av_get_sample_fmt_name(AV_SAMPLE_FMT_S16));
2024                         break;
2025                 }
2026                 is->audio_src_fmt= dec->sample_fmt;
2027             }
2028
2029             if (is->reformat_ctx) {
2030                 const void *ibuf[6]= {is->audio_buf1};
2031                 void *obuf[6]= {is->audio_buf2};
2032                 int istride[6]= {av_get_bytes_per_sample(dec->sample_fmt)};
2033                 int ostride[6]= {2};
2034                 int len= data_size/istride[0];
2035                 if (av_audio_convert(is->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) {
2036                     printf("av_audio_convert() failed\n");
2037                     break;
2038                 }
2039                 is->audio_buf= is->audio_buf2;
2040                 /* FIXME: existing code assumes that data_size equals framesize*channels*2;
2041                           remove this legacy cruft */
2042                 data_size= len*2;
2043             }else{
2044                 is->audio_buf= is->audio_buf1;
2045             }
2046
2047             /* if no pts, then compute it */
2048             pts = is->audio_clock;
2049             *pts_ptr = pts;
2050             n = 2 * dec->channels;
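             /* advance the audio clock by the duration of the decoded data,
                assuming 16 bit samples: data_size / (channels * 2 * rate) */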
2051             is->audio_clock += (double)data_size /
2052                 (double)(n * dec->sample_rate);
2053 #ifdef DEBUG
2054             {
2055                 static double last_clock;
2056                 printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
2057                        is->audio_clock - last_clock,
2058                        is->audio_clock, pts);
2059                 last_clock = is->audio_clock;
2060             }
2061 #endif
2062             return data_size;
2063         }
2064
2065         /* free the current packet */
2066         if (pkt->data)
2067             av_free_packet(pkt);
2068
2069         if (is->paused || is->audioq.abort_request) {
2070             return -1;
2071         }
2072
2073         /* read next packet */
2074         if (packet_queue_get(&is->audioq, pkt, 1) < 0)
2075             return -1;
2076         if(pkt->data == flush_pkt.data){
2077             avcodec_flush_buffers(dec);
2078             continue;
2079         }
2080
2081         pkt_temp->data = pkt->data;
2082         pkt_temp->size = pkt->size;
2083
2084         /* update the audio clock with the pts if available */
2085         if (pkt->pts != AV_NOPTS_VALUE) {
2086             is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
2087         }
2088     }
2089 }
2090
2091 /* prepare a new audio buffer */
2092 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2093 {
2094     VideoState *is = opaque;
2095     int audio_size, len1;
2096     double pts;
2097
2098     audio_callback_time = av_gettime();
2099
2100     while (len > 0) {
2101         if (is->audio_buf_index >= is->audio_buf_size) {
2102            audio_size = audio_decode_frame(is, &pts);
2103            if (audio_size < 0) {
2104                 /* if error, just output silence */
2105                is->audio_buf = is->audio_buf1;
2106                is->audio_buf_size = 1024;
2107                memset(is->audio_buf, 0, is->audio_buf_size);
2108            } else {
2109                if (is->show_audio)
2110                    update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2111                audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
2112                                               pts);
2113                is->audio_buf_size = audio_size;
2114            }
2115            is->audio_buf_index = 0;
2116         }
2117         len1 = is->audio_buf_size - is->audio_buf_index;
2118         if (len1 > len)
2119             len1 = len;
2120         memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2121         len -= len1;
2122         stream += len1;
2123         is->audio_buf_index += len1;
2124     }
2125 }
2126
2127 /* open a given stream. Return 0 if OK */
2128 static int stream_component_open(VideoState *is, int stream_index)
2129 {
2130     AVFormatContext *ic = is->ic;
2131     AVCodecContext *avctx;
2132     AVCodec *codec;
2133     SDL_AudioSpec wanted_spec, spec;
2134
2135     if (stream_index < 0 || stream_index >= ic->nb_streams)
2136         return -1;
2137     avctx = ic->streams[stream_index]->codec;
2138
2139     /* request that the decoder output at most 2 channels */
2140     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2141         if (avctx->channels > 0) {
2142             avctx->request_channels = FFMIN(2, avctx->channels);
2143         } else {
2144             avctx->request_channels = 2;
2145         }
2146     }
2147
2148     codec = avcodec_find_decoder(avctx->codec_id);
2149     avctx->debug_mv = debug_mv;
2150     avctx->debug = debug;
2151     avctx->workaround_bugs = workaround_bugs;
2152     avctx->lowres = lowres;
2153     if(lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
2154     avctx->idct_algo= idct;
2155     if(fast) avctx->flags2 |= CODEC_FLAG2_FAST;
2156     avctx->skip_frame= skip_frame;
2157     avctx->skip_idct= skip_idct;
2158     avctx->skip_loop_filter= skip_loop_filter;
2159     avctx->error_recognition= error_recognition;
2160     avctx->error_concealment= error_concealment;
2161     avctx->thread_count= thread_count;
2162
2163     set_context_opts(avctx, avcodec_opts[avctx->codec_type], 0, codec);
2164
2165     if (!codec ||
2166         avcodec_open(avctx, codec) < 0)
2167         return -1;
2168
2169     /* prepare audio output */
2170     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2171         wanted_spec.freq = avctx->sample_rate;
2172         wanted_spec.format = AUDIO_S16SYS;
2173         wanted_spec.channels = avctx->channels;
2174         wanted_spec.silence = 0;
2175         wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
2176         wanted_spec.callback = sdl_audio_callback;
2177         wanted_spec.userdata = is;
2178         if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2179             fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
2180             return -1;
2181         }
2182         is->audio_hw_buf_size = spec.size;
2183         is->audio_src_fmt= AV_SAMPLE_FMT_S16;
2184     }
2185
2186     ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2187     switch(avctx->codec_type) {
2188     case AVMEDIA_TYPE_AUDIO:
2189         is->audio_stream = stream_index;
2190         is->audio_st = ic->streams[stream_index];
2191         is->audio_buf_size = 0;
2192         is->audio_buf_index = 0;
2193
2194         /* init averaging filter */
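         /* with this coefficient the weight of an old difference decays to
            1% after AUDIO_DIFF_AVG_NB updates: coef = 0.01^(1/AUDIO_DIFF_AVG_NB) */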
2195         is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2196         is->audio_diff_avg_count = 0;
2197         /* since we do not have a precise enough audio FIFO fullness measure,
2198            we correct audio sync only if the error is larger than this threshold */
2199         is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / avctx->sample_rate;
2200
2201         memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
2202         packet_queue_init(&is->audioq);
2203         SDL_PauseAudio(0);
2204         break;
2205     case AVMEDIA_TYPE_VIDEO:
2206         is->video_stream = stream_index;
2207         is->video_st = ic->streams[stream_index];
2208
2209         packet_queue_init(&is->videoq);
2210         is->video_tid = SDL_CreateThread(video_thread, is);
2211         break;
2212     case AVMEDIA_TYPE_SUBTITLE:
2213         is->subtitle_stream = stream_index;
2214         is->subtitle_st = ic->streams[stream_index];
2215         packet_queue_init(&is->subtitleq);
2216
2217         is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
2218         break;
2219     default:
2220         break;
2221     }
2222     return 0;
2223 }
2224
2225 static void stream_component_close(VideoState *is, int stream_index)
2226 {
2227     AVFormatContext *ic = is->ic;
2228     AVCodecContext *avctx;
2229
2230     if (stream_index < 0 || stream_index >= ic->nb_streams)
2231         return;
2232     avctx = ic->streams[stream_index]->codec;
2233
2234     switch(avctx->codec_type) {
2235     case AVMEDIA_TYPE_AUDIO:
2236         packet_queue_abort(&is->audioq);
2237
2238         SDL_CloseAudio();
2239
2240         packet_queue_end(&is->audioq);
2241         if (is->reformat_ctx)
2242             av_audio_convert_free(is->reformat_ctx);
2243         is->reformat_ctx = NULL;
2244         break;
2245     case AVMEDIA_TYPE_VIDEO:
2246         packet_queue_abort(&is->videoq);
2247
2248         /* note: we also signal this condition to make sure we unblock the
2249            video thread in all cases */
2250         SDL_LockMutex(is->pictq_mutex);
2251         SDL_CondSignal(is->pictq_cond);
2252         SDL_UnlockMutex(is->pictq_mutex);
2253
2254         SDL_WaitThread(is->video_tid, NULL);
2255
2256         packet_queue_end(&is->videoq);
2257         break;
2258     case AVMEDIA_TYPE_SUBTITLE:
2259         packet_queue_abort(&is->subtitleq);
2260
2261         /* note: we also signal this condition to make sure we unblock the
2262            subtitle thread in all cases */
2263         SDL_LockMutex(is->subpq_mutex);
2264         is->subtitle_stream_changed = 1;
2265
2266         SDL_CondSignal(is->subpq_cond);
2267         SDL_UnlockMutex(is->subpq_mutex);
2268
2269         SDL_WaitThread(is->subtitle_tid, NULL);
2270
2271         packet_queue_end(&is->subtitleq);
2272         break;
2273     default:
2274         break;
2275     }
2276
2277     ic->streams[stream_index]->discard = AVDISCARD_ALL;
2278     avcodec_close(avctx);
2279     switch(avctx->codec_type) {
2280     case AVMEDIA_TYPE_AUDIO:
2281         is->audio_st = NULL;
2282         is->audio_stream = -1;
2283         break;
2284     case AVMEDIA_TYPE_VIDEO:
2285         is->video_st = NULL;
2286         is->video_stream = -1;
2287         break;
2288     case AVMEDIA_TYPE_SUBTITLE:
2289         is->subtitle_st = NULL;
2290         is->subtitle_stream = -1;
2291         break;
2292     default:
2293         break;
2294     }
2295 }
2296
2297 /* since we have only one decoding thread, we can use a global
2298    variable instead of a thread local variable */
2299 static VideoState *global_video_state;
2300
2301 static int decode_interrupt_cb(void)
2302 {
2303     return (global_video_state && global_video_state->abort_request);
2304 }
2305
2306 /* this thread gets the stream from the disk or the network */
2307 static int decode_thread(void *arg)
2308 {
2309     VideoState *is = arg;
2310     AVFormatContext *ic = NULL;
2311     int err, i, ret;
2312     int st_index[AVMEDIA_TYPE_NB];
2313     AVPacket pkt1, *pkt = &pkt1;
2314     int eof=0;
2315     int pkt_in_play_range = 0;
2316     AVDictionaryEntry *t;
2317
2318     memset(st_index, -1, sizeof(st_index));
2319     is->video_stream = -1;
2320     is->audio_stream = -1;
2321     is->subtitle_stream = -1;
2322
2323     global_video_state = is;
2324     avio_set_interrupt_cb(decode_interrupt_cb);
2325
2326     err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
2327     if (err < 0) {
2328         print_error(is->filename, err);
2329         ret = -1;
2330         goto fail;
2331     }
2332     if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2333         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2334         ret = AVERROR_OPTION_NOT_FOUND;
2335         goto fail;
2336     }
2337     is->ic = ic;
2338
2339     if(genpts)
2340         ic->flags |= AVFMT_FLAG_GENPTS;
2341
2342     /* Set AVCodecContext options so they will be seen by av_find_stream_info() */
2343     for (i = 0; i < ic->nb_streams; i++) {
2344         AVCodecContext *dec = ic->streams[i]->codec;
2345         switch (dec->codec_type) {
2346         case AVMEDIA_TYPE_AUDIO:
2347             set_context_opts(dec, avcodec_opts[AVMEDIA_TYPE_AUDIO],
2348                              AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_DECODING_PARAM,
2349                              NULL);
2350             break;
2351         case AVMEDIA_TYPE_VIDEO:
2352             set_context_opts(dec, avcodec_opts[AVMEDIA_TYPE_VIDEO],
2353                              AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM,
2354                              NULL);
2355             break;
2356         }
2357     }
2358
2359     err = av_find_stream_info(ic);
2360     if (err < 0) {
2361         fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
2362         ret = -1;
2363         goto fail;
2364     }
2365     if(ic->pb)
2366         ic->pb->eof_reached= 0; //FIXME hack, ffplay maybe should not use url_feof() to test for the end
2367
2368     if(seek_by_bytes<0)
2369         seek_by_bytes= !!(ic->iformat->flags & AVFMT_TS_DISCONT);
2370
2371     /* if seeking requested, we execute it */
2372     if (start_time != AV_NOPTS_VALUE) {
2373         int64_t timestamp;
2374
2375         timestamp = start_time;
2376         /* add the stream start time */
2377         if (ic->start_time != AV_NOPTS_VALUE)
2378             timestamp += ic->start_time;
2379         ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2380         if (ret < 0) {
2381             fprintf(stderr, "%s: could not seek to position %0.3f\n",
2382                     is->filename, (double)timestamp / AV_TIME_BASE);
2383         }
2384     }
2385
2386     for (i = 0; i < ic->nb_streams; i++)
2387         ic->streams[i]->discard = AVDISCARD_ALL;
2388     if (!video_disable)
2389         st_index[AVMEDIA_TYPE_VIDEO] =
2390             av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2391                                 wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2392     if (!audio_disable)
2393         st_index[AVMEDIA_TYPE_AUDIO] =
2394             av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2395                                 wanted_stream[AVMEDIA_TYPE_AUDIO],
2396                                 st_index[AVMEDIA_TYPE_VIDEO],
2397                                 NULL, 0);
2398     if (!video_disable)
2399         st_index[AVMEDIA_TYPE_SUBTITLE] =
2400             av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2401                                 wanted_stream[AVMEDIA_TYPE_SUBTITLE],
2402                                 (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2403                                  st_index[AVMEDIA_TYPE_AUDIO] :
2404                                  st_index[AVMEDIA_TYPE_VIDEO]),
2405                                 NULL, 0);
2406     if (show_status) {
2407         av_dump_format(ic, 0, is->filename, 0);
2408     }
2409
2410     /* open the streams */
2411     if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2412         stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2413     }
2414
2415     ret=-1;
2416     if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2417         ret= stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2418     }
2419     is->refresh_tid = SDL_CreateThread(refresh_thread, is);
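     /* if no video stream could be opened, fall back to the audio
        visualization (show_audio == 2 selects the spectrum display) */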
2420     if(ret<0) {
2421         if (!display_disable)
2422             is->show_audio = 2;
2423     }
2424
2425     if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2426         stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2427     }
2428
2429     if (is->video_stream < 0 && is->audio_stream < 0) {
2430         fprintf(stderr, "%s: could not open codecs\n", is->filename);
2431         ret = -1;
2432         goto fail;
2433     }
2434
2435     for(;;) {
2436         if (is->abort_request)
2437             break;
2438         if (is->paused != is->last_paused) {
2439             is->last_paused = is->paused;
2440             if (is->paused)
2441                 is->read_pause_return= av_read_pause(ic);
2442             else
2443                 av_read_play(ic);
2444         }
2445 #if CONFIG_RTSP_DEMUXER
2446         if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
2447             /* wait 10 ms to avoid trying to get another packet */
2448             /* XXX: horrible */
2449             SDL_Delay(10);
2450             continue;
2451         }
2452 #endif
2453         if (is->seek_req) {
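             /* constrain the seek so it cannot land on the wrong side of the
                current position: when seeking forward (seek_rel > 0) the old
                position is the lower bound, when seeking backward the upper */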
2454             int64_t seek_target= is->seek_pos;
2455             int64_t seek_min= is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2456             int64_t seek_max= is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2457 //FIXME the +-2 is due to rounding being not done in the correct direction in generation
2458 //      of the seek_pos/seek_rel variables
2459
2460             ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2461             if (ret < 0) {
2462                 fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
2463             }else{
2464                 if (is->audio_stream >= 0) {
2465                     packet_queue_flush(&is->audioq);
2466                     packet_queue_put(&is->audioq, &flush_pkt);
2467                 }
2468                 if (is->subtitle_stream >= 0) {
2469                     packet_queue_flush(&is->subtitleq);
2470                     packet_queue_put(&is->subtitleq, &flush_pkt);
2471                 }
2472                 if (is->video_stream >= 0) {
2473                     packet_queue_flush(&is->videoq);
2474                     packet_queue_put(&is->videoq, &flush_pkt);
2475                 }
2476             }
2477             is->seek_req = 0;
2478             eof= 0;
2479         }
2480
2481         /* if the queues are full, no need to read more */
2482         if (   is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2483             || (   (is->audioq   .size  > MIN_AUDIOQ_SIZE || is->audio_stream<0)
2484                 && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream<0)
2485                 && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream<0))) {
2486             /* wait 10 ms */
2487             SDL_Delay(10);
2488             continue;
2489         }
2490         if(eof) {
2491             if(is->video_stream >= 0){
2492                 av_init_packet(pkt);
2493                 pkt->data=NULL;
2494                 pkt->size=0;
2495                 pkt->stream_index= is->video_stream;
2496                 packet_queue_put(&is->videoq, pkt);
2497             }
2498             SDL_Delay(10);
2499             if(is->audioq.size + is->videoq.size + is->subtitleq.size ==0){
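                 /* loop == 1 plays once, loop == 0 loops forever, any other
                    value is decremented on each pass through the file */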
2500                 if(loop!=1 && (!loop || --loop)){
2501                     stream_seek(cur_stream, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
2502                 }else if(autoexit){
2503                     ret=AVERROR_EOF;
2504                     goto fail;
2505                 }
2506             }
2507             continue;
2508         }
2509         ret = av_read_frame(ic, pkt);
2510         if (ret < 0) {
2511             if (ret == AVERROR_EOF || (ic->pb && ic->pb->eof_reached))
2512                 eof=1;
2513             if (ic->pb && ic->pb->error)
2514                 break;
2515             SDL_Delay(100); /* wait for user event */
2516             continue;
2517         }
2518         /* check if packet is in play range specified by user, then queue, otherwise discard */
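         /* the packet pts relative to the stream start, in seconds, minus the
            -ss offset must not exceed the -t duration */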
2519         pkt_in_play_range = duration == AV_NOPTS_VALUE ||
2520                 (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
2521                 av_q2d(ic->streams[pkt->stream_index]->time_base) -
2522                 (double)(start_time != AV_NOPTS_VALUE ? start_time : 0)/1000000
2523                 <= ((double)duration/1000000);
2524         if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
2525             packet_queue_put(&is->audioq, pkt);
2526         } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
2527             packet_queue_put(&is->videoq, pkt);
2528         } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
2529             packet_queue_put(&is->subtitleq, pkt);
2530         } else {
2531             av_free_packet(pkt);
2532         }
2533     }
2534     /* wait until the end */
2535     while (!is->abort_request) {
2536         SDL_Delay(100);
2537     }
2538
2539     ret = 0;
2540  fail:
2541     /* disable interrupting */
2542     global_video_state = NULL;
2543
2544     /* close each stream */
2545     if (is->audio_stream >= 0)
2546         stream_component_close(is, is->audio_stream);
2547     if (is->video_stream >= 0)
2548         stream_component_close(is, is->video_stream);
2549     if (is->subtitle_stream >= 0)
2550         stream_component_close(is, is->subtitle_stream);
2551     if (is->ic) {
2552         av_close_input_file(is->ic);
2553         is->ic = NULL; /* safety */
2554     }
2555     avio_set_interrupt_cb(NULL);
2556
2557     if (ret != 0) {
2558         SDL_Event event;
2559
2560         event.type = FF_QUIT_EVENT;
2561         event.user.data1 = is;
2562         SDL_PushEvent(&event);
2563     }
2564     return 0;
2565 }
2566
2567 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2568 {
2569     VideoState *is;
2570
2571     is = av_mallocz(sizeof(VideoState));
2572     if (!is)
2573         return NULL;
2574     av_strlcpy(is->filename, filename, sizeof(is->filename));
2575     is->iformat = iformat;
2576     is->ytop = 0;
2577     is->xleft = 0;
2578
2579     /* start video display */
2580     is->pictq_mutex = SDL_CreateMutex();
2581     is->pictq_cond = SDL_CreateCond();
2582
2583     is->subpq_mutex = SDL_CreateMutex();
2584     is->subpq_cond = SDL_CreateCond();
2585
2586     is->av_sync_type = av_sync_type;
2587     is->parse_tid = SDL_CreateThread(decode_thread, is);
2588     if (!is->parse_tid) {
2589         av_free(is);
2590         return NULL;
2591     }
2592     return is;
2593 }
2594
2595 static void stream_cycle_channel(VideoState *is, int codec_type)
2596 {
2597     AVFormatContext *ic = is->ic;
2598     int start_index, stream_index;
2599     AVStream *st;
2600
2601     if (codec_type == AVMEDIA_TYPE_VIDEO)
2602         start_index = is->video_stream;
2603     else if (codec_type == AVMEDIA_TYPE_AUDIO)
2604         start_index = is->audio_stream;
2605     else
2606         start_index = is->subtitle_stream;
2607     if (start_index < (codec_type == AVMEDIA_TYPE_SUBTITLE ? -1 : 0))
2608         return;
2609     stream_index = start_index;
2610     for(;;) {
2611         if (++stream_index >= is->ic->nb_streams)
2612         {
2613             if (codec_type == AVMEDIA_TYPE_SUBTITLE)
2614             {
2615                 stream_index = -1;
2616                 goto the_end;
2617             } else
2618                 stream_index = 0;
2619         }
2620         if (stream_index == start_index)
2621             return;
2622         st = ic->streams[stream_index];
2623         if (st->codec->codec_type == codec_type) {
2624             /* check that parameters are OK */
2625             switch(codec_type) {
2626             case AVMEDIA_TYPE_AUDIO:
2627                 if (st->codec->sample_rate != 0 &&
2628                     st->codec->channels != 0)
2629                     goto the_end;
2630                 break;
2631             case AVMEDIA_TYPE_VIDEO:
2632             case AVMEDIA_TYPE_SUBTITLE:
2633                 goto the_end;
2634             default:
2635                 break;
2636             }
2637         }
2638     }
2639  the_end:
2640     stream_component_close(is, start_index);
2641     stream_component_open(is, stream_index);
2642 }
2643
2644
2645 static void toggle_full_screen(void)
2646 {
2647     is_full_screen = !is_full_screen;
2648     video_open(cur_stream);
2649 }
2650
2651 static void toggle_pause(void)
2652 {
2653     if (cur_stream)
2654         stream_pause(cur_stream);
2655     step = 0;
2656 }
2657
2658 static void step_to_next_frame(void)
2659 {
2660     if (cur_stream) {
2661         /* if the stream is paused, unpause it, then step */
2662         if (cur_stream->paused)
2663             stream_pause(cur_stream);
2664     }
2665     step = 1;
2666 }
2667
2668 static void toggle_audio_display(void)
2669 {
2670     if (cur_stream) {
2671         int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2672         cur_stream->show_audio = (cur_stream->show_audio + 1) % 3;
2673         fill_rectangle(screen,
2674                     cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height,
2675                     bgcolor);
2676         SDL_UpdateRect(screen, cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height);
2677     }
2678 }
2679
2680 /* handle an event sent by the GUI */
2681 static void event_loop(void)
2682 {
2683     SDL_Event event;
2684     double incr, pos, frac;
2685
2686     for(;;) {
2687         double x;
2688         SDL_WaitEvent(&event);
2689         switch(event.type) {
2690         case SDL_KEYDOWN:
2691             if (exit_on_keydown) {
2692                 do_exit();
2693                 break;
2694             }
2695             switch(event.key.keysym.sym) {
2696             case SDLK_ESCAPE:
2697             case SDLK_q:
2698                 do_exit();
2699                 break;
2700             case SDLK_f:
2701                 toggle_full_screen();
2702                 break;
2703             case SDLK_p:
2704             case SDLK_SPACE:
2705                 toggle_pause();
2706                 break;
2707             case SDLK_s: //S: Step to next frame
2708                 step_to_next_frame();
2709                 break;
2710             case SDLK_a:
2711                 if (cur_stream)
2712                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
2713                 break;
2714             case SDLK_v:
2715                 if (cur_stream)
2716                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
2717                 break;
2718             case SDLK_t:
2719                 if (cur_stream)
2720                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
2721                 break;
2722             case SDLK_w:
2723                 toggle_audio_display();
2724                 break;
2725             case SDLK_LEFT:
2726                 incr = -10.0;
2727                 goto do_seek;
2728             case SDLK_RIGHT:
2729                 incr = 10.0;
2730                 goto do_seek;
2731             case SDLK_UP:
2732                 incr = 60.0;
2733                 goto do_seek;
2734             case SDLK_DOWN:
2735                 incr = -60.0;
2736             do_seek:
2737                 if (cur_stream) {
2738                     if (seek_by_bytes) {
2739                         if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos>=0){
2740                             pos= cur_stream->video_current_pos;
2741                         }else if(cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos>=0){
2742                             pos= cur_stream->audio_pkt.pos;
2743                         }else
2744                             pos = avio_tell(cur_stream->ic->pb);
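                             /* convert the increment from seconds to bytes,
                                guessing 180000 bytes/s when the bit rate is
                                unknown */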
2745                         if (cur_stream->ic->bit_rate)
2746                             incr *= cur_stream->ic->bit_rate / 8.0;
2747                         else
2748                             incr *= 180000.0;
2749                         pos += incr;
2750                         stream_seek(cur_stream, pos, incr, 1);
2751                     } else {
2752                         pos = get_master_clock(cur_stream);
2753                         pos += incr;
2754                         stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
2755                     }
2756                 }
2757                 break;
2758             default:
2759                 break;
2760             }
2761             break;
2762         case SDL_MOUSEBUTTONDOWN:
2763             if (exit_on_mousedown) {
2764                 do_exit();
2765                 break;
2766             }
2767         case SDL_MOUSEMOTION:
2768             if(event.type ==SDL_MOUSEBUTTONDOWN){
2769                 x= event.button.x;
2770             }else{
2771                 if(event.motion.state != SDL_PRESSED)
2772                     break;
2773                 x= event.motion.x;
2774             }
2775             if (cur_stream) {
2776                 if(seek_by_bytes || cur_stream->ic->duration<=0){
2777                     uint64_t size=  avio_size(cur_stream->ic->pb);
2778                     stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
2779                 }else{
2780                     int64_t ts;
2781                     int ns, hh, mm, ss;
2782                     int tns, thh, tmm, tss;
2783                     tns = cur_stream->ic->duration/1000000LL;
2784                     thh = tns/3600;
2785                     tmm = (tns%3600)/60;
2786                     tss = (tns%60);
2787                     frac = x/cur_stream->width;
2788                     ns = frac*tns;
2789                     hh = ns/3600;
2790                     mm = (ns%3600)/60;
2791                     ss = (ns%60);
2792                     fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
2793                             hh, mm, ss, thh, tmm, tss);
2794                     ts = frac*cur_stream->ic->duration;
2795                     if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
2796                         ts += cur_stream->ic->start_time;
2797                     stream_seek(cur_stream, ts, 0, 0);
2798                 }
2799             }
2800             break;
2801         case SDL_VIDEORESIZE:
2802             if (cur_stream) {
2803                 screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
2804                                           SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
2805                 screen_width = cur_stream->width = event.resize.w;
2806                 screen_height= cur_stream->height= event.resize.h;
2807             }
2808             break;
2809         case SDL_QUIT:
2810         case FF_QUIT_EVENT:
2811             do_exit();
2812             break;
2813         case FF_ALLOC_EVENT:
2814             video_open(event.user.data1);
2815             alloc_picture(event.user.data1);
2816             break;
2817         case FF_REFRESH_EVENT:
2818             video_refresh_timer(event.user.data1);
2819             cur_stream->refresh=0;
2820             break;
2821         default:
2822             break;
2823         }
2824     }
2825 }
2826
2827 static int opt_frame_size(const char *opt, const char *arg)
2828 {
2829     if (av_parse_video_size(&frame_width, &frame_height, arg) < 0) {
2830         fprintf(stderr, "Incorrect frame size\n");
2831         return AVERROR(EINVAL);
2832     }
2833     if ((frame_width % 2) != 0 || (frame_height % 2) != 0) {
2834         fprintf(stderr, "Frame size must be a multiple of 2\n");
2835         return AVERROR(EINVAL);
2836     }
2837     return 0;
2838 }
2839
2840 static int opt_width(const char *opt, const char *arg)
2841 {
2842     screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2843     return 0;
2844 }
2845
2846 static int opt_height(const char *opt, const char *arg)
2847 {
2848     screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2849     return 0;
2850 }
2851
2852 static int opt_format(const char *opt, const char *arg)
2853 {
2854     file_iformat = av_find_input_format(arg);
2855     if (!file_iformat) {
2856         fprintf(stderr, "Unknown input format: %s\n", arg);
2857         return AVERROR(EINVAL);
2858     }
2859     return 0;
2860 }
2861
2862 static int opt_frame_pix_fmt(const char *opt, const char *arg)
2863 {
2864     frame_pix_fmt = av_get_pix_fmt(arg);
2865     return 0;
2866 }
2867
2868 static int opt_sync(const char *opt, const char *arg)
2869 {
2870     if (!strcmp(arg, "audio"))
2871         av_sync_type = AV_SYNC_AUDIO_MASTER;
2872     else if (!strcmp(arg, "video"))
2873         av_sync_type = AV_SYNC_VIDEO_MASTER;
2874     else if (!strcmp(arg, "ext"))
2875         av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2876     else {
2877         fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
2878         exit(1);
2879     }
2880     return 0;
2881 }
2882
2883 static int opt_seek(const char *opt, const char *arg)
2884 {
2885     start_time = parse_time_or_die(opt, arg, 1);
2886     return 0;
2887 }
2888
2889 static int opt_duration(const char *opt, const char *arg)
2890 {
2891     duration = parse_time_or_die(opt, arg, 1);
2892     return 0;
2893 }
2894
2895 static int opt_debug(const char *opt, const char *arg)
2896 {
2897     av_log_set_level(99);
2898     debug = parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2899     return 0;
2900 }
2901
2902 static int opt_vismv(const char *opt, const char *arg)
2903 {
2904     debug_mv = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
2905     return 0;
2906 }
2907
2908 static int opt_thread_count(const char *opt, const char *arg)
2909 {
2910     thread_count= parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2911 #if !HAVE_THREADS
2912     fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
2913 #endif
2914     return 0;
2915 }
2916
2917 static const OptionDef options[] = {
2918 #include "cmdutils_common_opts.h"
2919     { "x", HAS_ARG, {(void*)opt_width}, "force displayed width", "width" },
2920     { "y", HAS_ARG, {(void*)opt_height}, "force displayed height", "height" },
2921     { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
2922     { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
2923     { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
2924     { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
2925     { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_AUDIO]}, "select desired audio stream", "stream_number" },
2926     { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_VIDEO]}, "select desired video stream", "stream_number" },
2927     { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_SUBTITLE]}, "select desired subtitle stream", "stream_number" },
2928     { "ss", HAS_ARG, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
2929     { "t", HAS_ARG, {(void*)&opt_duration}, "play \"duration\" seconds of audio/video", "duration" },
2930     { "bytes", OPT_INT | HAS_ARG, {(void*)&seek_by_bytes}, "seek by bytes 0=off 1=on -1=auto", "val" },
2931     { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
2932     { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
2933     { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
2934     { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
2935     { "debug", HAS_ARG | OPT_EXPERT, {(void*)opt_debug}, "print specific debug info", "" },
2936     { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
2937     { "vismv", HAS_ARG | OPT_EXPERT, {(void*)opt_vismv}, "visualize motion vectors", "" },
2938     { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
2939     { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
2940     { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts 0=off 1=on -1=auto", ""},
2941     { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
2942     { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
2943     { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
2944     { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
2945     { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo",  "algo" },
2946     { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_recognition}, "set error detection threshold (0-4)",  "threshold" },
2947     { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options",  "bit_mask" },
2948     { "sync", HAS_ARG | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
2949     { "threads", HAS_ARG | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
2950     { "autoexit", OPT_BOOL | OPT_EXPERT, {(void*)&autoexit}, "exit at the end", "" },
2951     { "exitonkeydown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_keydown}, "exit on key down", "" },
2952     { "exitonmousedown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_mousedown}, "exit on mouse down", "" },
2953     { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&loop}, "set number of times the playback shall be looped", "loop count" },
2954     { "framedrop", OPT_BOOL | OPT_EXPERT, {(void*)&framedrop}, "drop frames when cpu is too slow", "" },
2955     { "window_title", OPT_STRING | HAS_ARG, {(void*)&window_title}, "set window title", "window title" },
2956 #if CONFIG_AVFILTER
2957     { "vf", OPT_STRING | HAS_ARG, {(void*)&vfilters}, "video filters", "filter list" },
2958 #endif
2959     { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, {(void*)&rdftspeed}, "rdft speed", "msecs" },
2960     { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
2961     { "i", 0, {NULL}, "ffmpeg compatibility dummy option", ""},
2962     { NULL, },
2963 };
2964
2965 static void show_usage(void)
2966 {
2967     printf("Simple media player\n");
2968     printf("usage: ffplay [options] input_file\n");
2969     printf("\n");
2970 }
2971
2972 static void show_help(void)
2973 {
2974     av_log_set_callback(log_callback_help);
2975     show_usage();
2976     show_help_options(options, "Main options:\n",
2977                       OPT_EXPERT, 0);
2978     show_help_options(options, "\nAdvanced options:\n",
2979                       OPT_EXPERT, OPT_EXPERT);
2980     printf("\n");
2981     av_opt_show2(avcodec_opts[0], NULL,
2982                  AV_OPT_FLAG_DECODING_PARAM, 0);
2983     printf("\n");
2984     av_opt_show2(avformat_opts, NULL,
2985                  AV_OPT_FLAG_DECODING_PARAM, 0);
2986 #if !CONFIG_AVFILTER
2987     printf("\n");
2988     av_opt_show2(sws_opts, NULL,
2989                  AV_OPT_FLAG_ENCODING_PARAM, 0);
2990 #endif
2991     printf("\nWhile playing:\n"
2992            "q, ESC              quit\n"
2993            "f                   toggle full screen\n"
2994            "p, SPC              pause\n"
2995            "a                   cycle audio channel\n"
2996            "v                   cycle video channel\n"
2997            "t                   cycle subtitle channel\n"
2998            "w                   show audio waves\n"
2999            "s                   activate frame-step mode\n"
3000            "left/right          seek backward/forward 10 seconds\n"
3001            "down/up             seek backward/forward 1 minute\n"
3002            "mouse click         seek to percentage in file corresponding to fraction of width\n"
3003            );
3004 }
3005
3006 static void opt_input_file(const char *filename)
3007 {
3008     if (input_filename) {
3009         fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
3010                 filename, input_filename);
3011         exit(1);
3012     }
3013     if (!strcmp(filename, "-"))
3014         filename = "pipe:";
3015     input_filename = filename;
3016 }
3017
3018 /* Called from the main */
3019 int main(int argc, char **argv)
3020 {
3021     int flags;
3022
3023     av_log_set_flags(AV_LOG_SKIP_REPEATED);
3024
3025     /* register all codecs, demuxers and protocols */
3026     avcodec_register_all();
3027 #if CONFIG_AVDEVICE
3028     avdevice_register_all();
3029 #endif
3030 #if CONFIG_AVFILTER
3031     avfilter_register_all();
3032 #endif
3033     av_register_all();
3034
3035     init_opts();
3036
3037     show_banner();
3038
3039     parse_options(argc, argv, options, opt_input_file);
3040
3041     if (!input_filename) {
3042         show_usage();
3043         fprintf(stderr, "An input file must be specified\n");
3044         fprintf(stderr, "Use -h to get full help or, even better, run 'man ffplay'\n");
3045         exit(1);
3046     }
3047
3048     if (display_disable) {
3049         video_disable = 1;
3050     }
3051     flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3052 #if !defined(__MINGW32__) && !defined(__APPLE__)
3053     flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
3054 #endif
3055     if (SDL_Init (flags)) {
3056         fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
3057         exit(1);
3058     }
3059
3060     if (!display_disable) {
3061 #if HAVE_SDL_VIDEO_SIZE
3062         const SDL_VideoInfo *vi = SDL_GetVideoInfo();
3063         fs_screen_width = vi->current_w;
3064         fs_screen_height = vi->current_h;
3065 #endif
3066     }
3067
3068     SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
3069     SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3070     SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3071
3072     av_init_packet(&flush_pkt);
3073     flush_pkt.data= "FLUSH";
3074
3075     cur_stream = stream_open(input_filename, file_iformat);
3076
3077     event_loop();
3078
3079     /* never returns */
3080
3081     return 0;
3082 }