Replace some nonstandard DEBUG_* preprocessor directives by plain DEBUG.
[ffmpeg.git] / ffplay.c
1 /*
2  * ffplay : Simple Media Player based on the Libav libraries
3  * Copyright (c) 2003 Fabrice Bellard
4  *
5  * This file is part of Libav.
6  *
7  * Libav is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * Libav is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with Libav; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21
22 #include "config.h"
23 #include <inttypes.h>
24 #include <math.h>
25 #include <limits.h>
26 #include "libavutil/avstring.h"
27 #include "libavutil/colorspace.h"
28 #include "libavutil/pixdesc.h"
29 #include "libavutil/imgutils.h"
30 #include "libavutil/parseutils.h"
31 #include "libavutil/samplefmt.h"
32 #include "libavformat/avformat.h"
33 #include "libavdevice/avdevice.h"
34 #include "libswscale/swscale.h"
35 #include "libavcodec/audioconvert.h"
36 #include "libavutil/opt.h"
37 #include "libavcodec/avfft.h"
38
39 #if CONFIG_AVFILTER
40 # include "libavfilter/avfilter.h"
41 # include "libavfilter/avfiltergraph.h"
42 #endif
43
44 #include "cmdutils.h"
45
46 #include <SDL.h>
47 #include <SDL_thread.h>
48
49 #ifdef __MINGW32__
50 #undef main /* We don't want SDL to override our main() */
51 #endif
52
53 #include <unistd.h>
54 #include <assert.h>
55
56 const char program_name[] = "ffplay";
57 const int program_birth_year = 2003;
58
59 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
60 #define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
61 #define MIN_FRAMES 5
62
63 /* SDL audio buffer size, in samples. Should be small to have precise
64    A/V sync as SDL does not have hardware buffer fullness info. */
65 #define SDL_AUDIO_BUFFER_SIZE 1024
66
67 /* no AV sync correction is done if below the AV sync threshold */
68 #define AV_SYNC_THRESHOLD 0.01
69 /* no AV correction is done if too big error */
70 #define AV_NOSYNC_THRESHOLD 10.0
71
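/* factor by which the decoder-side frame skip ratio is adapted */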
72 #define FRAME_SKIP_FACTOR 0.05
73
74 /* maximum audio speed change to get correct sync */
75 #define SAMPLE_CORRECTION_PERCENT_MAX 10
76
77 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
78 #define AUDIO_DIFF_AVG_NB   20
79
80 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
81 #define SAMPLE_ARRAY_SIZE (2*65536)
82
83 static int sws_flags = SWS_BICUBIC;
84
85 typedef struct PacketQueue {
86     AVPacketList *first_pkt, *last_pkt;
87     int nb_packets;
88     int size;
89     int abort_request;
90     SDL_mutex *mutex;
91     SDL_cond *cond;
92 } PacketQueue;
93
94 #define VIDEO_PICTURE_QUEUE_SIZE 2
95 #define SUBPICTURE_QUEUE_SIZE 4
96
97 typedef struct VideoPicture {
98     double pts;                                  ///<presentation time stamp for this picture
99     double target_clock;                         ///<av_gettime() time at which this should be displayed ideally
100     int64_t pos;                                 ///<byte position in file
101     SDL_Overlay *bmp;
102     int width, height; /* source height & width */
103     int allocated;
104     enum PixelFormat pix_fmt;
105
106 #if CONFIG_AVFILTER
107     AVFilterBufferRef *picref;
108 #endif
109 } VideoPicture;
110
111 typedef struct SubPicture {
112     double pts; /* presentation time stamp for this picture */
113     AVSubtitle sub;
114 } SubPicture;
115
116 enum {
117     AV_SYNC_AUDIO_MASTER, /* default choice */
118     AV_SYNC_VIDEO_MASTER,
119     AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
120 };
121
122 typedef struct VideoState {
123     SDL_Thread *parse_tid;
124     SDL_Thread *video_tid;
125     SDL_Thread *refresh_tid;
126     AVInputFormat *iformat;
127     int no_background;
128     int abort_request;
129     int paused;
130     int last_paused;
131     int seek_req;
132     int seek_flags;
133     int64_t seek_pos;
134     int64_t seek_rel;
135     int read_pause_return;
136     AVFormatContext *ic;
137     int dtg_active_format;
138
139     int audio_stream;
140
141     int av_sync_type;
142     double external_clock; /* external clock base */
143     int64_t external_clock_time;
144
145     double audio_clock;
146     double audio_diff_cum; /* used for AV difference average computation */
147     double audio_diff_avg_coef;
148     double audio_diff_threshold;
149     int audio_diff_avg_count;
150     AVStream *audio_st;
151     PacketQueue audioq;
152     int audio_hw_buf_size;
153     /* samples output by the codec. we reserve more space for avsync
154        compensation */
155     DECLARE_ALIGNED(16,uint8_t,audio_buf1)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
156     DECLARE_ALIGNED(16,uint8_t,audio_buf2)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
157     uint8_t *audio_buf;
158     unsigned int audio_buf_size; /* in bytes */
159     int audio_buf_index; /* in bytes */
160     AVPacket audio_pkt_temp;
161     AVPacket audio_pkt;
162     enum AVSampleFormat audio_src_fmt;
163     AVAudioConvert *reformat_ctx;
164
165     int show_audio; /* if true, display audio samples */
166     int16_t sample_array[SAMPLE_ARRAY_SIZE];
167     int sample_array_index;
168     int last_i_start;
169     RDFTContext *rdft;
170     int rdft_bits;
171     FFTSample *rdft_data;
172     int xpos;
173
174     SDL_Thread *subtitle_tid;
175     int subtitle_stream;
176     int subtitle_stream_changed;
177     AVStream *subtitle_st;
178     PacketQueue subtitleq;
179     SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
180     int subpq_size, subpq_rindex, subpq_windex;
181     SDL_mutex *subpq_mutex;
182     SDL_cond *subpq_cond;
183
184     double frame_timer;
185     double frame_last_pts;
186     double frame_last_delay;
187     double video_clock;                          ///<pts of last decoded frame / predicted pts of next decoded frame
188     int video_stream;
189     AVStream *video_st;
190     PacketQueue videoq;
191     double video_current_pts;                    ///<current displayed pts (different from video_clock if frame fifos are used)
192     double video_current_pts_drift;              ///<video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
193     int64_t video_current_pos;                   ///<current displayed file pos
194     VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
195     int pictq_size, pictq_rindex, pictq_windex;
196     SDL_mutex *pictq_mutex;
197     SDL_cond *pictq_cond;
198 #if !CONFIG_AVFILTER
199     struct SwsContext *img_convert_ctx;
200 #endif
201
202     //    QETimer *video_timer;
203     char filename[1024];
204     int width, height, xleft, ytop;
205
206     PtsCorrectionContext pts_ctx;
207
208 #if CONFIG_AVFILTER
209     AVFilterContext *out_video_filter;          ///<the last filter in the video chain
210 #endif
211
212     float skip_frames;
213     float skip_frames_index;
214     int refresh;
215 } VideoState;
216
217 static void show_help(void);
218
219 /* options specified by the user */
220 static AVInputFormat *file_iformat;
221 static const char *input_filename;
222 static const char *window_title;
223 static int fs_screen_width;
224 static int fs_screen_height;
225 static int screen_width = 0;
226 static int screen_height = 0;
227 static int frame_width = 0;
228 static int frame_height = 0;
229 static enum PixelFormat frame_pix_fmt = PIX_FMT_NONE;
230 static int audio_disable;
231 static int video_disable;
232 static int wanted_stream[AVMEDIA_TYPE_NB]={
233     [AVMEDIA_TYPE_AUDIO]=-1,
234     [AVMEDIA_TYPE_VIDEO]=-1,
235     [AVMEDIA_TYPE_SUBTITLE]=-1,
236 };
237 static int seek_by_bytes=-1;
238 static int display_disable;
239 static int show_status = 1;
240 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
241 static int64_t start_time = AV_NOPTS_VALUE;
242 static int64_t duration = AV_NOPTS_VALUE;
243 static int debug = 0;
244 static int debug_mv = 0;
245 static int step = 0;
246 static int thread_count = 1;
247 static int workaround_bugs = 1;
248 static int fast = 0;
249 static int genpts = 0;
250 static int lowres = 0;
251 static int idct = FF_IDCT_AUTO;
252 static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
253 static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
254 static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
255 static int error_recognition = FF_ER_CAREFUL;
256 static int error_concealment = 3;
257 static int decoder_reorder_pts= -1;
258 static int autoexit;
259 static int exit_on_keydown;
260 static int exit_on_mousedown;
261 static int loop=1;
262 static int framedrop=1;
263
264 static int rdftspeed=20;
265 #if CONFIG_AVFILTER
266 static char *vfilters = NULL;
267 #endif
268
269 /* current context */
270 static int is_full_screen;
271 static VideoState *cur_stream;
272 static int64_t audio_callback_time;
273
274 static AVPacket flush_pkt;
275
276 #define FF_ALLOC_EVENT   (SDL_USEREVENT)
277 #define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
278 #define FF_QUIT_EVENT    (SDL_USEREVENT + 2)
279
280 static SDL_Surface *screen;
281
282 static int packet_queue_put(PacketQueue *q, AVPacket *pkt);
283
284 /* packet queue handling */
285 static void packet_queue_init(PacketQueue *q)
286 {
287     memset(q, 0, sizeof(PacketQueue));
288     q->mutex = SDL_CreateMutex();
289     q->cond = SDL_CreateCond();
290     packet_queue_put(q, &flush_pkt);
291 }
292
293 static void packet_queue_flush(PacketQueue *q)
294 {
295     AVPacketList *pkt, *pkt1;
296
297     SDL_LockMutex(q->mutex);
298     for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
299         pkt1 = pkt->next;
300         av_free_packet(&pkt->pkt);
301         av_freep(&pkt);
302     }
303     q->last_pkt = NULL;
304     q->first_pkt = NULL;
305     q->nb_packets = 0;
306     q->size = 0;
307     SDL_UnlockMutex(q->mutex);
308 }
309
310 static void packet_queue_end(PacketQueue *q)
311 {
312     packet_queue_flush(q);
313     SDL_DestroyMutex(q->mutex);
314     SDL_DestroyCond(q->cond);
315 }
316
317 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
318 {
319     AVPacketList *pkt1;
320
321     /* duplicate the packet */
322     if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
323         return -1;
324
325     pkt1 = av_malloc(sizeof(AVPacketList));
326     if (!pkt1)
327         return -1;
328     pkt1->pkt = *pkt;
329     pkt1->next = NULL;
330
331
332     SDL_LockMutex(q->mutex);
333
334     if (!q->last_pkt)
335
336         q->first_pkt = pkt1;
337     else
338         q->last_pkt->next = pkt1;
339     q->last_pkt = pkt1;
340     q->nb_packets++;
341     q->size += pkt1->pkt.size + sizeof(*pkt1);
342     /* XXX: should duplicate packet data in DV case */
343     SDL_CondSignal(q->cond);
344
345     SDL_UnlockMutex(q->mutex);
346     return 0;
347 }
348
349 static void packet_queue_abort(PacketQueue *q)
350 {
351     SDL_LockMutex(q->mutex);
352
353     q->abort_request = 1;
354
355     SDL_CondSignal(q->cond);
356
357     SDL_UnlockMutex(q->mutex);
358 }
359
360 /* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
361 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
362 {
363     AVPacketList *pkt1;
364     int ret;
365
366     SDL_LockMutex(q->mutex);
367
368     for(;;) {
369         if (q->abort_request) {
370             ret = -1;
371             break;
372         }
373
374         pkt1 = q->first_pkt;
375         if (pkt1) {
376             q->first_pkt = pkt1->next;
377             if (!q->first_pkt)
378                 q->last_pkt = NULL;
379             q->nb_packets--;
380             q->size -= pkt1->pkt.size + sizeof(*pkt1);
381             *pkt = pkt1->pkt;
382             av_free(pkt1);
383             ret = 1;
384             break;
385         } else if (!block) {
386             ret = 0;
387             break;
388         } else {
389             SDL_CondWait(q->cond, q->mutex);
390         }
391     }
392     SDL_UnlockMutex(q->mutex);
393     return ret;
394 }
395
396 static inline void fill_rectangle(SDL_Surface *screen,
397                                   int x, int y, int w, int h, int color)
398 {
399     SDL_Rect rect;
400     rect.x = x;
401     rect.y = y;
402     rect.w = w;
403     rect.h = h;
404     SDL_FillRect(screen, &rect, color);
405 }
406
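/* blend newp over oldp with alpha a; the shift s compensates for newp being
   the sum of 2^s subsampled chroma values */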
407 #define ALPHA_BLEND(a, oldp, newp, s)\
408 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
409
410 #define RGBA_IN(r, g, b, a, s)\
411 {\
412     unsigned int v = ((const uint32_t *)(s))[0];\
413     a = (v >> 24) & 0xff;\
414     r = (v >> 16) & 0xff;\
415     g = (v >> 8) & 0xff;\
416     b = v & 0xff;\
417 }
418
419 #define YUVA_IN(y, u, v, a, s, pal)\
420 {\
421     unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
422     a = (val >> 24) & 0xff;\
423     y = (val >> 16) & 0xff;\
424     u = (val >> 8) & 0xff;\
425     v = val & 0xff;\
426 }
427
428 #define YUVA_OUT(d, y, u, v, a)\
429 {\
430     ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
431 }
432
433
434 #define BPP 1
435
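/* Blend a palettized subtitle rectangle onto a YUV420P destination picture:
   luma is blended per pixel, chroma once per pair of pixels/lines. */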
436 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
437 {
438     int wrap, wrap3, width2, skip2;
439     int y, u, v, a, u1, v1, a1, w, h;
440     uint8_t *lum, *cb, *cr;
441     const uint8_t *p;
442     const uint32_t *pal;
443     int dstx, dsty, dstw, dsth;
444
445     dstw = av_clip(rect->w, 0, imgw);
446     dsth = av_clip(rect->h, 0, imgh);
447     dstx = av_clip(rect->x, 0, imgw - dstw);
448     dsty = av_clip(rect->y, 0, imgh - dsth);
449     lum = dst->data[0] + dsty * dst->linesize[0];
450     cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
451     cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
452
453     width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
454     skip2 = dstx >> 1;
455     wrap = dst->linesize[0];
456     wrap3 = rect->pict.linesize[0];
457     p = rect->pict.data[0];
458     pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */
459
460     if (dsty & 1) {
461         lum += dstx;
462         cb += skip2;
463         cr += skip2;
464
465         if (dstx & 1) {
466             YUVA_IN(y, u, v, a, p, pal);
467             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
468             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
469             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
470             cb++;
471             cr++;
472             lum++;
473             p += BPP;
474         }
475         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
476             YUVA_IN(y, u, v, a, p, pal);
477             u1 = u;
478             v1 = v;
479             a1 = a;
480             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
481
482             YUVA_IN(y, u, v, a, p + BPP, pal);
483             u1 += u;
484             v1 += v;
485             a1 += a;
486             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
487             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
488             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
489             cb++;
490             cr++;
491             p += 2 * BPP;
492             lum += 2;
493         }
494         if (w) {
495             YUVA_IN(y, u, v, a, p, pal);
496             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
497             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
498             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
499             p++;
500             lum++;
501         }
502         p += wrap3 - dstw * BPP;
503         lum += wrap - dstw - dstx;
504         cb += dst->linesize[1] - width2 - skip2;
505         cr += dst->linesize[2] - width2 - skip2;
506     }
507     for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
508         lum += dstx;
509         cb += skip2;
510         cr += skip2;
511
512         if (dstx & 1) {
513             YUVA_IN(y, u, v, a, p, pal);
514             u1 = u;
515             v1 = v;
516             a1 = a;
517             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
518             p += wrap3;
519             lum += wrap;
520             YUVA_IN(y, u, v, a, p, pal);
521             u1 += u;
522             v1 += v;
523             a1 += a;
524             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
525             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
526             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
527             cb++;
528             cr++;
529             p += -wrap3 + BPP;
530             lum += -wrap + 1;
531         }
532         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
533             YUVA_IN(y, u, v, a, p, pal);
534             u1 = u;
535             v1 = v;
536             a1 = a;
537             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
538
539             YUVA_IN(y, u, v, a, p + BPP, pal);
540             u1 += u;
541             v1 += v;
542             a1 += a;
543             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
544             p += wrap3;
545             lum += wrap;
546
547             YUVA_IN(y, u, v, a, p, pal);
548             u1 += u;
549             v1 += v;
550             a1 += a;
551             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
552
553             YUVA_IN(y, u, v, a, p + BPP, pal);
554             u1 += u;
555             v1 += v;
556             a1 += a;
557             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
558
559             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
560             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
561
562             cb++;
563             cr++;
564             p += -wrap3 + 2 * BPP;
565             lum += -wrap + 2;
566         }
567         if (w) {
568             YUVA_IN(y, u, v, a, p, pal);
569             u1 = u;
570             v1 = v;
571             a1 = a;
572             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
573             p += wrap3;
574             lum += wrap;
575             YUVA_IN(y, u, v, a, p, pal);
576             u1 += u;
577             v1 += v;
578             a1 += a;
579             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
580             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
581             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
582             cb++;
583             cr++;
584             p += -wrap3 + BPP;
585             lum += -wrap + 1;
586         }
587         p += wrap3 + (wrap3 - dstw * BPP);
588         lum += wrap + (wrap - dstw - dstx);
589         cb += dst->linesize[1] - width2 - skip2;
590         cr += dst->linesize[2] - width2 - skip2;
591     }
592     /* handle odd height */
593     if (h) {
594         lum += dstx;
595         cb += skip2;
596         cr += skip2;
597
598         if (dstx & 1) {
599             YUVA_IN(y, u, v, a, p, pal);
600             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
601             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
602             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
603             cb++;
604             cr++;
605             lum++;
606             p += BPP;
607         }
608         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
609             YUVA_IN(y, u, v, a, p, pal);
610             u1 = u;
611             v1 = v;
612             a1 = a;
613             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
614
615             YUVA_IN(y, u, v, a, p + BPP, pal);
616             u1 += u;
617             v1 += v;
618             a1 += a;
619             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
620             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
621             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
622             cb++;
623             cr++;
624             p += 2 * BPP;
625             lum += 2;
626         }
627         if (w) {
628             YUVA_IN(y, u, v, a, p, pal);
629             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
630             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
631             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
632         }
633     }
634 }
635
636 static void free_subpicture(SubPicture *sp)
637 {
638     avsubtitle_free(&sp->sub);
639 }
640
641 static void video_image_display(VideoState *is)
642 {
643     VideoPicture *vp;
644     SubPicture *sp;
645     AVPicture pict;
646     float aspect_ratio;
647     int width, height, x, y;
648     SDL_Rect rect;
649     int i;
650
651     vp = &is->pictq[is->pictq_rindex];
652     if (vp->bmp) {
653 #if CONFIG_AVFILTER
654          if (vp->picref->video->pixel_aspect.num == 0)
655              aspect_ratio = 0;
656          else
657              aspect_ratio = av_q2d(vp->picref->video->pixel_aspect);
658 #else
659
660         /* XXX: use variable in the frame */
661         if (is->video_st->sample_aspect_ratio.num)
662             aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
663         else if (is->video_st->codec->sample_aspect_ratio.num)
664             aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
665         else
666             aspect_ratio = 0;
667 #endif
668         if (aspect_ratio <= 0.0)
669             aspect_ratio = 1.0;
670         aspect_ratio *= (float)vp->width / (float)vp->height;
671
672         if (is->subtitle_st)
673         {
674             if (is->subpq_size > 0)
675             {
676                 sp = &is->subpq[is->subpq_rindex];
677
678                 if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
679                 {
680                     SDL_LockYUVOverlay (vp->bmp);
681
682                     pict.data[0] = vp->bmp->pixels[0];
683                     pict.data[1] = vp->bmp->pixels[2];
684                     pict.data[2] = vp->bmp->pixels[1];
685
686                     pict.linesize[0] = vp->bmp->pitches[0];
687                     pict.linesize[1] = vp->bmp->pitches[2];
688                     pict.linesize[2] = vp->bmp->pitches[1];
689
690                     for (i = 0; i < sp->sub.num_rects; i++)
691                         blend_subrect(&pict, sp->sub.rects[i],
692                                       vp->bmp->w, vp->bmp->h);
693
694                     SDL_UnlockYUVOverlay (vp->bmp);
695                 }
696             }
697         }
698
699
700         /* XXX: we suppose the screen has a 1.0 pixel ratio */
701         height = is->height;
702         width = ((int)rint(height * aspect_ratio)) & ~1;
703         if (width > is->width) {
704             width = is->width;
705             height = ((int)rint(width / aspect_ratio)) & ~1;
706         }
707         x = (is->width - width) / 2;
708         y = (is->height - height) / 2;
709         is->no_background = 0;
710         rect.x = is->xleft + x;
711         rect.y = is->ytop  + y;
712         rect.w = width;
713         rect.h = height;
714         SDL_DisplayYUVOverlay(vp->bmp, &rect);
715     }
716 }
717
718 /* get the current audio output buffer size, in bytes. With SDL, we
719    cannot get precise information. */
720 static int audio_write_get_buf_size(VideoState *is)
721 {
722     return is->audio_buf_size - is->audio_buf_index;
723 }
724
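/* modulo that always returns a result in [0, b), used to wrap indices into
   the circular sample array */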
725 static inline int compute_mod(int a, int b)
726 {
727     a = a % b;
728     if (a >= 0)
729         return a;
730     else
731         return a + b;
732 }
733
734 static void video_audio_display(VideoState *s)
735 {
736     int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
737     int ch, channels, h, h2, bgcolor, fgcolor;
738     int64_t time_diff;
739     int rdft_bits, nb_freq;
740
741     for(rdft_bits=1; (1<<rdft_bits)<2*s->height; rdft_bits++)
742         ;
743     nb_freq= 1<<(rdft_bits-1);
744
745     /* compute display index : center on currently output samples */
746     channels = s->audio_st->codec->channels;
747     nb_display_channels = channels;
748     if (!s->paused) {
749         int data_used= s->show_audio==1 ? s->width : (2*nb_freq);
750         n = 2 * channels;
751         delay = audio_write_get_buf_size(s);
752         delay /= n;
753
754         /* to be more precise, we take into account the time spent since
755            the last buffer computation */
756         if (audio_callback_time) {
757             time_diff = av_gettime() - audio_callback_time;
758             delay -= (time_diff * s->audio_st->codec->sample_rate) / 1000000;
759         }
760
761         delay += 2*data_used;
762         if (delay < data_used)
763             delay = data_used;
764
765         i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
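        /* crude oscilloscope-style trigger: look for a sign change followed
           by a strong falling edge so the waveform is drawn from a stable
           starting point between refreshes */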
766         if(s->show_audio==1){
767             h= INT_MIN;
768             for(i=0; i<1000; i+=channels){
769                 int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
770                 int a= s->sample_array[idx];
771                 int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
772                 int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
773                 int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
774                 int score= a-d;
775                 if(h<score && (b^c)<0){
776                     h= score;
777                     i_start= idx;
778                 }
779             }
780         }
781
782         s->last_i_start = i_start;
783     } else {
784         i_start = s->last_i_start;
785     }
786
787     bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
788     if(s->show_audio==1){
789         fill_rectangle(screen,
790                        s->xleft, s->ytop, s->width, s->height,
791                        bgcolor);
792
793         fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
794
795         /* total height for one channel */
796         h = s->height / nb_display_channels;
797         /* graph height / 2 */
798         h2 = (h * 9) / 20;
799         for(ch = 0;ch < nb_display_channels; ch++) {
800             i = i_start + ch;
801             y1 = s->ytop + ch * h + (h / 2); /* position of center line */
802             for(x = 0; x < s->width; x++) {
803                 y = (s->sample_array[i] * h2) >> 15;
804                 if (y < 0) {
805                     y = -y;
806                     ys = y1 - y;
807                 } else {
808                     ys = y1;
809                 }
810                 fill_rectangle(screen,
811                                s->xleft + x, ys, 1, y,
812                                fgcolor);
813                 i += channels;
814                 if (i >= SAMPLE_ARRAY_SIZE)
815                     i -= SAMPLE_ARRAY_SIZE;
816             }
817         }
818
819         fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
820
821         for(ch = 1;ch < nb_display_channels; ch++) {
822             y = s->ytop + ch * h;
823             fill_rectangle(screen,
824                            s->xleft, y, s->width, 1,
825                            fgcolor);
826         }
827         SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
828     }else{
829         nb_display_channels= FFMIN(nb_display_channels, 2);
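        /* (re)create the RDFT context whenever the window height, and thus
           the required transform size, changes */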
830         if(rdft_bits != s->rdft_bits){
831             av_rdft_end(s->rdft);
832             av_free(s->rdft_data);
833             s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
834             s->rdft_bits= rdft_bits;
835             s->rdft_data= av_malloc(4*nb_freq*sizeof(*s->rdft_data));
836         }
837         {
838             FFTSample *data[2];
839             for(ch = 0;ch < nb_display_channels; ch++) {
840                 data[ch] = s->rdft_data + 2*nb_freq*ch;
841                 i = i_start + ch;
842                 for(x = 0; x < 2*nb_freq; x++) {
843                     double w= (x-nb_freq)*(1.0/nb_freq);
844                     data[ch][x]= s->sample_array[i]*(1.0-w*w);
845                     i += channels;
846                     if (i >= SAMPLE_ARRAY_SIZE)
847                         i -= SAMPLE_ARRAY_SIZE;
848                 }
849                 av_rdft_calc(s->rdft, data[ch]);
850             }
851             // least efficient way to do this, we should of course directly access the data, but it's more than fast enough
852             for(y=0; y<s->height; y++){
853                 double w= 1/sqrt(nb_freq);
854                 int a= sqrt(w*sqrt(data[0][2*y+0]*data[0][2*y+0] + data[0][2*y+1]*data[0][2*y+1]));
855                 int b= (nb_display_channels == 2 ) ? sqrt(w*sqrt(data[1][2*y+0]*data[1][2*y+0]
856                        + data[1][2*y+1]*data[1][2*y+1])) : a;
857                 a= FFMIN(a,255);
858                 b= FFMIN(b,255);
859                 fgcolor = SDL_MapRGB(screen->format, a, b, (a+b)/2);
860
861                 fill_rectangle(screen,
862                             s->xpos, s->height-y, 1, 1,
863                             fgcolor);
864             }
865         }
866         SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
867         s->xpos++;
868         if(s->xpos >= s->width)
869             s->xpos= s->xleft;
870     }
871 }
872
873 static int video_open(VideoState *is){
874     int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
875     int w,h;
876
877     if(is_full_screen) flags |= SDL_FULLSCREEN;
878     else               flags |= SDL_RESIZABLE;
879
880     if (is_full_screen && fs_screen_width) {
881         w = fs_screen_width;
882         h = fs_screen_height;
883     } else if(!is_full_screen && screen_width){
884         w = screen_width;
885         h = screen_height;
886 #if CONFIG_AVFILTER
887     }else if (is->out_video_filter && is->out_video_filter->inputs[0]){
888         w = is->out_video_filter->inputs[0]->w;
889         h = is->out_video_filter->inputs[0]->h;
890 #else
891     }else if (is->video_st && is->video_st->codec->width){
892         w = is->video_st->codec->width;
893         h = is->video_st->codec->height;
894 #endif
895     } else {
896         w = 640;
897         h = 480;
898     }
899     if(screen && is->width == screen->w && screen->w == w
900        && is->height== screen->h && screen->h == h)
901         return 0;
902
903 #ifndef __APPLE__
904     screen = SDL_SetVideoMode(w, h, 0, flags);
905 #else
906     /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
907     screen = SDL_SetVideoMode(w, h, 24, flags);
908 #endif
909     if (!screen) {
910         fprintf(stderr, "SDL: could not set video mode - exiting\n");
911         return -1;
912     }
913     if (!window_title)
914         window_title = input_filename;
915     SDL_WM_SetCaption(window_title, window_title);
916
917     is->width = screen->w;
918     is->height = screen->h;
919
920     return 0;
921 }
922
923 /* display the current picture, if any */
924 static void video_display(VideoState *is)
925 {
926     if(!screen)
927         video_open(cur_stream);
928     if (is->audio_st && is->show_audio)
929         video_audio_display(is);
930     else if (is->video_st)
931         video_image_display(is);
932 }
933
934 static int refresh_thread(void *opaque)
935 {
936     VideoState *is= opaque;
937     while(!is->abort_request){
938         SDL_Event event;
939         event.type = FF_REFRESH_EVENT;
940         event.user.data1 = opaque;
941         if(!is->refresh){
942             is->refresh=1;
943             SDL_PushEvent(&event);
944         }
945         usleep(is->audio_st && is->show_audio ? rdftspeed*1000 : 5000); //FIXME ideally we should wait for the correct time, but SDL's event passing is so slow it would be silly
946     }
947     return 0;
948 }
949
950 /* get the current audio clock value */
951 static double get_audio_clock(VideoState *is)
952 {
953     double pts;
954     int hw_buf_size, bytes_per_sec;
955     pts = is->audio_clock;
956     hw_buf_size = audio_write_get_buf_size(is);
957     bytes_per_sec = 0;
958     if (is->audio_st) {
959         bytes_per_sec = is->audio_st->codec->sample_rate *
960             2 * is->audio_st->codec->channels;
961     }
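    /* audio_clock marks the end of the decoded data; subtract what is still
       waiting in the output buffer to get the currently played position */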
962     if (bytes_per_sec)
963         pts -= (double)hw_buf_size / bytes_per_sec;
964     return pts;
965 }
966
967 /* get the current video clock value */
968 static double get_video_clock(VideoState *is)
969 {
970     if (is->paused) {
971         return is->video_current_pts;
972     } else {
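        /* the drift is pts minus the time at which it was last updated, so
           adding the current time extrapolates the clock while playing */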
973         return is->video_current_pts_drift + av_gettime() / 1000000.0;
974     }
975 }
976
977 /* get the current external clock value */
978 static double get_external_clock(VideoState *is)
979 {
980     int64_t ti;
981     ti = av_gettime();
982     return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
983 }
984
985 /* get the current master clock value */
986 static double get_master_clock(VideoState *is)
987 {
988     double val;
989
990     if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
991         if (is->video_st)
992             val = get_video_clock(is);
993         else
994             val = get_audio_clock(is);
995     } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
996         if (is->audio_st)
997             val = get_audio_clock(is);
998         else
999             val = get_video_clock(is);
1000     } else {
1001         val = get_external_clock(is);
1002     }
1003     return val;
1004 }
1005
1006 /* seek in the stream */
1007 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1008 {
1009     if (!is->seek_req) {
1010         is->seek_pos = pos;
1011         is->seek_rel = rel;
1012         is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1013         if (seek_by_bytes)
1014             is->seek_flags |= AVSEEK_FLAG_BYTE;
1015         is->seek_req = 1;
1016     }
1017 }
1018
1019 /* pause or resume the video */
1020 static void stream_pause(VideoState *is)
1021 {
1022     if (is->paused) {
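        /* on resume, advance frame_timer by the time spent paused so that
           playback does not try to catch up */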
1023         is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
1024         if(is->read_pause_return != AVERROR(ENOSYS)){
1025             is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
1026         }
1027         is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
1028     }
1029     is->paused = !is->paused;
1030 }
1031
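/* compute the time at which the given frame should be displayed, adjusting
   the nominal delay toward the master clock when video is not the master */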
1032 static double compute_target_time(double frame_current_pts, VideoState *is)
1033 {
1034     double delay, sync_threshold, diff;
1035
1036     /* compute nominal delay */
1037     delay = frame_current_pts - is->frame_last_pts;
1038     if (delay <= 0 || delay >= 10.0) {
1039         /* if incorrect delay, use previous one */
1040         delay = is->frame_last_delay;
1041     } else {
1042         is->frame_last_delay = delay;
1043     }
1044     is->frame_last_pts = frame_current_pts;
1045
1046     /* update delay to follow master synchronisation source */
1047     if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1048          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1049         /* if video is slave, we try to correct big delays by
1050            duplicating or deleting a frame */
1051         diff = get_video_clock(is) - get_master_clock(is);
1052
1053         /* skip or repeat frame. We take into account the
1054            delay to compute the threshold. I still don't know
1055            if it is the best guess */
1056         sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1057         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1058             if (diff <= -sync_threshold)
1059                 delay = 0;
1060             else if (diff >= sync_threshold)
1061                 delay = 2 * delay;
1062         }
1063     }
1064     is->frame_timer += delay;
1065 #if defined(DEBUG_SYNC)
1066     printf("video: delay=%0.3f pts=%0.3f A-V=%f\n",
1067             delay, frame_current_pts, -diff);
1068 #endif
1069
1070     return is->frame_timer;
1071 }
1072
1073 /* called to display each frame */
1074 static void video_refresh_timer(void *opaque)
1075 {
1076     VideoState *is = opaque;
1077     VideoPicture *vp;
1078
1079     SubPicture *sp, *sp2;
1080
1081     if (is->video_st) {
1082 retry:
1083         if (is->pictq_size == 0) {
1084             // nothing to do, no picture to display in the queue
1085         } else {
1086             double time= av_gettime()/1000000.0;
1087             double next_target;
1088             /* dequeue the picture */
1089             vp = &is->pictq[is->pictq_rindex];
1090
1091             if(time < vp->target_clock)
1092                 return;
1093             /* update current video pts */
1094             is->video_current_pts = vp->pts;
1095             is->video_current_pts_drift = is->video_current_pts - time;
1096             is->video_current_pos = vp->pos;
1097             if(is->pictq_size > 1){
1098                 VideoPicture *nextvp= &is->pictq[(is->pictq_rindex+1)%VIDEO_PICTURE_QUEUE_SIZE];
1099                 assert(nextvp->target_clock >= vp->target_clock);
1100                 next_target= nextvp->target_clock;
1101             }else{
1102                 next_target= vp->target_clock + is->video_clock - vp->pts; //FIXME pass durations cleanly
1103             }
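            /* we are already past the next frame's target time: increase the
               decoder-side skip ratio and, if badly late, drop this picture */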
1104             if(framedrop && time > next_target){
1105                 is->skip_frames *= 1.0 + FRAME_SKIP_FACTOR;
1106                 if(is->pictq_size > 1 || time > next_target + 0.5){
1107                     /* update queue size and signal for next picture */
1108                     if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1109                         is->pictq_rindex = 0;
1110
1111                     SDL_LockMutex(is->pictq_mutex);
1112                     is->pictq_size--;
1113                     SDL_CondSignal(is->pictq_cond);
1114                     SDL_UnlockMutex(is->pictq_mutex);
1115                     goto retry;
1116                 }
1117             }
1118
1119             if(is->subtitle_st) {
1120                 if (is->subtitle_stream_changed) {
1121                     SDL_LockMutex(is->subpq_mutex);
1122
1123                     while (is->subpq_size) {
1124                         free_subpicture(&is->subpq[is->subpq_rindex]);
1125
1126                         /* update queue size and signal for next picture */
1127                         if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1128                             is->subpq_rindex = 0;
1129
1130                         is->subpq_size--;
1131                     }
1132                     is->subtitle_stream_changed = 0;
1133
1134                     SDL_CondSignal(is->subpq_cond);
1135                     SDL_UnlockMutex(is->subpq_mutex);
1136                 } else {
1137                     if (is->subpq_size > 0) {
1138                         sp = &is->subpq[is->subpq_rindex];
1139
1140                         if (is->subpq_size > 1)
1141                             sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1142                         else
1143                             sp2 = NULL;
1144
1145                         if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1146                                 || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1147                         {
1148                             free_subpicture(sp);
1149
1150                             /* update queue size and signal for next picture */
1151                             if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1152                                 is->subpq_rindex = 0;
1153
1154                             SDL_LockMutex(is->subpq_mutex);
1155                             is->subpq_size--;
1156                             SDL_CondSignal(is->subpq_cond);
1157                             SDL_UnlockMutex(is->subpq_mutex);
1158                         }
1159                     }
1160                 }
1161             }
1162
1163             /* display picture */
1164             if (!display_disable)
1165                 video_display(is);
1166
1167             /* update queue size and signal for next picture */
1168             if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1169                 is->pictq_rindex = 0;
1170
1171             SDL_LockMutex(is->pictq_mutex);
1172             is->pictq_size--;
1173             SDL_CondSignal(is->pictq_cond);
1174             SDL_UnlockMutex(is->pictq_mutex);
1175         }
1176     } else if (is->audio_st) {
1177         /* draw the next audio frame */
1178
1179         /* if there is only an audio stream, display the audio bars (better
1180            than nothing, just to test the implementation) */
1181
1182         /* display picture */
1183         if (!display_disable)
1184             video_display(is);
1185     }
1186     if (show_status) {
1187         static int64_t last_time;
1188         int64_t cur_time;
1189         int aqsize, vqsize, sqsize;
1190         double av_diff;
1191
1192         cur_time = av_gettime();
1193         if (!last_time || (cur_time - last_time) >= 30000) {
1194             aqsize = 0;
1195             vqsize = 0;
1196             sqsize = 0;
1197             if (is->audio_st)
1198                 aqsize = is->audioq.size;
1199             if (is->video_st)
1200                 vqsize = is->videoq.size;
1201             if (is->subtitle_st)
1202                 sqsize = is->subtitleq.size;
1203             av_diff = 0;
1204             if (is->audio_st && is->video_st)
1205                 av_diff = get_audio_clock(is) - get_video_clock(is);
1206             printf("%7.2f A-V:%7.3f s:%3.1f aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
1207                    get_master_clock(is), av_diff, FFMAX(is->skip_frames-1, 0), aqsize / 1024, vqsize / 1024, sqsize, is->pts_ctx.num_faulty_dts, is->pts_ctx.num_faulty_pts);
1208             fflush(stdout);
1209             last_time = cur_time;
1210         }
1211     }
1212 }
1213
1214 static void stream_close(VideoState *is)
1215 {
1216     VideoPicture *vp;
1217     int i;
1218     /* XXX: use a special url_shutdown call to abort parse cleanly */
1219     is->abort_request = 1;
1220     SDL_WaitThread(is->parse_tid, NULL);
1221     SDL_WaitThread(is->refresh_tid, NULL);
1222
1223     /* free all pictures */
1224     for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
1225         vp = &is->pictq[i];
1226 #if CONFIG_AVFILTER
1227         if (vp->picref) {
1228             avfilter_unref_buffer(vp->picref);
1229             vp->picref = NULL;
1230         }
1231 #endif
1232         if (vp->bmp) {
1233             SDL_FreeYUVOverlay(vp->bmp);
1234             vp->bmp = NULL;
1235         }
1236     }
1237     SDL_DestroyMutex(is->pictq_mutex);
1238     SDL_DestroyCond(is->pictq_cond);
1239     SDL_DestroyMutex(is->subpq_mutex);
1240     SDL_DestroyCond(is->subpq_cond);
1241 #if !CONFIG_AVFILTER
1242     if (is->img_convert_ctx)
1243         sws_freeContext(is->img_convert_ctx);
1244 #endif
1245     av_free(is);
1246 }
1247
1248 static void do_exit(void)
1249 {
1250     if (cur_stream) {
1251         stream_close(cur_stream);
1252         cur_stream = NULL;
1253     }
1254     uninit_opts();
1255 #if CONFIG_AVFILTER
1256     avfilter_uninit();
1257 #endif
1258     if (show_status)
1259         printf("\n");
1260     SDL_Quit();
1261     av_log(NULL, AV_LOG_QUIET, "");
1262     exit(0);
1263 }
1264
1265 /* allocate a picture (this needs to be done in the main thread to avoid
1266    potential locking problems) */
1267 static void alloc_picture(void *opaque)
1268 {
1269     VideoState *is = opaque;
1270     VideoPicture *vp;
1271
1272     vp = &is->pictq[is->pictq_windex];
1273
1274     if (vp->bmp)
1275         SDL_FreeYUVOverlay(vp->bmp);
1276
1277 #if CONFIG_AVFILTER
1278     if (vp->picref)
1279         avfilter_unref_buffer(vp->picref);
1280     vp->picref = NULL;
1281
1282     vp->width   = is->out_video_filter->inputs[0]->w;
1283     vp->height  = is->out_video_filter->inputs[0]->h;
1284     vp->pix_fmt = is->out_video_filter->inputs[0]->format;
1285 #else
1286     vp->width   = is->video_st->codec->width;
1287     vp->height  = is->video_st->codec->height;
1288     vp->pix_fmt = is->video_st->codec->pix_fmt;
1289 #endif
1290
1291     vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
1292                                    SDL_YV12_OVERLAY,
1293                                    screen);
1294     if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
1295         /* SDL allocates a buffer smaller than requested if the video
1296          * overlay hardware is unable to support the requested size. */
1297         fprintf(stderr, "Error: the video system does not support an image\n"
1298                         "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n"
1299                         "to reduce the image size.\n", vp->width, vp->height );
1300         do_exit();
1301     }
1302
1303     SDL_LockMutex(is->pictq_mutex);
1304     vp->allocated = 1;
1305     SDL_CondSignal(is->pictq_cond);
1306     SDL_UnlockMutex(is->pictq_mutex);
1307 }
1308
1309 /**
1310  * Queue a decoded frame for display.
1311  * @param pts the dts of the pkt / pts of the frame, guessed if not known
1312  */
1313 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, int64_t pos)
1314 {
1315     VideoPicture *vp;
1316     int dst_pix_fmt;
1317 #if CONFIG_AVFILTER
1318     AVPicture pict_src;
1319 #endif
1320     /* wait until we have space to put a new picture */
1321     SDL_LockMutex(is->pictq_mutex);
1322
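    /* the picture queue is full while no refresh is pending: the decoder is
       ahead of the display, so decay the frame skip ratio */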
1323     if(is->pictq_size>=VIDEO_PICTURE_QUEUE_SIZE && !is->refresh)
1324         is->skip_frames= FFMAX(1.0 - FRAME_SKIP_FACTOR, is->skip_frames * (1.0-FRAME_SKIP_FACTOR));
1325
1326     while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1327            !is->videoq.abort_request) {
1328         SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1329     }
1330     SDL_UnlockMutex(is->pictq_mutex);
1331
1332     if (is->videoq.abort_request)
1333         return -1;
1334
1335     vp = &is->pictq[is->pictq_windex];
1336
1337     /* alloc or resize hardware picture buffer */
1338     if (!vp->bmp ||
1339 #if CONFIG_AVFILTER
1340         vp->width  != is->out_video_filter->inputs[0]->w ||
1341         vp->height != is->out_video_filter->inputs[0]->h) {
1342 #else
1343         vp->width != is->video_st->codec->width ||
1344         vp->height != is->video_st->codec->height) {
1345 #endif
1346         SDL_Event event;
1347
1348         vp->allocated = 0;
1349
1350         /* the allocation must be done in the main thread to avoid
1351            locking problems */
1352         event.type = FF_ALLOC_EVENT;
1353         event.user.data1 = is;
1354         SDL_PushEvent(&event);
1355
1356         /* wait until the picture is allocated */
1357         SDL_LockMutex(is->pictq_mutex);
1358         while (!vp->allocated && !is->videoq.abort_request) {
1359             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1360         }
1361         SDL_UnlockMutex(is->pictq_mutex);
1362
1363         if (is->videoq.abort_request)
1364             return -1;
1365     }
1366
1367     /* if the frame is not skipped, then display it */
1368     if (vp->bmp) {
1369         AVPicture pict;
1370 #if CONFIG_AVFILTER
1371         if(vp->picref)
1372             avfilter_unref_buffer(vp->picref);
1373         vp->picref = src_frame->opaque;
1374 #endif
1375
1376         /* get a pointer on the bitmap */
1377         SDL_LockYUVOverlay (vp->bmp);
1378
1379         dst_pix_fmt = PIX_FMT_YUV420P;
1380         memset(&pict,0,sizeof(AVPicture));
1381         pict.data[0] = vp->bmp->pixels[0];
1382         pict.data[1] = vp->bmp->pixels[2];
1383         pict.data[2] = vp->bmp->pixels[1];
1384
1385         pict.linesize[0] = vp->bmp->pitches[0];
1386         pict.linesize[1] = vp->bmp->pitches[2];
1387         pict.linesize[2] = vp->bmp->pitches[1];
1388
1389 #if CONFIG_AVFILTER
1390         pict_src.data[0] = src_frame->data[0];
1391         pict_src.data[1] = src_frame->data[1];
1392         pict_src.data[2] = src_frame->data[2];
1393
1394         pict_src.linesize[0] = src_frame->linesize[0];
1395         pict_src.linesize[1] = src_frame->linesize[1];
1396         pict_src.linesize[2] = src_frame->linesize[2];
1397
1398         //FIXME use direct rendering
1399         av_picture_copy(&pict, &pict_src,
1400                         vp->pix_fmt, vp->width, vp->height);
1401 #else
1402         sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
1403         is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
1404             vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
1405             dst_pix_fmt, sws_flags, NULL, NULL, NULL);
1406         if (is->img_convert_ctx == NULL) {
1407             fprintf(stderr, "Cannot initialize the conversion context\n");
1408             exit(1);
1409         }
1410         sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1411                   0, vp->height, pict.data, pict.linesize);
1412 #endif
1413         /* update the bitmap content */
1414         SDL_UnlockYUVOverlay(vp->bmp);
1415
1416         vp->pts = pts;
1417         vp->pos = pos;
1418
1419         /* now we can update the picture count */
1420         if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1421             is->pictq_windex = 0;
1422         SDL_LockMutex(is->pictq_mutex);
1423         vp->target_clock= compute_target_time(vp->pts, is);
1424
1425         is->pictq_size++;
1426         SDL_UnlockMutex(is->pictq_mutex);
1427     }
1428     return 0;
1429 }
1430
1431 /**
1432  * compute the exact PTS for the picture if it is omitted in the stream
1433  * @param pts1 the dts of the pkt / pts of the frame
1434  */
1435 static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
1436 {
1437     double frame_delay, pts;
1438
1439     pts = pts1;
1440
1441     if (pts != 0) {
1442         /* update video clock with pts, if present */
1443         is->video_clock = pts;
1444     } else {
1445         pts = is->video_clock;
1446     }
1447     /* update video clock for next frame */
1448     frame_delay = av_q2d(is->video_st->codec->time_base);
1449     /* for MPEG2, the frame can be repeated, so we update the
1450        clock accordingly */
1451     frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1452     is->video_clock += frame_delay;
1453
1454     return queue_picture(is, src_frame, pts, pos);
1455 }
1456
1457 static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
1458 {
1459     int len1, got_picture, i;
1460
1461     if (packet_queue_get(&is->videoq, pkt, 1) < 0)
1462         return -1;
1463
1464     if (pkt->data == flush_pkt.data) {
1465         avcodec_flush_buffers(is->video_st->codec);
1466
1467         SDL_LockMutex(is->pictq_mutex);
1468         // Make sure there are no long-delay timers (ideally we should just flush the queue, but that's harder)
1469         for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
1470             is->pictq[i].target_clock= 0;
1471         }
1472         while (is->pictq_size && !is->videoq.abort_request) {
1473             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1474         }
1475         is->video_current_pos = -1;
1476         SDL_UnlockMutex(is->pictq_mutex);
1477
1478         init_pts_correction(&is->pts_ctx);
1479         is->frame_last_pts = AV_NOPTS_VALUE;
1480         is->frame_last_delay = 0;
1481         is->frame_timer = (double)av_gettime() / 1000000.0;
1482         is->skip_frames = 1;
1483         is->skip_frames_index = 0;
1484         return 0;
1485     }
1486
1487     len1 = avcodec_decode_video2(is->video_st->codec,
1488                                  frame, &got_picture,
1489                                  pkt);
1490
1491     if (got_picture) {
1492         if (decoder_reorder_pts == -1) {
1493             *pts = guess_correct_pts(&is->pts_ctx, frame->pkt_pts, frame->pkt_dts);
1494         } else if (decoder_reorder_pts) {
1495             *pts = frame->pkt_pts;
1496         } else {
1497             *pts = frame->pkt_dts;
1498         }
1499
1500         if (*pts == AV_NOPTS_VALUE) {
1501             *pts = 0;
1502         }
1503
1504         is->skip_frames_index += 1;
1505         if(is->skip_frames_index >= is->skip_frames){
1506             is->skip_frames_index -= FFMAX(is->skip_frames, 1.0);
1507             return 1;
1508         }
1509
1510     }
1511     return 0;
1512 }
1513
1514 #if CONFIG_AVFILTER
1515 typedef struct {
1516     VideoState *is;
1517     AVFrame *frame;
1518     int use_dr1;
1519 } FilterPriv;
1520
1521 static int input_get_buffer(AVCodecContext *codec, AVFrame *pic)
1522 {
1523     AVFilterContext *ctx = codec->opaque;
1524     AVFilterBufferRef  *ref;
1525     int perms = AV_PERM_WRITE;
1526     int i, w, h, stride[4];
1527     unsigned edge;
1528     int pixel_size;
1529
1530     if (codec->codec->capabilities & CODEC_CAP_NEG_LINESIZES)
1531         perms |= AV_PERM_NEG_LINESIZES;
1532
1533     if(pic->buffer_hints & FF_BUFFER_HINTS_VALID) {
1534         if(pic->buffer_hints & FF_BUFFER_HINTS_READABLE) perms |= AV_PERM_READ;
1535         if(pic->buffer_hints & FF_BUFFER_HINTS_PRESERVE) perms |= AV_PERM_PRESERVE;
1536         if(pic->buffer_hints & FF_BUFFER_HINTS_REUSABLE) perms |= AV_PERM_REUSE2;
1537     }
1538     if(pic->reference) perms |= AV_PERM_READ | AV_PERM_PRESERVE;
1539
1540     w = codec->width;
1541     h = codec->height;
1542     avcodec_align_dimensions2(codec, &w, &h, stride);
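    /* unless the codec handles edge emulation itself, enlarge the buffer so
       the decoder gets the border it needs around the visible picture */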
1543     edge = codec->flags & CODEC_FLAG_EMU_EDGE ? 0 : avcodec_get_edge_width();
1544     w += edge << 1;
1545     h += edge << 1;
1546
1547     if(!(ref = avfilter_get_video_buffer(ctx->outputs[0], perms, w, h)))
1548         return -1;
1549
1550     pixel_size = av_pix_fmt_descriptors[ref->format].comp[0].step_minus1+1;
1551     ref->video->w = codec->width;
1552     ref->video->h = codec->height;
1553     for(i = 0; i < 4; i ++) {
1554         unsigned hshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_w : 0;
1555         unsigned vshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_h : 0;
1556
1557         if (ref->data[i]) {
1558             ref->data[i]    += ((edge * pixel_size) >> hshift) + ((edge * ref->linesize[i]) >> vshift);
1559         }
1560         pic->data[i]     = ref->data[i];
1561         pic->linesize[i] = ref->linesize[i];
1562     }
1563     pic->opaque = ref;
1564     pic->age    = INT_MAX;
1565     pic->type   = FF_BUFFER_TYPE_USER;
1566     pic->reordered_opaque = codec->reordered_opaque;
1567     if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
1568     else           pic->pkt_pts = AV_NOPTS_VALUE;
1569     return 0;
1570 }
1571
1572 static void input_release_buffer(AVCodecContext *codec, AVFrame *pic)
1573 {
1574     memset(pic->data, 0, sizeof(pic->data));
1575     avfilter_unref_buffer(pic->opaque);
1576 }
1577
1578 static int input_reget_buffer(AVCodecContext *codec, AVFrame *pic)
1579 {
1580     AVFilterBufferRef *ref = pic->opaque;
1581
1582     if (pic->data[0] == NULL) {
1583         pic->buffer_hints |= FF_BUFFER_HINTS_READABLE;
1584         return codec->get_buffer(codec, pic);
1585     }
1586
1587     if ((codec->width != ref->video->w) || (codec->height != ref->video->h) ||
1588         (codec->pix_fmt != ref->format)) {
1589         av_log(codec, AV_LOG_ERROR, "Picture properties changed.\n");
1590         return -1;
1591     }
1592
1593     pic->reordered_opaque = codec->reordered_opaque;
1594     if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
1595     else           pic->pkt_pts = AV_NOPTS_VALUE;
1596     return 0;
1597 }
1598
1599 static int input_init(AVFilterContext *ctx, const char *args, void *opaque)
1600 {
1601     FilterPriv *priv = ctx->priv;
1602     AVCodecContext *codec;
1603     if(!opaque) return -1;
1604
1605     priv->is = opaque;
1606     codec    = priv->is->video_st->codec;
1607     codec->opaque = ctx;
1608     if(codec->codec->capabilities & CODEC_CAP_DR1) {
1609         priv->use_dr1 = 1;
1610         codec->get_buffer     = input_get_buffer;
1611         codec->release_buffer = input_release_buffer;
1612         codec->reget_buffer   = input_reget_buffer;
1613         codec->thread_safe_callbacks = 1;
1614     }
1615
1616     priv->frame = avcodec_alloc_frame();
1617
1618     return 0;
1619 }
1620
1621 static void input_uninit(AVFilterContext *ctx)
1622 {
1623     FilterPriv *priv = ctx->priv;
1624     av_free(priv->frame);
1625 }
1626
1627 static int input_request_frame(AVFilterLink *link)
1628 {
1629     FilterPriv *priv = link->src->priv;
1630     AVFilterBufferRef *picref;
1631     int64_t pts = 0;
1632     AVPacket pkt;
1633     int ret;
1634
1635     while (!(ret = get_video_frame(priv->is, priv->frame, &pts, &pkt)))
1636         av_free_packet(&pkt);
1637     if (ret < 0)
1638         return -1;
1639
1640     if(priv->use_dr1) {
1641         picref = avfilter_ref_buffer(priv->frame->opaque, ~0);
1642     } else {
1643         picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, link->w, link->h);
1644         av_image_copy(picref->data, picref->linesize,
1645                       priv->frame->data, priv->frame->linesize,
1646                       picref->format, link->w, link->h);
1647     }
1648     av_free_packet(&pkt);
1649
1650     picref->pts = pts;
1651     picref->pos = pkt.pos;
1652     picref->video->pixel_aspect = priv->is->video_st->codec->sample_aspect_ratio;
1653     avfilter_start_frame(link, picref);
1654     avfilter_draw_slice(link, 0, link->h, 1);
1655     avfilter_end_frame(link);
1656
1657     return 0;
1658 }
1659
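/* the source filter outputs only the pixel format of the video decoder */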
1660 static int input_query_formats(AVFilterContext *ctx)
1661 {
1662     FilterPriv *priv = ctx->priv;
1663     enum PixelFormat pix_fmts[] = {
1664         priv->is->video_st->codec->pix_fmt, PIX_FMT_NONE
1665     };
1666
1667     avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts));
1668     return 0;
1669 }
1670
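/* propagate the decoder dimensions and the stream time base to the output link */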
1671 static int input_config_props(AVFilterLink *link)
1672 {
1673     FilterPriv *priv  = link->src->priv;
1674     AVCodecContext *c = priv->is->video_st->codec;
1675
1676     link->w = c->width;
1677     link->h = c->height;
1678     link->time_base = priv->is->video_st->time_base;
1679
1680     return 0;
1681 }
1682
1683 static AVFilter input_filter =
1684 {
1685     .name      = "ffplay_input",
1686
1687     .priv_size = sizeof(FilterPriv),
1688
1689     .init      = input_init,
1690     .uninit    = input_uninit,
1691
1692     .query_formats = input_query_formats,
1693
1694     .inputs    = (AVFilterPad[]) {{ .name = NULL }},
1695     .outputs   = (AVFilterPad[]) {{ .name = "default",
1696                                     .type = AVMEDIA_TYPE_VIDEO,
1697                                     .request_frame = input_request_frame,
1698                                     .config_props  = input_config_props, },
1699                                   { .name = NULL }},
1700 };
1701
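/* build the video filter graph: the "ffplay_input" source above, the optional
   user supplied filter chain (-vf) and the ffsink output converting to
   PIX_FMT_YUV420P for display */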
1702 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters)
1703 {
1704     char sws_flags_str[128];
1705     int ret;
1706     FFSinkContext ffsink_ctx = { .pix_fmt = PIX_FMT_YUV420P };
1707     AVFilterContext *filt_src = NULL, *filt_out = NULL;
1708     snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%d", sws_flags);
1709     graph->scale_sws_opts = av_strdup(sws_flags_str);
1710
1711     if ((ret = avfilter_graph_create_filter(&filt_src, &input_filter, "src",
1712                                             NULL, is, graph)) < 0)
1713         goto the_end;
1714     if ((ret = avfilter_graph_create_filter(&filt_out, &ffsink, "out",
1715                                             NULL, &ffsink_ctx, graph)) < 0)
1716         goto the_end;
1717
1718     if(vfilters) {
1719         AVFilterInOut *outputs = av_malloc(sizeof(AVFilterInOut));
1720         AVFilterInOut *inputs  = av_malloc(sizeof(AVFilterInOut));
1721
1722         outputs->name    = av_strdup("in");
1723         outputs->filter_ctx = filt_src;
1724         outputs->pad_idx = 0;
1725         outputs->next    = NULL;
1726
1727         inputs->name    = av_strdup("out");
1728         inputs->filter_ctx = filt_out;
1729         inputs->pad_idx = 0;
1730         inputs->next    = NULL;
1731
1732         if ((ret = avfilter_graph_parse(graph, vfilters, inputs, outputs, NULL)) < 0)
1733             goto the_end;
1734         av_freep(&vfilters);
1735     } else {
1736         if ((ret = avfilter_link(filt_src, 0, filt_out, 0)) < 0)
1737             goto the_end;
1738     }
1739
1740     if ((ret = avfilter_graph_config(graph, NULL)) < 0)
1741         goto the_end;
1742
1743     is->out_video_filter = filt_out;
1744 the_end:
1745     return ret;
1746 }
1747
1748 #endif  /* CONFIG_AVFILTER */
1749
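/* video decoding thread: pull decoded frames (through the filter graph when
   compiled with libavfilter), convert their pts to seconds and queue them for
   display with output_picture2() */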
1750 static int video_thread(void *arg)
1751 {
1752     VideoState *is = arg;
1753     AVFrame *frame= avcodec_alloc_frame();
1754     int64_t pts_int;
1755     double pts;
1756     int ret;
1757
1758 #if CONFIG_AVFILTER
1759     AVFilterGraph *graph = avfilter_graph_alloc();
1760     AVFilterContext *filt_out = NULL;
1761     int64_t pos;
1762
1763     if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
1764         goto the_end;
1765     filt_out = is->out_video_filter;
1766 #endif
1767
1768     for(;;) {
1769 #if !CONFIG_AVFILTER
1770         AVPacket pkt;
1771 #else
1772         AVFilterBufferRef *picref;
1773         AVRational tb;
1774 #endif
1775         while (is->paused && !is->videoq.abort_request)
1776             SDL_Delay(10);
1777 #if CONFIG_AVFILTER
1778         ret = get_filtered_video_frame(filt_out, frame, &picref, &tb);
1779         if (picref) {
1780             pts_int = picref->pts;
1781             pos     = picref->pos;
1782             frame->opaque = picref;
1783         }
1784
1785         if (av_cmp_q(tb, is->video_st->time_base)) {
1786             av_unused int64_t pts1 = pts_int;
1787             pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
1788             av_dlog(NULL, "video_thread(): "
1789                     "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
1790                     tb.num, tb.den, pts1,
1791                     is->video_st->time_base.num, is->video_st->time_base.den, pts_int);
1792         }
1793 #else
1794         ret = get_video_frame(is, frame, &pts_int, &pkt);
1795 #endif
1796
1797         if (ret < 0) goto the_end;
1798
1799         if (!ret)
1800             continue;
1801
1802         pts = pts_int*av_q2d(is->video_st->time_base);
1803
1804 #if CONFIG_AVFILTER
1805         ret = output_picture2(is, frame, pts, pos);
1806 #else
1807         ret = output_picture2(is, frame, pts,  pkt.pos);
1808         av_free_packet(&pkt);
1809 #endif
1810         if (ret < 0)
1811             goto the_end;
1812
1813         if (step)
1814             if (cur_stream)
1815                 stream_pause(cur_stream);
1816     }
1817  the_end:
1818 #if CONFIG_AVFILTER
1819     avfilter_graph_free(&graph);
1820 #endif
1821     av_free(frame);
1822     return 0;
1823 }
1824
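/* subtitle decoding thread: decode subtitle packets and, for bitmap subtitles
   (format == 0), convert their palette from RGBA to YUVA before queuing them
   in the subpicture queue */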
1825 static int subtitle_thread(void *arg)
1826 {
1827     VideoState *is = arg;
1828     SubPicture *sp;
1829     AVPacket pkt1, *pkt = &pkt1;
1830     int len1, got_subtitle;
1831     double pts;
1832     int i, j;
1833     int r, g, b, y, u, v, a;
1834
1835     for(;;) {
1836         while (is->paused && !is->subtitleq.abort_request) {
1837             SDL_Delay(10);
1838         }
1839         if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
1840             break;
1841
1842         if(pkt->data == flush_pkt.data){
1843             avcodec_flush_buffers(is->subtitle_st->codec);
1844             continue;
1845         }
1846         SDL_LockMutex(is->subpq_mutex);
1847         while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
1848                !is->subtitleq.abort_request) {
1849             SDL_CondWait(is->subpq_cond, is->subpq_mutex);
1850         }
1851         SDL_UnlockMutex(is->subpq_mutex);
1852
1853         if (is->subtitleq.abort_request)
1854             goto the_end;
1855
1856         sp = &is->subpq[is->subpq_windex];
1857
1858         /* NOTE: pts is the PTS of the _first_ subtitle beginning in
1859            this packet, if any */
1860         pts = 0;
1861         if (pkt->pts != AV_NOPTS_VALUE)
1862             pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;
1863
1864         len1 = avcodec_decode_subtitle2(is->subtitle_st->codec,
1865                                     &sp->sub, &got_subtitle,
1866                                     pkt);
1867         if (got_subtitle && sp->sub.format == 0) {
1868             sp->pts = pts;
1869
1870             for (i = 0; i < sp->sub.num_rects; i++)
1871             {
1872                 for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
1873                 {
1874                     RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
1875                     y = RGB_TO_Y_CCIR(r, g, b);
1876                     u = RGB_TO_U_CCIR(r, g, b, 0);
1877                     v = RGB_TO_V_CCIR(r, g, b, 0);
1878                     YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
1879                 }
1880             }
1881
1882             /* now we can update the picture count */
1883             if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
1884                 is->subpq_windex = 0;
1885             SDL_LockMutex(is->subpq_mutex);
1886             is->subpq_size++;
1887             SDL_UnlockMutex(is->subpq_mutex);
1888         }
1889         av_free_packet(pkt);
1890     }
1891  the_end:
1892     return 0;
1893 }
1894
1895 /* copy samples for viewing in the audio waveform / spectrum display */
1896 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1897 {
1898     int size, len, channels;
1899
1900     channels = is->audio_st->codec->channels;
1901
1902     size = samples_size / sizeof(short);
1903     while (size > 0) {
1904         len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1905         if (len > size)
1906             len = size;
1907         memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1908         samples += len;
1909         is->sample_array_index += len;
1910         if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1911             is->sample_array_index = 0;
1912         size -= len;
1913     }
1914 }
1915
1916 /* return the new audio buffer size (samples can be added or deleted
1917    to get better sync when the video or an external clock is the master) */
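/* The A-V difference is smoothed with an exponential average:
   audio_diff_cum = diff + c * audio_diff_cum with c = audio_diff_avg_coef, so
   avg_diff = audio_diff_cum * (1 - c) approximates the mean of roughly the
   last AUDIO_DIFF_AVG_NB measurements.  Once |avg_diff| exceeds
   audio_diff_threshold, the buffer size is moved towards
   samples_size + diff * sample_rate * n bytes (n = 2 * channels), clamped to
   +/- SAMPLE_CORRECTION_PERCENT_MAX percent; e.g. at 48 kHz stereo a 0.1 s
   error asks for a 19200 byte adjustment before clamping. */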
1918 static int synchronize_audio(VideoState *is, short *samples,
1919                              int samples_size1, double pts)
1920 {
1921     int n, samples_size;
1922     double ref_clock;
1923
1924     n = 2 * is->audio_st->codec->channels;
1925     samples_size = samples_size1;
1926
1927     /* if not master, then we try to remove or add samples to correct the clock */
1928     if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1929          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1930         double diff, avg_diff;
1931         int wanted_size, min_size, max_size, nb_samples;
1932
1933         ref_clock = get_master_clock(is);
1934         diff = get_audio_clock(is) - ref_clock;
1935
1936         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1937             is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1938             if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1939                 /* not enough measurements to have a correct estimate */
1940                 is->audio_diff_avg_count++;
1941             } else {
1942                 /* estimate the A-V difference */
1943                 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1944
1945                 if (fabs(avg_diff) >= is->audio_diff_threshold) {
1946                     wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
1947                     nb_samples = samples_size / n;
1948
1949                     min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1950                     max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1951                     if (wanted_size < min_size)
1952                         wanted_size = min_size;
1953                     else if (wanted_size > max_size)
1954                         wanted_size = max_size;
1955
1956                     /* add or remove samples to correct the sync */
1957                     if (wanted_size < samples_size) {
1958                         /* remove samples */
1959                         samples_size = wanted_size;
1960                     } else if (wanted_size > samples_size) {
1961                         uint8_t *samples_end, *q;
1962                         int nb;
1963
1964                         /* add samples */
1965                         nb = (wanted_size - samples_size);
1966                         samples_end = (uint8_t *)samples + samples_size - n;
1967                         q = samples_end + n;
1968                         while (nb > 0) {
1969                             memcpy(q, samples_end, n);
1970                             q += n;
1971                             nb -= n;
1972                         }
1973                         samples_size = wanted_size;
1974                     }
1975                 }
1976                 av_dlog(NULL, "diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
1977                         diff, avg_diff, samples_size - samples_size1,
1978                         is->audio_clock, is->video_clock, is->audio_diff_threshold);
1979             }
1980         } else {
1981             /* difference is too big: may be initial PTS errors, so
1982                reset the A-V filter */
1983             is->audio_diff_avg_count = 0;
1984             is->audio_diff_cum = 0;
1985         }
1986     }
1987
1988     return samples_size;
1989 }
1990
1991 /* decode one audio frame and return its uncompressed size in bytes */
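/* The packet being consumed is kept in is->audio_pkt_temp and may contain
   several frames.  Decoded data is converted to signed 16-bit samples through
   is->reformat_ctx when the decoder outputs another sample format, and
   is->audio_clock is advanced by the duration of the data returned. */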
1992 static int audio_decode_frame(VideoState *is, double *pts_ptr)
1993 {
1994     AVPacket *pkt_temp = &is->audio_pkt_temp;
1995     AVPacket *pkt = &is->audio_pkt;
1996     AVCodecContext *dec= is->audio_st->codec;
1997     int n, len1, data_size;
1998     double pts;
1999
2000     for(;;) {
2001         /* NOTE: the audio packet can contain several frames */
2002         while (pkt_temp->size > 0) {
2003             data_size = sizeof(is->audio_buf1);
2004             len1 = avcodec_decode_audio3(dec,
2005                                         (int16_t *)is->audio_buf1, &data_size,
2006                                         pkt_temp);
2007             if (len1 < 0) {
2008                 /* if error, we skip the frame */
2009                 pkt_temp->size = 0;
2010                 break;
2011             }
2012
2013             pkt_temp->data += len1;
2014             pkt_temp->size -= len1;
2015             if (data_size <= 0)
2016                 continue;
2017
2018             if (dec->sample_fmt != is->audio_src_fmt) {
2019                 if (is->reformat_ctx)
2020                     av_audio_convert_free(is->reformat_ctx);
2021                 is->reformat_ctx= av_audio_convert_alloc(AV_SAMPLE_FMT_S16, 1,
2022                                                          dec->sample_fmt, 1, NULL, 0);
2023                 if (!is->reformat_ctx) {
2024                     fprintf(stderr, "Cannot convert %s sample format to %s sample format\n",
2025                         av_get_sample_fmt_name(dec->sample_fmt),
2026                         av_get_sample_fmt_name(AV_SAMPLE_FMT_S16));
2027                     break;
2028                 }
2029                 is->audio_src_fmt= dec->sample_fmt;
2030             }
2031
2032             if (is->reformat_ctx) {
2033                 const void *ibuf[6]= {is->audio_buf1};
2034                 void *obuf[6]= {is->audio_buf2};
2035                 int istride[6]= {av_get_bits_per_sample_fmt(dec->sample_fmt)/8};
2036                 int ostride[6]= {2};
2037                 int len= data_size/istride[0];
2038                 if (av_audio_convert(is->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) {
2039                     printf("av_audio_convert() failed\n");
2040                     break;
2041                 }
2042                 is->audio_buf= is->audio_buf2;
2043                 /* FIXME: existing code assumes that data_size equals framesize*channels*2;
2044                           remove this legacy cruft */
2045                 data_size= len*2;
2046             }else{
2047                 is->audio_buf= is->audio_buf1;
2048             }
2049
2050             /* the pts of the decoded data is the current audio clock */
2051             pts = is->audio_clock;
2052             *pts_ptr = pts;
2053             n = 2 * dec->channels;
2054             is->audio_clock += (double)data_size /
2055                 (double)(n * dec->sample_rate);
2056 #ifdef DEBUG
2057             {
2058                 static double last_clock;
2059                 printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
2060                        is->audio_clock - last_clock,
2061                        is->audio_clock, pts);
2062                 last_clock = is->audio_clock;
2063             }
2064 #endif
2065             return data_size;
2066         }
2067
2068         /* free the current packet */
2069         if (pkt->data)
2070             av_free_packet(pkt);
2071
2072         if (is->paused || is->audioq.abort_request) {
2073             return -1;
2074         }
2075
2076         /* read next packet */
2077         if (packet_queue_get(&is->audioq, pkt, 1) < 0)
2078             return -1;
2079         if(pkt->data == flush_pkt.data){
2080             avcodec_flush_buffers(dec);
2081             continue;
2082         }
2083
2084         pkt_temp->data = pkt->data;
2085         pkt_temp->size = pkt->size;
2086
2087         /* update the audio clock with the packet pts, if available */
2088         if (pkt->pts != AV_NOPTS_VALUE) {
2089             is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
2090         }
2091     }
2092 }
2093
2094 /* prepare a new audio buffer */
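/* SDL pulls audio through this callback and it must always fill 'len' bytes
   of 'stream': data is fetched with audio_decode_frame(), resized by
   synchronize_audio(), and silence is output when decoding fails */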
2095 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2096 {
2097     VideoState *is = opaque;
2098     int audio_size, len1;
2099     double pts;
2100
2101     audio_callback_time = av_gettime();
2102
2103     while (len > 0) {
2104         if (is->audio_buf_index >= is->audio_buf_size) {
2105            audio_size = audio_decode_frame(is, &pts);
2106            if (audio_size < 0) {
2107                 /* if error, just output silence */
2108                is->audio_buf = is->audio_buf1;
2109                is->audio_buf_size = 1024;
2110                memset(is->audio_buf, 0, is->audio_buf_size);
2111            } else {
2112                if (is->show_audio)
2113                    update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2114                audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
2115                                               pts);
2116                is->audio_buf_size = audio_size;
2117            }
2118            is->audio_buf_index = 0;
2119         }
2120         len1 = is->audio_buf_size - is->audio_buf_index;
2121         if (len1 > len)
2122             len1 = len;
2123         memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2124         len -= len1;
2125         stream += len1;
2126         is->audio_buf_index += len1;
2127     }
2128 }
2129
2130 /* open a given stream. Return 0 if OK */
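/* The decoder context is configured from the command line options (lowres,
   skip_*, error handling, thread count), the decoder is opened and, depending
   on the stream type, the SDL audio device is opened or the video/subtitle
   decoding thread is started together with its packet queue. */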
2131 static int stream_component_open(VideoState *is, int stream_index)
2132 {
2133     AVFormatContext *ic = is->ic;
2134     AVCodecContext *avctx;
2135     AVCodec *codec;
2136     SDL_AudioSpec wanted_spec, spec;
2137
2138     if (stream_index < 0 || stream_index >= ic->nb_streams)
2139         return -1;
2140     avctx = ic->streams[stream_index]->codec;
2141
2142     /* limit the number of requested audio channels before opening the decoder */
2143     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2144         if (avctx->channels > 0) {
2145             avctx->request_channels = FFMIN(2, avctx->channels);
2146         } else {
2147             avctx->request_channels = 2;
2148         }
2149     }
2150
2151     codec = avcodec_find_decoder(avctx->codec_id);
2152     avctx->debug_mv = debug_mv;
2153     avctx->debug = debug;
2154     avctx->workaround_bugs = workaround_bugs;
2155     avctx->lowres = lowres;
2156     if(lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
2157     avctx->idct_algo= idct;
2158     if(fast) avctx->flags2 |= CODEC_FLAG2_FAST;
2159     avctx->skip_frame= skip_frame;
2160     avctx->skip_idct= skip_idct;
2161     avctx->skip_loop_filter= skip_loop_filter;
2162     avctx->error_recognition= error_recognition;
2163     avctx->error_concealment= error_concealment;
2164     avctx->thread_count= thread_count;
2165
2166     set_context_opts(avctx, avcodec_opts[avctx->codec_type], 0, codec);
2167
2168     if (!codec ||
2169         avcodec_open(avctx, codec) < 0)
2170         return -1;
2171
2172     /* prepare audio output */
2173     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2174         wanted_spec.freq = avctx->sample_rate;
2175         wanted_spec.format = AUDIO_S16SYS;
2176         wanted_spec.channels = avctx->channels;
2177         wanted_spec.silence = 0;
2178         wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
2179         wanted_spec.callback = sdl_audio_callback;
2180         wanted_spec.userdata = is;
2181         if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2182             fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
2183             return -1;
2184         }
2185         is->audio_hw_buf_size = spec.size;
2186         is->audio_src_fmt= AV_SAMPLE_FMT_S16;
2187     }
2188
2189     ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2190     switch(avctx->codec_type) {
2191     case AVMEDIA_TYPE_AUDIO:
2192         is->audio_stream = stream_index;
2193         is->audio_st = ic->streams[stream_index];
2194         is->audio_buf_size = 0;
2195         is->audio_buf_index = 0;
2196
2197         /* init averaging filter */
2198         is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2199         is->audio_diff_avg_count = 0;
2200         /* since we do not have a precise enough audio fifo fullness measure,
2201            we correct audio sync only if the error is larger than this threshold */
2202         is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / avctx->sample_rate;
2203
2204         memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
2205         packet_queue_init(&is->audioq);
2206         SDL_PauseAudio(0);
2207         break;
2208     case AVMEDIA_TYPE_VIDEO:
2209         is->video_stream = stream_index;
2210         is->video_st = ic->streams[stream_index];
2211
2212         packet_queue_init(&is->videoq);
2213         is->video_tid = SDL_CreateThread(video_thread, is);
2214         break;
2215     case AVMEDIA_TYPE_SUBTITLE:
2216         is->subtitle_stream = stream_index;
2217         is->subtitle_st = ic->streams[stream_index];
2218         packet_queue_init(&is->subtitleq);
2219
2220         is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
2221         break;
2222     default:
2223         break;
2224     }
2225     return 0;
2226 }
2227
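/* close one stream component: abort its packet queue, wake up and join the
   worker thread (or close the SDL audio device), then free the queue, close
   the decoder and mark the stream as discarded */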
2228 static void stream_component_close(VideoState *is, int stream_index)
2229 {
2230     AVFormatContext *ic = is->ic;
2231     AVCodecContext *avctx;
2232
2233     if (stream_index < 0 || stream_index >= ic->nb_streams)
2234         return;
2235     avctx = ic->streams[stream_index]->codec;
2236
2237     switch(avctx->codec_type) {
2238     case AVMEDIA_TYPE_AUDIO:
2239         packet_queue_abort(&is->audioq);
2240
2241         SDL_CloseAudio();
2242
2243         packet_queue_end(&is->audioq);
2244         if (is->reformat_ctx)
2245             av_audio_convert_free(is->reformat_ctx);
2246         is->reformat_ctx = NULL;
2247         break;
2248     case AVMEDIA_TYPE_VIDEO:
2249         packet_queue_abort(&is->videoq);
2250
2251         /* note: we also signal this mutex to make sure we deblock the
2252            video thread in all cases */
2253         SDL_LockMutex(is->pictq_mutex);
2254         SDL_CondSignal(is->pictq_cond);
2255         SDL_UnlockMutex(is->pictq_mutex);
2256
2257         SDL_WaitThread(is->video_tid, NULL);
2258
2259         packet_queue_end(&is->videoq);
2260         break;
2261     case AVMEDIA_TYPE_SUBTITLE:
2262         packet_queue_abort(&is->subtitleq);
2263
2264         /* note: we also signal this mutex to make sure we deblock the
2265            subtitle thread in all cases */
2266         SDL_LockMutex(is->subpq_mutex);
2267         is->subtitle_stream_changed = 1;
2268
2269         SDL_CondSignal(is->subpq_cond);
2270         SDL_UnlockMutex(is->subpq_mutex);
2271
2272         SDL_WaitThread(is->subtitle_tid, NULL);
2273
2274         packet_queue_end(&is->subtitleq);
2275         break;
2276     default:
2277         break;
2278     }
2279
2280     ic->streams[stream_index]->discard = AVDISCARD_ALL;
2281     avcodec_close(avctx);
2282     switch(avctx->codec_type) {
2283     case AVMEDIA_TYPE_AUDIO:
2284         is->audio_st = NULL;
2285         is->audio_stream = -1;
2286         break;
2287     case AVMEDIA_TYPE_VIDEO:
2288         is->video_st = NULL;
2289         is->video_stream = -1;
2290         break;
2291     case AVMEDIA_TYPE_SUBTITLE:
2292         is->subtitle_st = NULL;
2293         is->subtitle_stream = -1;
2294         break;
2295     default:
2296         break;
2297     }
2298 }
2299
2300 /* since we have only one decoding thread, we can use a global
2301    variable instead of a thread local variable */
2302 static VideoState *global_video_state;
2303
2304 static int decode_interrupt_cb(void)
2305 {
2306     return (global_video_state && global_video_state->abort_request);
2307 }
2308
2309 /* this thread gets the stream from the disk or the network */
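/* It opens the input file, selects the best audio, video and subtitle
   streams, then loops reading packets and dispatching them to the per-stream
   packet queues while handling pause, seek requests, queue limits and end of
   file. */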
2310 static int decode_thread(void *arg)
2311 {
2312     VideoState *is = arg;
2313     AVFormatContext *ic;
2314     int err, i, ret;
2315     int st_index[AVMEDIA_TYPE_NB];
2316     AVPacket pkt1, *pkt = &pkt1;
2317     AVFormatParameters params, *ap = &params;
2318     int eof=0;
2319     int pkt_in_play_range = 0;
2320
2321     ic = avformat_alloc_context();
2322
2323     memset(st_index, -1, sizeof(st_index));
2324     is->video_stream = -1;
2325     is->audio_stream = -1;
2326     is->subtitle_stream = -1;
2327
2328     global_video_state = is;
2329     avio_set_interrupt_cb(decode_interrupt_cb);
2330
2331     memset(ap, 0, sizeof(*ap));
2332
2333     ap->prealloced_context = 1;
2334     ap->width = frame_width;
2335     ap->height= frame_height;
2336     ap->time_base= (AVRational){1, 25};
2337     ap->pix_fmt = frame_pix_fmt;
2338
2339     set_context_opts(ic, avformat_opts, AV_OPT_FLAG_DECODING_PARAM, NULL);
2340
2341     err = av_open_input_file(&ic, is->filename, is->iformat, 0, ap);
2342     if (err < 0) {
2343         print_error(is->filename, err);
2344         ret = -1;
2345         goto fail;
2346     }
2347     is->ic = ic;
2348
2349     if(genpts)
2350         ic->flags |= AVFMT_FLAG_GENPTS;
2351
2352     /* Set AVCodecContext options so they will be seen by av_find_stream_info() */
2353     for (i = 0; i < ic->nb_streams; i++) {
2354         AVCodecContext *dec = ic->streams[i]->codec;
2355         switch (dec->codec_type) {
2356         case AVMEDIA_TYPE_AUDIO:
2357             set_context_opts(dec, avcodec_opts[AVMEDIA_TYPE_AUDIO],
2358                              AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_DECODING_PARAM,
2359                              NULL);
2360             break;
2361         case AVMEDIA_TYPE_VIDEO:
2362             set_context_opts(dec, avcodec_opts[AVMEDIA_TYPE_VIDEO],
2363                              AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM,
2364                              NULL);
2365             break;
2366         }
2367     }
2368
2369     err = av_find_stream_info(ic);
2370     if (err < 0) {
2371         fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
2372         ret = -1;
2373         goto fail;
2374     }
2375     if(ic->pb)
2376         ic->pb->eof_reached= 0; //FIXME hack, ffplay maybe should not use url_feof() to test for the end
2377
2378     if(seek_by_bytes<0)
2379         seek_by_bytes= !!(ic->iformat->flags & AVFMT_TS_DISCONT);
2380
2381     /* if seeking requested, we execute it */
2382     if (start_time != AV_NOPTS_VALUE) {
2383         int64_t timestamp;
2384
2385         timestamp = start_time;
2386         /* add the stream start time */
2387         if (ic->start_time != AV_NOPTS_VALUE)
2388             timestamp += ic->start_time;
2389         ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2390         if (ret < 0) {
2391             fprintf(stderr, "%s: could not seek to position %0.3f\n",
2392                     is->filename, (double)timestamp / AV_TIME_BASE);
2393         }
2394     }
2395
2396     for (i = 0; i < ic->nb_streams; i++)
2397         ic->streams[i]->discard = AVDISCARD_ALL;
2398     if (!video_disable)
2399         st_index[AVMEDIA_TYPE_VIDEO] =
2400             av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2401                                 wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2402     if (!audio_disable)
2403         st_index[AVMEDIA_TYPE_AUDIO] =
2404             av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2405                                 wanted_stream[AVMEDIA_TYPE_AUDIO],
2406                                 st_index[AVMEDIA_TYPE_VIDEO],
2407                                 NULL, 0);
2408     if (!video_disable)
2409         st_index[AVMEDIA_TYPE_SUBTITLE] =
2410             av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2411                                 wanted_stream[AVMEDIA_TYPE_SUBTITLE],
2412                                 (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2413                                  st_index[AVMEDIA_TYPE_AUDIO] :
2414                                  st_index[AVMEDIA_TYPE_VIDEO]),
2415                                 NULL, 0);
2416     if (show_status) {
2417         av_dump_format(ic, 0, is->filename, 0);
2418     }
2419
2420     /* open the streams */
2421     if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2422         stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2423     }
2424
2425     ret=-1;
2426     if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2427         ret= stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2428     }
2429     is->refresh_tid = SDL_CreateThread(refresh_thread, is);
2430     if(ret<0) {
2431         if (!display_disable)
2432             is->show_audio = 2;
2433     }
2434
2435     if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2436         stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2437     }
2438
2439     if (is->video_stream < 0 && is->audio_stream < 0) {
2440         fprintf(stderr, "%s: could not open codecs\n", is->filename);
2441         ret = -1;
2442         goto fail;
2443     }
2444
2445     for(;;) {
2446         if (is->abort_request)
2447             break;
2448         if (is->paused != is->last_paused) {
2449             is->last_paused = is->paused;
2450             if (is->paused)
2451                 is->read_pause_return= av_read_pause(ic);
2452             else
2453                 av_read_play(ic);
2454         }
2455 #if CONFIG_RTSP_DEMUXER
2456         if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
2457             /* wait 10 ms to avoid trying to get another packet */
2458             /* XXX: horrible */
2459             SDL_Delay(10);
2460             continue;
2461         }
2462 #endif
2463         if (is->seek_req) {
2464             int64_t seek_target= is->seek_pos;
2465             int64_t seek_min= is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2466             int64_t seek_max= is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2467 //FIXME the +-2 is due to rounding not being done in the correct direction when the
2468 //      seek_pos/seek_rel variables are generated
2469
2470             ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2471             if (ret < 0) {
2472                 fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
2473             }else{
2474                 if (is->audio_stream >= 0) {
2475                     packet_queue_flush(&is->audioq);
2476                     packet_queue_put(&is->audioq, &flush_pkt);
2477                 }
2478                 if (is->subtitle_stream >= 0) {
2479                     packet_queue_flush(&is->subtitleq);
2480                     packet_queue_put(&is->subtitleq, &flush_pkt);
2481                 }
2482                 if (is->video_stream >= 0) {
2483                     packet_queue_flush(&is->videoq);
2484                     packet_queue_put(&is->videoq, &flush_pkt);
2485                 }
2486             }
2487             is->seek_req = 0;
2488             eof= 0;
2489         }
2490
2491         /* if the queues are full, no need to read more */
2492         if (   is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2493             || (   (is->audioq   .size  > MIN_AUDIOQ_SIZE || is->audio_stream<0)
2494                 && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream<0)
2495                 && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream<0))) {
2496             /* wait 10 ms */
2497             SDL_Delay(10);
2498             continue;
2499         }
2500         if(eof) {
2501             if(is->video_stream >= 0){
2502                 av_init_packet(pkt);
2503                 pkt->data=NULL;
2504                 pkt->size=0;
2505                 pkt->stream_index= is->video_stream;
2506                 packet_queue_put(&is->videoq, pkt);
2507             }
2508             SDL_Delay(10);
2509             if(is->audioq.size + is->videoq.size + is->subtitleq.size ==0){
2510                 if(loop!=1 && (!loop || --loop)){
2511                     stream_seek(cur_stream, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
2512                 }else if(autoexit){
2513                     ret=AVERROR_EOF;
2514                     goto fail;
2515                 }
2516             }
2517             continue;
2518         }
2519         ret = av_read_frame(ic, pkt);
2520         if (ret < 0) {
2521             if (ret == AVERROR_EOF || (ic->pb && ic->pb->eof_reached))
2522                 eof=1;
2523             if (ic->pb && ic->pb->error)
2524                 break;
2525             SDL_Delay(100); /* wait for user event */
2526             continue;
2527         }
2528         /* check if packet is in play range specified by user, then queue, otherwise discard */
2529         pkt_in_play_range = duration == AV_NOPTS_VALUE ||
2530                 (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
2531                 av_q2d(ic->streams[pkt->stream_index]->time_base) -
2532                 (double)(start_time != AV_NOPTS_VALUE ? start_time : 0)/1000000
2533                 <= ((double)duration/1000000);
2534         if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
2535             packet_queue_put(&is->audioq, pkt);
2536         } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
2537             packet_queue_put(&is->videoq, pkt);
2538         } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
2539             packet_queue_put(&is->subtitleq, pkt);
2540         } else {
2541             av_free_packet(pkt);
2542         }
2543     }
2544     /* wait until the end */
2545     while (!is->abort_request) {
2546         SDL_Delay(100);
2547     }
2548
2549     ret = 0;
2550  fail:
2551     /* disable interrupting */
2552     global_video_state = NULL;
2553
2554     /* close each stream */
2555     if (is->audio_stream >= 0)
2556         stream_component_close(is, is->audio_stream);
2557     if (is->video_stream >= 0)
2558         stream_component_close(is, is->video_stream);
2559     if (is->subtitle_stream >= 0)
2560         stream_component_close(is, is->subtitle_stream);
2561     if (is->ic) {
2562         av_close_input_file(is->ic);
2563         is->ic = NULL; /* safety */
2564     }
2565     avio_set_interrupt_cb(NULL);
2566
2567     if (ret != 0) {
2568         SDL_Event event;
2569
2570         event.type = FF_QUIT_EVENT;
2571         event.user.data1 = is;
2572         SDL_PushEvent(&event);
2573     }
2574     return 0;
2575 }
2576
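/* allocate and initialize a VideoState for the given input, create the
   picture and subpicture queue mutexes/conditions and start the demuxing
   thread (decode_thread) */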
2577 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2578 {
2579     VideoState *is;
2580
2581     is = av_mallocz(sizeof(VideoState));
2582     if (!is)
2583         return NULL;
2584     av_strlcpy(is->filename, filename, sizeof(is->filename));
2585     is->iformat = iformat;
2586     is->ytop = 0;
2587     is->xleft = 0;
2588
2589     /* start video display */
2590     is->pictq_mutex = SDL_CreateMutex();
2591     is->pictq_cond = SDL_CreateCond();
2592
2593     is->subpq_mutex = SDL_CreateMutex();
2594     is->subpq_cond = SDL_CreateCond();
2595
2596     is->av_sync_type = av_sync_type;
2597     is->parse_tid = SDL_CreateThread(decode_thread, is);
2598     if (!is->parse_tid) {
2599         av_free(is);
2600         return NULL;
2601     }
2602     return is;
2603 }
2604
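/* switch to the next stream of the given type (audio, video or subtitle),
   wrapping around; for subtitles the cycle also includes "no subtitle" */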
2605 static void stream_cycle_channel(VideoState *is, int codec_type)
2606 {
2607     AVFormatContext *ic = is->ic;
2608     int start_index, stream_index;
2609     AVStream *st;
2610
2611     if (codec_type == AVMEDIA_TYPE_VIDEO)
2612         start_index = is->video_stream;
2613     else if (codec_type == AVMEDIA_TYPE_AUDIO)
2614         start_index = is->audio_stream;
2615     else
2616         start_index = is->subtitle_stream;
2617     if (start_index < (codec_type == AVMEDIA_TYPE_SUBTITLE ? -1 : 0))
2618         return;
2619     stream_index = start_index;
2620     for(;;) {
2621         if (++stream_index >= is->ic->nb_streams)
2622         {
2623             if (codec_type == AVMEDIA_TYPE_SUBTITLE)
2624             {
2625                 stream_index = -1;
2626                 goto the_end;
2627             } else
2628                 stream_index = 0;
2629         }
2630         if (stream_index == start_index)
2631             return;
2632         st = ic->streams[stream_index];
2633         if (st->codec->codec_type == codec_type) {
2634             /* check that parameters are OK */
2635             switch(codec_type) {
2636             case AVMEDIA_TYPE_AUDIO:
2637                 if (st->codec->sample_rate != 0 &&
2638                     st->codec->channels != 0)
2639                     goto the_end;
2640                 break;
2641             case AVMEDIA_TYPE_VIDEO:
2642             case AVMEDIA_TYPE_SUBTITLE:
2643                 goto the_end;
2644             default:
2645                 break;
2646             }
2647         }
2648     }
2649  the_end:
2650     stream_component_close(is, start_index);
2651     stream_component_open(is, stream_index);
2652 }
2653
2654
2655 static void toggle_full_screen(void)
2656 {
2657     is_full_screen = !is_full_screen;
2658     video_open(cur_stream);
2659 }
2660
2661 static void toggle_pause(void)
2662 {
2663     if (cur_stream)
2664         stream_pause(cur_stream);
2665     step = 0;
2666 }
2667
2668 static void step_to_next_frame(void)
2669 {
2670     if (cur_stream) {
2671         /* if the stream is paused, unpause it, then step */
2672         if (cur_stream->paused)
2673             stream_pause(cur_stream);
2674     }
2675     step = 1;
2676 }
2677
2678 static void toggle_audio_display(void)
2679 {
2680     if (cur_stream) {
2681         int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2682         cur_stream->show_audio = (cur_stream->show_audio + 1) % 3;
2683         fill_rectangle(screen,
2684                     cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height,
2685                     bgcolor);
2686         SDL_UpdateRect(screen, cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height);
2687     }
2688 }
2689
2690 /* handle an event sent by the GUI */
2691 static void event_loop(void)
2692 {
2693     SDL_Event event;
2694     double incr, pos, frac;
2695
2696     for(;;) {
2697         double x;
2698         SDL_WaitEvent(&event);
2699         switch(event.type) {
2700         case SDL_KEYDOWN:
2701             if (exit_on_keydown) {
2702                 do_exit();
2703                 break;
2704             }
2705             switch(event.key.keysym.sym) {
2706             case SDLK_ESCAPE:
2707             case SDLK_q:
2708                 do_exit();
2709                 break;
2710             case SDLK_f:
2711                 toggle_full_screen();
2712                 break;
2713             case SDLK_p:
2714             case SDLK_SPACE:
2715                 toggle_pause();
2716                 break;
2717             case SDLK_s: //S: Step to next frame
2718                 step_to_next_frame();
2719                 break;
2720             case SDLK_a:
2721                 if (cur_stream)
2722                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
2723                 break;
2724             case SDLK_v:
2725                 if (cur_stream)
2726                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
2727                 break;
2728             case SDLK_t:
2729                 if (cur_stream)
2730                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
2731                 break;
2732             case SDLK_w:
2733                 toggle_audio_display();
2734                 break;
2735             case SDLK_LEFT:
2736                 incr = -10.0;
2737                 goto do_seek;
2738             case SDLK_RIGHT:
2739                 incr = 10.0;
2740                 goto do_seek;
2741             case SDLK_UP:
2742                 incr = 60.0;
2743                 goto do_seek;
2744             case SDLK_DOWN:
2745                 incr = -60.0;
2746             do_seek:
2747                 if (cur_stream) {
2748                     if (seek_by_bytes) {
2749                         if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos>=0){
2750                             pos= cur_stream->video_current_pos;
2751                         }else if(cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos>=0){
2752                             pos= cur_stream->audio_pkt.pos;
2753                         }else
2754                             pos = avio_tell(cur_stream->ic->pb);
2755                         if (cur_stream->ic->bit_rate)
2756                             incr *= cur_stream->ic->bit_rate / 8.0;
2757                         else
2758                             incr *= 180000.0;
2759                         pos += incr;
2760                         stream_seek(cur_stream, pos, incr, 1);
2761                     } else {
2762                         pos = get_master_clock(cur_stream);
2763                         pos += incr;
2764                         stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
2765                     }
2766                 }
2767                 break;
2768             default:
2769                 break;
2770             }
2771             break;
2772         case SDL_MOUSEBUTTONDOWN:
2773             if (exit_on_mousedown) {
2774                 do_exit();
2775                 break;
2776             }
2777         case SDL_MOUSEMOTION:
2778             if(event.type ==SDL_MOUSEBUTTONDOWN){
2779                 x= event.button.x;
2780             }else{
2781                 if(event.motion.state != SDL_PRESSED)
2782                     break;
2783                 x= event.motion.x;
2784             }
2785             if (cur_stream) {
2786                 if(seek_by_bytes || cur_stream->ic->duration<=0){
2787                     uint64_t size=  avio_size(cur_stream->ic->pb);
2788                     stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
2789                 }else{
2790                     int64_t ts;
2791                     int ns, hh, mm, ss;
2792                     int tns, thh, tmm, tss;
2793                     tns = cur_stream->ic->duration/1000000LL;
2794                     thh = tns/3600;
2795                     tmm = (tns%3600)/60;
2796                     tss = (tns%60);
2797                     frac = x/cur_stream->width;
2798                     ns = frac*tns;
2799                     hh = ns/3600;
2800                     mm = (ns%3600)/60;
2801                     ss = (ns%60);
2802                     fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
2803                             hh, mm, ss, thh, tmm, tss);
2804                     ts = frac*cur_stream->ic->duration;
2805                     if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
2806                         ts += cur_stream->ic->start_time;
2807                     stream_seek(cur_stream, ts, 0, 0);
2808                 }
2809             }
2810             break;
2811         case SDL_VIDEORESIZE:
2812             if (cur_stream) {
2813                 screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
2814                                           SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
2815                 screen_width = cur_stream->width = event.resize.w;
2816                 screen_height= cur_stream->height= event.resize.h;
2817             }
2818             break;
2819         case SDL_QUIT:
2820         case FF_QUIT_EVENT:
2821             do_exit();
2822             break;
2823         case FF_ALLOC_EVENT:
2824             video_open(event.user.data1);
2825             alloc_picture(event.user.data1);
2826             break;
2827         case FF_REFRESH_EVENT:
2828             video_refresh_timer(event.user.data1);
2829             cur_stream->refresh=0;
2830             break;
2831         default:
2832             break;
2833         }
2834     }
2835 }
2836
2837 static int opt_frame_size(const char *opt, const char *arg)
2838 {
2839     if (av_parse_video_size(&frame_width, &frame_height, arg) < 0) {
2840         fprintf(stderr, "Incorrect frame size\n");
2841         return AVERROR(EINVAL);
2842     }
2843     if ((frame_width % 2) != 0 || (frame_height % 2) != 0) {
2844         fprintf(stderr, "Frame size must be a multiple of 2\n");
2845         return AVERROR(EINVAL);
2846     }
2847     return 0;
2848 }
2849
2850 static int opt_width(const char *opt, const char *arg)
2851 {
2852     screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2853     return 0;
2854 }
2855
2856 static int opt_height(const char *opt, const char *arg)
2857 {
2858     screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2859     return 0;
2860 }
2861
2862 static int opt_format(const char *opt, const char *arg)
2863 {
2864     file_iformat = av_find_input_format(arg);
2865     if (!file_iformat) {
2866         fprintf(stderr, "Unknown input format: %s\n", arg);
2867         return AVERROR(EINVAL);
2868     }
2869     return 0;
2870 }
2871
2872 static int opt_frame_pix_fmt(const char *opt, const char *arg)
2873 {
2874     frame_pix_fmt = av_get_pix_fmt(arg);
2875     return 0;
2876 }
2877
2878 static int opt_sync(const char *opt, const char *arg)
2879 {
2880     if (!strcmp(arg, "audio"))
2881         av_sync_type = AV_SYNC_AUDIO_MASTER;
2882     else if (!strcmp(arg, "video"))
2883         av_sync_type = AV_SYNC_VIDEO_MASTER;
2884     else if (!strcmp(arg, "ext"))
2885         av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2886     else {
2887         fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
2888         exit(1);
2889     }
2890     return 0;
2891 }
2892
2893 static int opt_seek(const char *opt, const char *arg)
2894 {
2895     start_time = parse_time_or_die(opt, arg, 1);
2896     return 0;
2897 }
2898
2899 static int opt_duration(const char *opt, const char *arg)
2900 {
2901     duration = parse_time_or_die(opt, arg, 1);
2902     return 0;
2903 }
2904
2905 static int opt_debug(const char *opt, const char *arg)
2906 {
2907     av_log_set_level(99);
2908     debug = parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2909     return 0;
2910 }
2911
2912 static int opt_vismv(const char *opt, const char *arg)
2913 {
2914     debug_mv = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
2915     return 0;
2916 }
2917
2918 static int opt_thread_count(const char *opt, const char *arg)
2919 {
2920     thread_count= parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2921 #if !HAVE_THREADS
2922     fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
2923 #endif
2924     return 0;
2925 }
2926
2927 static const OptionDef options[] = {
2928 #include "cmdutils_common_opts.h"
2929     { "x", HAS_ARG, {(void*)opt_width}, "force displayed width", "width" },
2930     { "y", HAS_ARG, {(void*)opt_height}, "force displayed height", "height" },
2931     { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
2932     { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
2933     { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
2934     { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
2935     { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_AUDIO]}, "select desired audio stream", "stream_number" },
2936     { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_VIDEO]}, "select desired video stream", "stream_number" },
2937     { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_SUBTITLE]}, "select desired subtitle stream", "stream_number" },
2938     { "ss", HAS_ARG, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
2939     { "t", HAS_ARG, {(void*)&opt_duration}, "play \"duration\" seconds of audio/video", "duration" },
2940     { "bytes", OPT_INT | HAS_ARG, {(void*)&seek_by_bytes}, "seek by bytes 0=off 1=on -1=auto", "val" },
2941     { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
2942     { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
2943     { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
2944     { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
2945     { "debug", HAS_ARG | OPT_EXPERT, {(void*)opt_debug}, "print specific debug info", "" },
2946     { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
2947     { "vismv", HAS_ARG | OPT_EXPERT, {(void*)opt_vismv}, "visualize motion vectors", "" },
2948     { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
2949     { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
2950     { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts 0=off 1=on -1=auto", ""},
2951     { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
2952     { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
2953     { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
2954     { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
2955     { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo",  "algo" },
2956     { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_recognition}, "set error detection threshold (0-4)",  "threshold" },
2957     { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options",  "bit_mask" },
2958     { "sync", HAS_ARG | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
2959     { "threads", HAS_ARG | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
2960     { "autoexit", OPT_BOOL | OPT_EXPERT, {(void*)&autoexit}, "exit at the end", "" },
2961     { "exitonkeydown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_keydown}, "exit on key down", "" },
2962     { "exitonmousedown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_mousedown}, "exit on mouse down", "" },
2963     { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&loop}, "set number of times the playback shall be looped", "loop count" },
2964     { "framedrop", OPT_BOOL | OPT_EXPERT, {(void*)&framedrop}, "drop frames when cpu is too slow", "" },
2965     { "window_title", OPT_STRING | HAS_ARG, {(void*)&window_title}, "set window title", "window title" },
2966 #if CONFIG_AVFILTER
2967     { "vf", OPT_STRING | HAS_ARG, {(void*)&vfilters}, "video filters", "filter list" },
2968 #endif
2969     { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, {(void*)&rdftspeed}, "rdft speed", "msecs" },
2970     { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
2971     { "i", 0, {NULL}, "ffmpeg compatibility dummy option", ""},
2972     { NULL, },
2973 };
2974
2975 static void show_usage(void)
2976 {
2977     printf("Simple media player\n");
2978     printf("usage: ffplay [options] input_file\n");
2979     printf("\n");
2980 }
2981
2982 static void show_help(void)
2983 {
2984     av_log_set_callback(log_callback_help);
2985     show_usage();
2986     show_help_options(options, "Main options:\n",
2987                       OPT_EXPERT, 0);
2988     show_help_options(options, "\nAdvanced options:\n",
2989                       OPT_EXPERT, OPT_EXPERT);
2990     printf("\n");
2991     av_opt_show2(avcodec_opts[0], NULL,
2992                  AV_OPT_FLAG_DECODING_PARAM, 0);
2993     printf("\n");
2994     av_opt_show2(avformat_opts, NULL,
2995                  AV_OPT_FLAG_DECODING_PARAM, 0);
2996 #if !CONFIG_AVFILTER
2997     printf("\n");
2998     av_opt_show2(sws_opts, NULL,
2999                  AV_OPT_FLAG_ENCODING_PARAM, 0);
3000 #endif
3001     printf("\nWhile playing:\n"
3002            "q, ESC              quit\n"
3003            "f                   toggle full screen\n"
3004            "p, SPC              pause\n"
3005            "a                   cycle audio channel\n"
3006            "v                   cycle video channel\n"
3007            "t                   cycle subtitle channel\n"
3008            "w                   show audio waves\n"
3009            "s                   activate frame-step mode\n"
3010            "left/right          seek backward/forward 10 seconds\n"
3011            "down/up             seek backward/forward 1 minute\n"
3012            "mouse click         seek to percentage in file corresponding to fraction of width\n"
3013            );
3014 }
3015
3016 static void opt_input_file(const char *filename)
3017 {
3018     if (input_filename) {
3019         fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
3020                 filename, input_filename);
3021         exit(1);
3022     }
3023     if (!strcmp(filename, "-"))
3024         filename = "pipe:";
3025     input_filename = filename;
3026 }
3027
3028 /* program entry point */
3029 int main(int argc, char **argv)
3030 {
3031     int flags;
3032
3033     av_log_set_flags(AV_LOG_SKIP_REPEATED);
3034
3035     /* register all codecs, demuxers and protocols */
3036     avcodec_register_all();
3037 #if CONFIG_AVDEVICE
3038     avdevice_register_all();
3039 #endif
3040 #if CONFIG_AVFILTER
3041     avfilter_register_all();
3042 #endif
3043     av_register_all();
3044
3045     init_opts();
3046
3047     show_banner();
3048
3049     parse_options(argc, argv, options, opt_input_file);
3050
3051     if (!input_filename) {
3052         show_usage();
3053         fprintf(stderr, "An input file must be specified\n");
3054         fprintf(stderr, "Use -h to get full help or, even better, run 'man ffplay'\n");
3055         exit(1);
3056     }
3057
3058     if (display_disable) {
3059         video_disable = 1;
3060     }
3061     flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3062 #if !defined(__MINGW32__) && !defined(__APPLE__)
3063     flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
3064 #endif
3065     if (SDL_Init (flags)) {
3066         fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
3067         exit(1);
3068     }
3069
3070     if (!display_disable) {
3071 #if HAVE_SDL_VIDEO_SIZE
3072         const SDL_VideoInfo *vi = SDL_GetVideoInfo();
3073         fs_screen_width = vi->current_w;
3074         fs_screen_height = vi->current_h;
3075 #endif
3076     }
3077
3078     SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
3079     SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3080     SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3081
3082     av_init_packet(&flush_pkt);
3083     flush_pkt.data = (uint8_t *)"FLUSH";
3084
3085     cur_stream = stream_open(input_filename, file_iformat);
3086
3087     event_loop();
3088
3089     /* never returns */
3090
3091     return 0;
3092 }