ffplay: remove -debug option
[ffmpeg.git] / ffplay.c
1 /*
2  * ffplay : Simple Media Player based on the FFmpeg libraries
3  * Copyright (c) 2003 Fabrice Bellard
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21
22 #include "config.h"
23 #include <inttypes.h>
24 #include <math.h>
25 #include <limits.h>
26 #include "libavutil/avstring.h"
27 #include "libavutil/colorspace.h"
28 #include "libavutil/pixdesc.h"
29 #include "libavutil/imgutils.h"
30 #include "libavutil/parseutils.h"
31 #include "libavutil/samplefmt.h"
32 #include "libavutil/avassert.h"
33 #include "libavformat/avformat.h"
34 #include "libavdevice/avdevice.h"
35 #include "libswscale/swscale.h"
36 #include "libavcodec/audioconvert.h"
37 #include "libavutil/opt.h"
38 #include "libavcodec/avfft.h"
39
40 #if CONFIG_AVFILTER
41 # include "libavfilter/avcodec.h"
42 # include "libavfilter/avfilter.h"
43 # include "libavfilter/avfiltergraph.h"
44 #endif
45
46 #include <SDL.h>
47 #include <SDL_thread.h>
48
49 #include "cmdutils.h"
50
51 #include <unistd.h>
52 #include <assert.h>
53
54 const char program_name[] = "ffplay";
55 const int program_birth_year = 2003;
56
57 //#define DEBUG
58 //#define DEBUG_SYNC
59
60 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
61 #define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
62 #define MIN_FRAMES 5
63
64 /* SDL audio buffer size, in samples. Should be small to have precise
65    A/V sync as SDL does not have hardware buffer fullness info. */
66 #define SDL_AUDIO_BUFFER_SIZE 1024
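/* Editorial note (worked example, not from the original source): at a 44100 Hz
   sample rate, 1024 samples correspond to roughly 1024 / 44100 ~= 23 ms of audio
   per callback, so the uncertainty this buffer adds to the A/V sync estimate
   stays in the low tens of milliseconds. */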
67
68 /* no AV sync correction is done if below the AV sync threshold */
69 #define AV_SYNC_THRESHOLD 0.01
70 /* no AV correction is done if the error is too big */
71 #define AV_NOSYNC_THRESHOLD 10.0
72
73 #define FRAME_SKIP_FACTOR 0.05
74
75 /* maximum audio speed change to get correct sync */
76 #define SAMPLE_CORRECTION_PERCENT_MAX 10
77
78 /* we use about AUDIO_DIFF_AVG_NB A-V differences to compute the average */
79 #define AUDIO_DIFF_AVG_NB   20
80
81 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
82 #define SAMPLE_ARRAY_SIZE (2*65536)
83
84 static int sws_flags = SWS_BICUBIC;
85
86 typedef struct PacketQueue {
87     AVPacketList *first_pkt, *last_pkt;
88     int nb_packets;
89     int size;
90     int abort_request;
91     SDL_mutex *mutex;
92     SDL_cond *cond;
93 } PacketQueue;
94
95 #define VIDEO_PICTURE_QUEUE_SIZE 2
96 #define SUBPICTURE_QUEUE_SIZE 4
97
98 typedef struct VideoPicture {
99     double pts;                                  ///<presentation time stamp for this picture
100     double target_clock;                         ///<av_gettime() time at which this should be displayed ideally
101     int64_t pos;                                 ///<byte position in file
102     SDL_Overlay *bmp;
103     int width, height; /* source height & width */
104     int allocated;
105     enum PixelFormat pix_fmt;
106
107 #if CONFIG_AVFILTER
108     AVFilterBufferRef *picref;
109 #endif
110 } VideoPicture;
111
112 typedef struct SubPicture {
113     double pts; /* presentation time stamp for this picture */
114     AVSubtitle sub;
115 } SubPicture;
116
117 enum {
118     AV_SYNC_AUDIO_MASTER, /* default choice */
119     AV_SYNC_VIDEO_MASTER,
120     AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
121 };
122
123 typedef struct VideoState {
124     SDL_Thread *read_tid;
125     SDL_Thread *video_tid;
126     SDL_Thread *refresh_tid;
127     AVInputFormat *iformat;
128     int no_background;
129     int abort_request;
130     int paused;
131     int last_paused;
132     int seek_req;
133     int seek_flags;
134     int64_t seek_pos;
135     int64_t seek_rel;
136     int read_pause_return;
137     AVFormatContext *ic;
138
139     int audio_stream;
140
141     int av_sync_type;
142     double external_clock; /* external clock base */
143     int64_t external_clock_time;
144
145     double audio_clock;
146     double audio_diff_cum; /* used for AV difference average computation */
147     double audio_diff_avg_coef;
148     double audio_diff_threshold;
149     int audio_diff_avg_count;
150     AVStream *audio_st;
151     PacketQueue audioq;
152     int audio_hw_buf_size;
153     /* samples output by the codec. we reserve more space for avsync
154        compensation */
155     DECLARE_ALIGNED(16,uint8_t,audio_buf1)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
156     DECLARE_ALIGNED(16,uint8_t,audio_buf2)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
157     uint8_t *audio_buf;
158     unsigned int audio_buf_size; /* in bytes */
159     int audio_buf_index; /* in bytes */
160     AVPacket audio_pkt_temp;
161     AVPacket audio_pkt;
162     enum AVSampleFormat audio_src_fmt;
163     AVAudioConvert *reformat_ctx;
164
165     enum ShowMode {
166         SHOW_MODE_NONE = -1, SHOW_MODE_VIDEO = 0, SHOW_MODE_WAVES, SHOW_MODE_RDFT, SHOW_MODE_NB
167     } show_mode;
168     int16_t sample_array[SAMPLE_ARRAY_SIZE];
169     int sample_array_index;
170     int last_i_start;
171     RDFTContext *rdft;
172     int rdft_bits;
173     FFTSample *rdft_data;
174     int xpos;
175
176     SDL_Thread *subtitle_tid;
177     int subtitle_stream;
178     int subtitle_stream_changed;
179     AVStream *subtitle_st;
180     PacketQueue subtitleq;
181     SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
182     int subpq_size, subpq_rindex, subpq_windex;
183     SDL_mutex *subpq_mutex;
184     SDL_cond *subpq_cond;
185
186     double frame_timer;
187     double frame_last_pts;
188     double frame_last_delay;
189     double video_clock;                          ///<pts of last decoded frame / predicted pts of next decoded frame
190     int video_stream;
191     AVStream *video_st;
192     PacketQueue videoq;
193     double video_current_pts;                    ///<current displayed pts (different from video_clock if frame fifos are used)
194     double video_current_pts_drift;              ///<video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
195     int64_t video_current_pos;                   ///<current displayed file pos
196     VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
197     int pictq_size, pictq_rindex, pictq_windex;
198     SDL_mutex *pictq_mutex;
199     SDL_cond *pictq_cond;
200 #if !CONFIG_AVFILTER
201     struct SwsContext *img_convert_ctx;
202 #endif
203
204     char filename[1024];
205     int width, height, xleft, ytop;
206
207 #if CONFIG_AVFILTER
208     AVFilterContext *out_video_filter;          ///<the last filter in the video chain
209 #endif
210
211     float skip_frames;
212     float skip_frames_index;
213     int refresh;
214 } VideoState;
215
216 static void show_help(void);
217
218 /* options specified by the user */
219 static AVInputFormat *file_iformat;
220 static const char *input_filename;
221 static const char *window_title;
222 static int fs_screen_width;
223 static int fs_screen_height;
224 static int screen_width = 0;
225 static int screen_height = 0;
226 static int frame_width = 0;
227 static int frame_height = 0;
228 static enum PixelFormat frame_pix_fmt = PIX_FMT_NONE;
229 static int audio_disable;
230 static int video_disable;
231 static int wanted_stream[AVMEDIA_TYPE_NB]={
232     [AVMEDIA_TYPE_AUDIO]=-1,
233     [AVMEDIA_TYPE_VIDEO]=-1,
234     [AVMEDIA_TYPE_SUBTITLE]=-1,
235 };
236 static int seek_by_bytes=-1;
237 static int display_disable;
238 static int show_status = 1;
239 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
240 static int64_t start_time = AV_NOPTS_VALUE;
241 static int64_t duration = AV_NOPTS_VALUE;
242 static int step = 0;
243 static int thread_count = 1;
244 static int workaround_bugs = 1;
245 static int fast = 0;
246 static int genpts = 0;
247 static int lowres = 0;
248 static int idct = FF_IDCT_AUTO;
249 static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
250 static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
251 static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
252 static int error_recognition = FF_ER_CAREFUL;
253 static int error_concealment = 3;
254 static int decoder_reorder_pts= -1;
255 static int autoexit;
256 static int exit_on_keydown;
257 static int exit_on_mousedown;
258 static int loop=1;
259 static int framedrop=1;
260 static enum ShowMode show_mode = SHOW_MODE_NONE;
261
262 static int rdftspeed=20;
263 #if CONFIG_AVFILTER
264 static char *vfilters = NULL;
265 #endif
266
267 /* current context */
268 static int is_full_screen;
269 static VideoState *cur_stream;
270 static int64_t audio_callback_time;
271
272 static AVPacket flush_pkt;
273
274 #define FF_ALLOC_EVENT   (SDL_USEREVENT)
275 #define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
276 #define FF_QUIT_EVENT    (SDL_USEREVENT + 2)
277
278 static SDL_Surface *screen;
279
280 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
281 {
282     AVPacketList *pkt1;
283
284     /* duplicate the packet */
285     if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
286         return -1;
287
288     pkt1 = av_malloc(sizeof(AVPacketList));
289     if (!pkt1)
290         return -1;
291     pkt1->pkt = *pkt;
292     pkt1->next = NULL;
293
294
295     SDL_LockMutex(q->mutex);
296
297     if (!q->last_pkt)
298
299         q->first_pkt = pkt1;
300     else
301         q->last_pkt->next = pkt1;
302     q->last_pkt = pkt1;
303     q->nb_packets++;
304     q->size += pkt1->pkt.size + sizeof(*pkt1);
305     /* XXX: should duplicate packet data in DV case */
306     SDL_CondSignal(q->cond);
307
308     SDL_UnlockMutex(q->mutex);
309     return 0;
310 }
311
312 /* packet queue handling */
313 static void packet_queue_init(PacketQueue *q)
314 {
315     memset(q, 0, sizeof(PacketQueue));
316     q->mutex = SDL_CreateMutex();
317     q->cond = SDL_CreateCond();
318     packet_queue_put(q, &flush_pkt);
319 }
320
321 static void packet_queue_flush(PacketQueue *q)
322 {
323     AVPacketList *pkt, *pkt1;
324
325     SDL_LockMutex(q->mutex);
326     for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
327         pkt1 = pkt->next;
328         av_free_packet(&pkt->pkt);
329         av_freep(&pkt);
330     }
331     q->last_pkt = NULL;
332     q->first_pkt = NULL;
333     q->nb_packets = 0;
334     q->size = 0;
335     SDL_UnlockMutex(q->mutex);
336 }
337
338 static void packet_queue_end(PacketQueue *q)
339 {
340     packet_queue_flush(q);
341     SDL_DestroyMutex(q->mutex);
342     SDL_DestroyCond(q->cond);
343 }
344
345 static void packet_queue_abort(PacketQueue *q)
346 {
347     SDL_LockMutex(q->mutex);
348
349     q->abort_request = 1;
350
351     SDL_CondSignal(q->cond);
352
353     SDL_UnlockMutex(q->mutex);
354 }
355
356 /* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
357 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
358 {
359     AVPacketList *pkt1;
360     int ret;
361
362     SDL_LockMutex(q->mutex);
363
364     for(;;) {
365         if (q->abort_request) {
366             ret = -1;
367             break;
368         }
369
370         pkt1 = q->first_pkt;
371         if (pkt1) {
372             q->first_pkt = pkt1->next;
373             if (!q->first_pkt)
374                 q->last_pkt = NULL;
375             q->nb_packets--;
376             q->size -= pkt1->pkt.size + sizeof(*pkt1);
377             *pkt = pkt1->pkt;
378             av_free(pkt1);
379             ret = 1;
380             break;
381         } else if (!block) {
382             ret = 0;
383             break;
384         } else {
385             SDL_CondWait(q->cond, q->mutex);
386         }
387     }
388     SDL_UnlockMutex(q->mutex);
389     return ret;
390 }
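/* Illustrative usage sketch (editorial addition, not part of ffplay): how a
   producer/consumer pair is expected to drive the PacketQueue API above.
   read_one_packet() is a hypothetical placeholder for a demuxer call. */
#if 0
static void packet_queue_example(void)
{
    PacketQueue q;
    AVPacket pkt;

    packet_queue_init(&q);           /* creates mutex/cond, queues flush_pkt */

    /* producer: duplicate and append demuxed packets */
    while (read_one_packet(&pkt) >= 0)
        packet_queue_put(&q, &pkt);

    /* consumer: block until a packet arrives or the queue is aborted */
    while (packet_queue_get(&q, &pkt, 1) > 0) {
        /* ... decode pkt ... */
        av_free_packet(&pkt);
    }

    packet_queue_abort(&q);          /* wakes up any blocked consumer */
    packet_queue_end(&q);            /* flushes leftovers, destroys mutex/cond */
}
#endif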
391
392 static inline void fill_rectangle(SDL_Surface *screen,
393                                   int x, int y, int w, int h, int color)
394 {
395     SDL_Rect rect;
396     rect.x = x;
397     rect.y = y;
398     rect.w = w;
399     rect.h = h;
400     SDL_FillRect(screen, &rect, color);
401 }
402
403 #define ALPHA_BLEND(a, oldp, newp, s)\
404 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
405
406 #define RGBA_IN(r, g, b, a, s)\
407 {\
408     unsigned int v = ((const uint32_t *)(s))[0];\
409     a = (v >> 24) & 0xff;\
410     r = (v >> 16) & 0xff;\
411     g = (v >> 8) & 0xff;\
412     b = v & 0xff;\
413 }
414
415 #define YUVA_IN(y, u, v, a, s, pal)\
416 {\
417     unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
418     a = (val >> 24) & 0xff;\
419     y = (val >> 16) & 0xff;\
420     u = (val >> 8) & 0xff;\
421     v = val & 0xff;\
422 }
423
424 #define YUVA_OUT(d, y, u, v, a)\
425 {\
426     ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
427 }
428
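/* Editorial note (worked example): with s == 0 and a fully opaque pixel
   (a == 255), ALPHA_BLEND(a, oldp, newp, 0) reduces to
   ((oldp << 0) * 0 + newp * 255) / 255 == newp, while a == 0 leaves oldp
   unchanged; intermediate alphas interpolate linearly. The shift s accounts
   for chroma values that were accumulated over 2 (s == 1) or 4 (s == 2)
   samples before blending, as done in blend_subrect() below. */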
429
430 #define BPP 1
431
432 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
433 {
434     int wrap, wrap3, width2, skip2;
435     int y, u, v, a, u1, v1, a1, w, h;
436     uint8_t *lum, *cb, *cr;
437     const uint8_t *p;
438     const uint32_t *pal;
439     int dstx, dsty, dstw, dsth;
440
441     dstw = av_clip(rect->w, 0, imgw);
442     dsth = av_clip(rect->h, 0, imgh);
443     dstx = av_clip(rect->x, 0, imgw - dstw);
444     dsty = av_clip(rect->y, 0, imgh - dsth);
445     lum = dst->data[0] + dsty * dst->linesize[0];
446     cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
447     cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
448
449     width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
450     skip2 = dstx >> 1;
451     wrap = dst->linesize[0];
452     wrap3 = rect->pict.linesize[0];
453     p = rect->pict.data[0];
454     pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */
455
456     if (dsty & 1) {
457         lum += dstx;
458         cb += skip2;
459         cr += skip2;
460
461         if (dstx & 1) {
462             YUVA_IN(y, u, v, a, p, pal);
463             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
464             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
465             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
466             cb++;
467             cr++;
468             lum++;
469             p += BPP;
470         }
471         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
472             YUVA_IN(y, u, v, a, p, pal);
473             u1 = u;
474             v1 = v;
475             a1 = a;
476             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
477
478             YUVA_IN(y, u, v, a, p + BPP, pal);
479             u1 += u;
480             v1 += v;
481             a1 += a;
482             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
483             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
484             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
485             cb++;
486             cr++;
487             p += 2 * BPP;
488             lum += 2;
489         }
490         if (w) {
491             YUVA_IN(y, u, v, a, p, pal);
492             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
493             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
494             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
495             p++;
496             lum++;
497         }
498         p += wrap3 - dstw * BPP;
499         lum += wrap - dstw - dstx;
500         cb += dst->linesize[1] - width2 - skip2;
501         cr += dst->linesize[2] - width2 - skip2;
502     }
503     for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
504         lum += dstx;
505         cb += skip2;
506         cr += skip2;
507
508         if (dstx & 1) {
509             YUVA_IN(y, u, v, a, p, pal);
510             u1 = u;
511             v1 = v;
512             a1 = a;
513             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
514             p += wrap3;
515             lum += wrap;
516             YUVA_IN(y, u, v, a, p, pal);
517             u1 += u;
518             v1 += v;
519             a1 += a;
520             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
521             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
522             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
523             cb++;
524             cr++;
525             p += -wrap3 + BPP;
526             lum += -wrap + 1;
527         }
528         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
529             YUVA_IN(y, u, v, a, p, pal);
530             u1 = u;
531             v1 = v;
532             a1 = a;
533             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
534
535             YUVA_IN(y, u, v, a, p + BPP, pal);
536             u1 += u;
537             v1 += v;
538             a1 += a;
539             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
540             p += wrap3;
541             lum += wrap;
542
543             YUVA_IN(y, u, v, a, p, pal);
544             u1 += u;
545             v1 += v;
546             a1 += a;
547             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
548
549             YUVA_IN(y, u, v, a, p + BPP, pal);
550             u1 += u;
551             v1 += v;
552             a1 += a;
553             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
554
555             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
556             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
557
558             cb++;
559             cr++;
560             p += -wrap3 + 2 * BPP;
561             lum += -wrap + 2;
562         }
563         if (w) {
564             YUVA_IN(y, u, v, a, p, pal);
565             u1 = u;
566             v1 = v;
567             a1 = a;
568             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
569             p += wrap3;
570             lum += wrap;
571             YUVA_IN(y, u, v, a, p, pal);
572             u1 += u;
573             v1 += v;
574             a1 += a;
575             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
576             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
577             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
578             cb++;
579             cr++;
580             p += -wrap3 + BPP;
581             lum += -wrap + 1;
582         }
583         p += wrap3 + (wrap3 - dstw * BPP);
584         lum += wrap + (wrap - dstw - dstx);
585         cb += dst->linesize[1] - width2 - skip2;
586         cr += dst->linesize[2] - width2 - skip2;
587     }
588     /* handle odd height */
589     if (h) {
590         lum += dstx;
591         cb += skip2;
592         cr += skip2;
593
594         if (dstx & 1) {
595             YUVA_IN(y, u, v, a, p, pal);
596             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
597             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
598             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
599             cb++;
600             cr++;
601             lum++;
602             p += BPP;
603         }
604         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
605             YUVA_IN(y, u, v, a, p, pal);
606             u1 = u;
607             v1 = v;
608             a1 = a;
609             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
610
611             YUVA_IN(y, u, v, a, p + BPP, pal);
612             u1 += u;
613             v1 += v;
614             a1 += a;
615             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
616             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
617             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
618             cb++;
619             cr++;
620             p += 2 * BPP;
621             lum += 2;
622         }
623         if (w) {
624             YUVA_IN(y, u, v, a, p, pal);
625             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
626             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
627             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
628         }
629     }
630 }
631
632 static void free_subpicture(SubPicture *sp)
633 {
634     avsubtitle_free(&sp->sub);
635 }
636
637 static void video_image_display(VideoState *is)
638 {
639     VideoPicture *vp;
640     SubPicture *sp;
641     AVPicture pict;
642     float aspect_ratio;
643     int width, height, x, y;
644     SDL_Rect rect;
645     int i;
646
647     vp = &is->pictq[is->pictq_rindex];
648     if (vp->bmp) {
649 #if CONFIG_AVFILTER
650          if (vp->picref->video->sample_aspect_ratio.num == 0)
651              aspect_ratio = 0;
652          else
653              aspect_ratio = av_q2d(vp->picref->video->sample_aspect_ratio);
654 #else
655
656         /* XXX: use variable in the frame */
657         if (is->video_st->sample_aspect_ratio.num)
658             aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
659         else if (is->video_st->codec->sample_aspect_ratio.num)
660             aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
661         else
662             aspect_ratio = 0;
663 #endif
664         if (aspect_ratio <= 0.0)
665             aspect_ratio = 1.0;
666         aspect_ratio *= (float)vp->width / (float)vp->height;
667
668         if (is->subtitle_st) {
669             if (is->subpq_size > 0) {
670                 sp = &is->subpq[is->subpq_rindex];
671
672                 if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) {
673                     SDL_LockYUVOverlay (vp->bmp);
674
675                     pict.data[0] = vp->bmp->pixels[0];
676                     pict.data[1] = vp->bmp->pixels[2];
677                     pict.data[2] = vp->bmp->pixels[1];
678
679                     pict.linesize[0] = vp->bmp->pitches[0];
680                     pict.linesize[1] = vp->bmp->pitches[2];
681                     pict.linesize[2] = vp->bmp->pitches[1];
682
683                     for (i = 0; i < sp->sub.num_rects; i++)
684                         blend_subrect(&pict, sp->sub.rects[i],
685                                       vp->bmp->w, vp->bmp->h);
686
687                     SDL_UnlockYUVOverlay (vp->bmp);
688                 }
689             }
690         }
691
692
693         /* XXX: we assume the screen has a 1:1 pixel aspect ratio */
694         height = is->height;
695         width = ((int)rint(height * aspect_ratio)) & ~1;
696         if (width > is->width) {
697             width = is->width;
698             height = ((int)rint(width / aspect_ratio)) & ~1;
699         }
700         x = (is->width - width) / 2;
701         y = (is->height - height) / 2;
702         is->no_background = 0;
703         rect.x = is->xleft + x;
704         rect.y = is->ytop  + y;
705         rect.w = FFMAX(width,  1);
706         rect.h = FFMAX(height, 1);
707         SDL_DisplayYUVOverlay(vp->bmp, &rect);
708     }
709 }
710
711 /* get the current audio output buffer size, in bytes. With SDL, we
712    cannot have precise information */
713 static int audio_write_get_buf_size(VideoState *is)
714 {
715     return is->audio_buf_size - is->audio_buf_index;
716 }
717
718 static inline int compute_mod(int a, int b)
719 {
720     return a < 0 ? a%b + b : a%b;
721 }
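/* Editorial note (worked example): compute_mod(-3, SAMPLE_ARRAY_SIZE) returns
   SAMPLE_ARRAY_SIZE - 3 instead of the negative remainder that C's % operator
   would give, so indices stepping backwards through sample_array[] always wrap
   into the valid [0, SAMPLE_ARRAY_SIZE) range. */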
722
723 static void video_audio_display(VideoState *s)
724 {
725     int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
726     int ch, channels, h, h2, bgcolor, fgcolor;
727     int64_t time_diff; /* microseconds elapsed since the last audio callback */
728     int rdft_bits, nb_freq;
729
730     for(rdft_bits=1; (1<<rdft_bits)<2*s->height; rdft_bits++)
731         ;
732     nb_freq= 1<<(rdft_bits-1);
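    /* Editorial note (worked example): for a 480-pixel-high window the loop
       above stops at rdft_bits == 10, since 1 << 10 == 1024 >= 2 * 480, which
       gives nb_freq == 512 frequency bins for the spectrum display. */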
733
734     /* compute display index: center on currently output samples */
735     channels = s->audio_st->codec->channels;
736     nb_display_channels = channels;
737     if (!s->paused) {
738         int data_used= s->show_mode == SHOW_MODE_WAVES ? s->width : (2*nb_freq);
739         n = 2 * channels;
740         delay = audio_write_get_buf_size(s);
741         delay /= n;
742
743         /* to be more precise, we take into account the time spent since
744            the last buffer computation */
745         if (audio_callback_time) {
746             time_diff = av_gettime() - audio_callback_time;
747             delay -= (time_diff * s->audio_st->codec->sample_rate) / 1000000;
748         }
749
750         delay += 2*data_used;
751         if (delay < data_used)
752             delay = data_used;
753
754         i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
755         if (s->show_mode == SHOW_MODE_WAVES) {
756             h= INT_MIN;
757             for(i=0; i<1000; i+=channels){
758                 int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
759                 int a= s->sample_array[idx];
760                 int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
761                 int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
762                 int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
763                 int score= a-d;
764                 if(h<score && (b^c)<0){
765                     h= score;
766                     i_start= idx;
767                 }
768             }
769         }
770
771         s->last_i_start = i_start;
772     } else {
773         i_start = s->last_i_start;
774     }
775
776     bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
777     if (s->show_mode == SHOW_MODE_WAVES) {
778         fill_rectangle(screen,
779                        s->xleft, s->ytop, s->width, s->height,
780                        bgcolor);
781
782         fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
783
784         /* total height for one channel */
785         h = s->height / nb_display_channels;
786         /* graph height / 2 */
787         h2 = (h * 9) / 20;
788         for(ch = 0;ch < nb_display_channels; ch++) {
789             i = i_start + ch;
790             y1 = s->ytop + ch * h + (h / 2); /* position of center line */
791             for(x = 0; x < s->width; x++) {
792                 y = (s->sample_array[i] * h2) >> 15;
793                 if (y < 0) {
794                     y = -y;
795                     ys = y1 - y;
796                 } else {
797                     ys = y1;
798                 }
799                 fill_rectangle(screen,
800                                s->xleft + x, ys, 1, y,
801                                fgcolor);
802                 i += channels;
803                 if (i >= SAMPLE_ARRAY_SIZE)
804                     i -= SAMPLE_ARRAY_SIZE;
805             }
806         }
807
808         fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
809
810         for(ch = 1;ch < nb_display_channels; ch++) {
811             y = s->ytop + ch * h;
812             fill_rectangle(screen,
813                            s->xleft, y, s->width, 1,
814                            fgcolor);
815         }
816         SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
817     }else{
818         nb_display_channels= FFMIN(nb_display_channels, 2);
819         if(rdft_bits != s->rdft_bits){
820             av_rdft_end(s->rdft);
821             av_free(s->rdft_data);
822             s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
823             s->rdft_bits= rdft_bits;
824             s->rdft_data= av_malloc(4*nb_freq*sizeof(*s->rdft_data));
825         }
826         {
827             FFTSample *data[2];
828             for(ch = 0;ch < nb_display_channels; ch++) {
829                 data[ch] = s->rdft_data + 2*nb_freq*ch;
830                 i = i_start + ch;
831                 for(x = 0; x < 2*nb_freq; x++) {
832                     double w= (x-nb_freq)*(1.0/nb_freq);
833                     data[ch][x]= s->sample_array[i]*(1.0-w*w);
834                     i += channels;
835                     if (i >= SAMPLE_ARRAY_SIZE)
836                         i -= SAMPLE_ARRAY_SIZE;
837                 }
838                 av_rdft_calc(s->rdft, data[ch]);
839             }
840             // Least efficient way to do this; we should of course access it directly, but it's more than fast enough
841             for(y=0; y<s->height; y++){
842                 double w= 1/sqrt(nb_freq);
843                 int a= sqrt(w*sqrt(data[0][2*y+0]*data[0][2*y+0] + data[0][2*y+1]*data[0][2*y+1]));
844                 int b= (nb_display_channels == 2 ) ? sqrt(w*sqrt(data[1][2*y+0]*data[1][2*y+0]
845                        + data[1][2*y+1]*data[1][2*y+1])) : a;
846                 a= FFMIN(a,255);
847                 b= FFMIN(b,255);
848                 fgcolor = SDL_MapRGB(screen->format, a, b, (a+b)/2);
849
850                 fill_rectangle(screen,
851                             s->xpos, s->height-y, 1, 1,
852                             fgcolor);
853             }
854         }
855         SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
856         s->xpos++;
857         if(s->xpos >= s->width)
858             s->xpos= s->xleft;
859     }
860 }
861
862 static int video_open(VideoState *is){
863     int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
864     int w,h;
865
866     if(is_full_screen) flags |= SDL_FULLSCREEN;
867     else               flags |= SDL_RESIZABLE;
868
869     if (is_full_screen && fs_screen_width) {
870         w = fs_screen_width;
871         h = fs_screen_height;
872     } else if(!is_full_screen && screen_width){
873         w = screen_width;
874         h = screen_height;
875 #if CONFIG_AVFILTER
876     }else if (is->out_video_filter && is->out_video_filter->inputs[0]){
877         w = is->out_video_filter->inputs[0]->w;
878         h = is->out_video_filter->inputs[0]->h;
879 #else
880     }else if (is->video_st && is->video_st->codec->width){
881         w = is->video_st->codec->width;
882         h = is->video_st->codec->height;
883 #endif
884     } else {
885         w = 640;
886         h = 480;
887     }
888     if(screen && is->width == screen->w && screen->w == w
889        && is->height== screen->h && screen->h == h)
890         return 0;
891
892 #ifndef __APPLE__
893     screen = SDL_SetVideoMode(w, h, 0, flags);
894 #else
895     /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
896     screen = SDL_SetVideoMode(w, h, 24, flags);
897 #endif
898     if (!screen) {
899         fprintf(stderr, "SDL: could not set video mode - exiting\n");
900         return -1;
901     }
902     if (!window_title)
903         window_title = input_filename;
904     SDL_WM_SetCaption(window_title, window_title);
905
906     is->width = screen->w;
907     is->height = screen->h;
908
909     return 0;
910 }
911
912 /* display the current picture, if any */
913 static void video_display(VideoState *is)
914 {
915     if(!screen)
916         video_open(cur_stream);
917     if (is->audio_st && is->show_mode != SHOW_MODE_VIDEO)
918         video_audio_display(is);
919     else if (is->video_st)
920         video_image_display(is);
921 }
922
923 static int refresh_thread(void *opaque)
924 {
925     VideoState *is= opaque;
926     while(!is->abort_request){
927         SDL_Event event;
928         event.type = FF_REFRESH_EVENT;
929         event.user.data1 = opaque;
930         if(!is->refresh){
931             is->refresh=1;
932             SDL_PushEvent(&event);
933         }
934         //FIXME ideally we should wait for the correct time, but SDL's event passing is so slow it would be silly
935         usleep(is->audio_st && is->show_mode != SHOW_MODE_VIDEO ? rdftspeed*1000 : 5000);
936     }
937     return 0;
938 }
939
940 /* get the current audio clock value */
941 static double get_audio_clock(VideoState *is)
942 {
943     double pts;
944     int hw_buf_size, bytes_per_sec;
945     pts = is->audio_clock;
946     hw_buf_size = audio_write_get_buf_size(is);
947     bytes_per_sec = 0;
948     if (is->audio_st) {
949         bytes_per_sec = is->audio_st->codec->sample_rate *
950             2 * is->audio_st->codec->channels;
951     }
952     if (bytes_per_sec)
953         pts -= (double)hw_buf_size / bytes_per_sec;
954     return pts;
955 }
956
957 /* get the current video clock value */
958 static double get_video_clock(VideoState *is)
959 {
960     if (is->paused) {
961         return is->video_current_pts;
962     } else {
963         return is->video_current_pts_drift + av_gettime() / 1000000.0;
964     }
965 }
966
967 /* get the current external clock value */
968 static double get_external_clock(VideoState *is)
969 {
970     int64_t ti;
971     ti = av_gettime();
972     return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
973 }
974
975 /* get the current master clock value */
976 static double get_master_clock(VideoState *is)
977 {
978     double val;
979
980     if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
981         if (is->video_st)
982             val = get_video_clock(is);
983         else
984             val = get_audio_clock(is);
985     } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
986         if (is->audio_st)
987             val = get_audio_clock(is);
988         else
989             val = get_video_clock(is);
990     } else {
991         val = get_external_clock(is);
992     }
993     return val;
994 }
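/* Illustrative sketch (editorial addition, not part of ffplay): the A-V value
   printed by the status line in video_refresh() below is simply the difference
   between the two stream clocks, while the master clock drives synchronisation. */
#if 0
static void clock_example(VideoState *is)
{
    double master  = get_master_clock(is);                       /* audio clock by default */
    double av_diff = get_audio_clock(is) - get_video_clock(is);  /* positive: audio ahead */
    printf("master=%7.2f A-V=%+7.3f\n", master, av_diff);
}
#endif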
995
996 /* seek in the stream */
997 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
998 {
999     if (!is->seek_req) {
1000         is->seek_pos = pos;
1001         is->seek_rel = rel;
1002         is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1003         if (seek_by_bytes)
1004             is->seek_flags |= AVSEEK_FLAG_BYTE;
1005         is->seek_req = 1;
1006     }
1007 }
1008
1009 /* pause or resume the video */
1010 static void stream_toggle_pause(VideoState *is)
1011 {
1012     if (is->paused) {
1013         is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
1014         if(is->read_pause_return != AVERROR(ENOSYS)){
1015             is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
1016         }
1017         is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
1018     }
1019     is->paused = !is->paused;
1020 }
1021
1022 static double compute_target_time(double frame_current_pts, VideoState *is)
1023 {
1024     double delay, sync_threshold, diff;
1025
1026     /* compute nominal delay */
1027     delay = frame_current_pts - is->frame_last_pts;
1028     if (delay <= 0 || delay >= 10.0) {
1029         /* if incorrect delay, use previous one */
1030         delay = is->frame_last_delay;
1031     } else {
1032         is->frame_last_delay = delay;
1033     }
1034     is->frame_last_pts = frame_current_pts;
1035
1036     /* update delay to follow master synchronisation source */
1037     if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1038          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1039         /* if video is slave, we try to correct big delays by
1040            duplicating or deleting a frame */
1041         diff = get_video_clock(is) - get_master_clock(is);
1042
1043         /* skip or repeat frame. We take into account the
1044            delay to compute the threshold. I still don't know
1045            if it is the best guess */
1046         sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1047         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1048             if (diff <= -sync_threshold)
1049                 delay = 0;
1050             else if (diff >= sync_threshold)
1051                 delay = 2 * delay;
1052         }
1053     }
1054     is->frame_timer += delay;
1055 #if defined(DEBUG_SYNC)
1056     printf("video: delay=%0.3f pts=%0.3f A-V=%f\n",
1057             delay, frame_current_pts, -diff);
1058 #endif
1059
1060     return is->frame_timer;
1061 }
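/* Editorial note (worked example): with a nominal frame delay of 40 ms, if the
   video clock trails the master clock by more than FFMAX(AV_SYNC_THRESHOLD, 0.040)
   seconds the delay collapses to 0 so the late frame is scheduled immediately;
   if the video runs ahead by the same margin the delay doubles to 80 ms so the
   master clock can catch up. Differences larger than AV_NOSYNC_THRESHOLD (10 s)
   are deliberately left uncorrected. */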
1062
1063 /* called to display each frame */
1064 static void video_refresh(void *opaque)
1065 {
1066     VideoState *is = opaque;
1067     VideoPicture *vp;
1068
1069     SubPicture *sp, *sp2;
1070
1071     if (is->video_st) {
1072 retry:
1073         if (is->pictq_size == 0) {
1074             // nothing to do, no picture to display in the queue
1075         } else {
1076             double time= av_gettime()/1000000.0;
1077             double next_target;
1078             /* dequeue the picture */
1079             vp = &is->pictq[is->pictq_rindex];
1080
1081             if(time < vp->target_clock)
1082                 return;
1083             /* update current video pts */
1084             is->video_current_pts = vp->pts;
1085             is->video_current_pts_drift = is->video_current_pts - time;
1086             is->video_current_pos = vp->pos;
1087             if(is->pictq_size > 1){
1088                 VideoPicture *nextvp= &is->pictq[(is->pictq_rindex+1)%VIDEO_PICTURE_QUEUE_SIZE];
1089                 assert(nextvp->target_clock >= vp->target_clock);
1090                 next_target= nextvp->target_clock;
1091             }else{
1092                 next_target= vp->target_clock + is->video_clock - vp->pts; //FIXME pass durations cleanly
1093             }
1094             if(framedrop && time > next_target){
1095                 is->skip_frames *= 1.0 + FRAME_SKIP_FACTOR;
1096                 if(is->pictq_size > 1 || time > next_target + 0.5){
1097                     /* update queue size and signal for next picture */
1098                     if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1099                         is->pictq_rindex = 0;
1100
1101                     SDL_LockMutex(is->pictq_mutex);
1102                     is->pictq_size--;
1103                     SDL_CondSignal(is->pictq_cond);
1104                     SDL_UnlockMutex(is->pictq_mutex);
1105                     goto retry;
1106                 }
1107             }
1108
1109             if(is->subtitle_st) {
1110                 if (is->subtitle_stream_changed) {
1111                     SDL_LockMutex(is->subpq_mutex);
1112
1113                     while (is->subpq_size) {
1114                         free_subpicture(&is->subpq[is->subpq_rindex]);
1115
1116                         /* update queue size and signal for next picture */
1117                         if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1118                             is->subpq_rindex = 0;
1119
1120                         is->subpq_size--;
1121                     }
1122                     is->subtitle_stream_changed = 0;
1123
1124                     SDL_CondSignal(is->subpq_cond);
1125                     SDL_UnlockMutex(is->subpq_mutex);
1126                 } else {
1127                     if (is->subpq_size > 0) {
1128                         sp = &is->subpq[is->subpq_rindex];
1129
1130                         if (is->subpq_size > 1)
1131                             sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1132                         else
1133                             sp2 = NULL;
1134
1135                         if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1136                                 || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1137                         {
1138                             free_subpicture(sp);
1139
1140                             /* update queue size and signal for next picture */
1141                             if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1142                                 is->subpq_rindex = 0;
1143
1144                             SDL_LockMutex(is->subpq_mutex);
1145                             is->subpq_size--;
1146                             SDL_CondSignal(is->subpq_cond);
1147                             SDL_UnlockMutex(is->subpq_mutex);
1148                         }
1149                     }
1150                 }
1151             }
1152
1153             /* display picture */
1154             if (!display_disable)
1155                 video_display(is);
1156
1157             /* update queue size and signal for next picture */
1158             if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1159                 is->pictq_rindex = 0;
1160
1161             SDL_LockMutex(is->pictq_mutex);
1162             is->pictq_size--;
1163             SDL_CondSignal(is->pictq_cond);
1164             SDL_UnlockMutex(is->pictq_mutex);
1165         }
1166     } else if (is->audio_st) {
1167         /* draw the next audio frame */
1168
1169         /* if there is only an audio stream, then display the audio bars
1170            (better than nothing, just to test the implementation) */
1171
1172         /* display picture */
1173         if (!display_disable)
1174             video_display(is);
1175     }
1176     if (show_status) {
1177         static int64_t last_time;
1178         int64_t cur_time;
1179         int aqsize, vqsize, sqsize;
1180         double av_diff;
1181
1182         cur_time = av_gettime();
1183         if (!last_time || (cur_time - last_time) >= 30000) {
1184             aqsize = 0;
1185             vqsize = 0;
1186             sqsize = 0;
1187             if (is->audio_st)
1188                 aqsize = is->audioq.size;
1189             if (is->video_st)
1190                 vqsize = is->videoq.size;
1191             if (is->subtitle_st)
1192                 sqsize = is->subtitleq.size;
1193             av_diff = 0;
1194             if (is->audio_st && is->video_st)
1195                 av_diff = get_audio_clock(is) - get_video_clock(is);
1196             printf("%7.2f A-V:%7.3f s:%3.1f aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
1197                    get_master_clock(is),
1198                    av_diff,
1199                    FFMAX(is->skip_frames-1, 0),
1200                    aqsize / 1024,
1201                    vqsize / 1024,
1202                    sqsize,
1203                    is->video_st ? is->video_st->codec->pts_correction_num_faulty_dts : 0,
1204                    is->video_st ? is->video_st->codec->pts_correction_num_faulty_pts : 0);
1205             fflush(stdout);
1206             last_time = cur_time;
1207         }
1208     }
1209 }
1210
1211 static void stream_close(VideoState *is)
1212 {
1213     VideoPicture *vp;
1214     int i;
1215     /* XXX: use a special url_shutdown call to abort parse cleanly */
1216     is->abort_request = 1;
1217     SDL_WaitThread(is->read_tid, NULL);
1218     SDL_WaitThread(is->refresh_tid, NULL);
1219
1220     /* free all pictures */
1221     for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
1222         vp = &is->pictq[i];
1223 #if CONFIG_AVFILTER
1224         if (vp->picref) {
1225             avfilter_unref_buffer(vp->picref);
1226             vp->picref = NULL;
1227         }
1228 #endif
1229         if (vp->bmp) {
1230             SDL_FreeYUVOverlay(vp->bmp);
1231             vp->bmp = NULL;
1232         }
1233     }
1234     SDL_DestroyMutex(is->pictq_mutex);
1235     SDL_DestroyCond(is->pictq_cond);
1236     SDL_DestroyMutex(is->subpq_mutex);
1237     SDL_DestroyCond(is->subpq_cond);
1238 #if !CONFIG_AVFILTER
1239     if (is->img_convert_ctx)
1240         sws_freeContext(is->img_convert_ctx);
1241 #endif
1242     av_free(is);
1243 }
1244
1245 static void do_exit(void)
1246 {
1247     if (cur_stream) {
1248         stream_close(cur_stream);
1249         cur_stream = NULL;
1250     }
1251     uninit_opts();
1252 #if CONFIG_AVFILTER
1253     avfilter_uninit();
1254 #endif
1255     if (show_status)
1256         printf("\n");
1257     SDL_Quit();
1258     av_log(NULL, AV_LOG_QUIET, "");
1259     exit(0);
1260 }
1261
1262 /* allocate a picture (this must be done in the main thread to avoid
1263    potential locking problems) */
1264 static void alloc_picture(void *opaque)
1265 {
1266     VideoState *is = opaque;
1267     VideoPicture *vp;
1268
1269     vp = &is->pictq[is->pictq_windex];
1270
1271     if (vp->bmp)
1272         SDL_FreeYUVOverlay(vp->bmp);
1273
1274 #if CONFIG_AVFILTER
1275     if (vp->picref)
1276         avfilter_unref_buffer(vp->picref);
1277     vp->picref = NULL;
1278
1279     vp->width   = is->out_video_filter->inputs[0]->w;
1280     vp->height  = is->out_video_filter->inputs[0]->h;
1281     vp->pix_fmt = is->out_video_filter->inputs[0]->format;
1282 #else
1283     vp->width   = is->video_st->codec->width;
1284     vp->height  = is->video_st->codec->height;
1285     vp->pix_fmt = is->video_st->codec->pix_fmt;
1286 #endif
1287
1288     vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
1289                                    SDL_YV12_OVERLAY,
1290                                    screen);
1291     if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
1292         /* SDL allocates a buffer smaller than requested if the video
1293          * overlay hardware is unable to support the requested size. */
1294         fprintf(stderr, "Error: the video system does not support an image\n"
1295                         "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n"
1296                         "to reduce the image size.\n", vp->width, vp->height );
1297         do_exit();
1298     }
1299
1300     SDL_LockMutex(is->pictq_mutex);
1301     vp->allocated = 1;
1302     SDL_CondSignal(is->pictq_cond);
1303     SDL_UnlockMutex(is->pictq_mutex);
1304 }
1305
1306 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
1307 {
1308     VideoPicture *vp;
1309     double frame_delay, pts = pts1;
1310
1311     /* compute the exact PTS for the picture if it is omitted in the stream
1312      * pts1 is the dts of the pkt / pts of the frame */
1313     if (pts != 0) {
1314         /* update video clock with pts, if present */
1315         is->video_clock = pts;
1316     } else {
1317         pts = is->video_clock;
1318     }
1319     /* update video clock for next frame */
1320     frame_delay = av_q2d(is->video_st->codec->time_base);
1321     /* for MPEG2, the frame can be repeated, so we update the
1322        clock accordingly */
1323     frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1324     is->video_clock += frame_delay;
1325
1326 #if defined(DEBUG_SYNC) && 0
1327     printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
1328            av_get_picture_type_char(src_frame->pict_type), pts, pts1);
1329 #endif
1330
1331     /* wait until we have space to put a new picture */
1332     SDL_LockMutex(is->pictq_mutex);
1333
1334     if(is->pictq_size>=VIDEO_PICTURE_QUEUE_SIZE && !is->refresh)
1335         is->skip_frames= FFMAX(1.0 - FRAME_SKIP_FACTOR, is->skip_frames * (1.0-FRAME_SKIP_FACTOR));
1336
1337     while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1338            !is->videoq.abort_request) {
1339         SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1340     }
1341     SDL_UnlockMutex(is->pictq_mutex);
1342
1343     if (is->videoq.abort_request)
1344         return -1;
1345
1346     vp = &is->pictq[is->pictq_windex];
1347
1348     /* alloc or resize hardware picture buffer */
1349     if (!vp->bmp ||
1350 #if CONFIG_AVFILTER
1351         vp->width  != is->out_video_filter->inputs[0]->w ||
1352         vp->height != is->out_video_filter->inputs[0]->h) {
1353 #else
1354         vp->width != is->video_st->codec->width ||
1355         vp->height != is->video_st->codec->height) {
1356 #endif
1357         SDL_Event event;
1358
1359         vp->allocated = 0;
1360
1361         /* the allocation must be done in the main thread to avoid
1362            locking problems */
1363         event.type = FF_ALLOC_EVENT;
1364         event.user.data1 = is;
1365         SDL_PushEvent(&event);
1366
1367         /* wait until the picture is allocated */
1368         SDL_LockMutex(is->pictq_mutex);
1369         while (!vp->allocated && !is->videoq.abort_request) {
1370             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1371         }
1372         SDL_UnlockMutex(is->pictq_mutex);
1373
1374         if (is->videoq.abort_request)
1375             return -1;
1376     }
1377
1378     /* if the frame is not skipped, then display it */
1379     if (vp->bmp) {
1380         AVPicture pict;
1381 #if CONFIG_AVFILTER
1382         if(vp->picref)
1383             avfilter_unref_buffer(vp->picref);
1384         vp->picref = src_frame->opaque;
1385 #endif
1386
1387         /* get a pointer to the bitmap */
1388         SDL_LockYUVOverlay (vp->bmp);
1389
1390         memset(&pict,0,sizeof(AVPicture));
1391         pict.data[0] = vp->bmp->pixels[0];
1392         pict.data[1] = vp->bmp->pixels[2];
1393         pict.data[2] = vp->bmp->pixels[1];
1394
1395         pict.linesize[0] = vp->bmp->pitches[0];
1396         pict.linesize[1] = vp->bmp->pitches[2];
1397         pict.linesize[2] = vp->bmp->pitches[1];
1398
1399 #if CONFIG_AVFILTER
1400         //FIXME use direct rendering
1401         av_picture_copy(&pict, (AVPicture *)src_frame,
1402                         vp->pix_fmt, vp->width, vp->height);
1403 #else
1404         sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
1405         is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
1406             vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
1407             PIX_FMT_YUV420P, sws_flags, NULL, NULL, NULL);
1408         if (is->img_convert_ctx == NULL) {
1409             fprintf(stderr, "Cannot initialize the conversion context\n");
1410             exit(1);
1411         }
1412         sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1413                   0, vp->height, pict.data, pict.linesize);
1414 #endif
1415         /* update the bitmap content */
1416         SDL_UnlockYUVOverlay(vp->bmp);
1417
1418         vp->pts = pts;
1419         vp->pos = pos;
1420
1421         /* now we can update the picture count */
1422         if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1423             is->pictq_windex = 0;
1424         SDL_LockMutex(is->pictq_mutex);
1425         vp->target_clock= compute_target_time(vp->pts, is);
1426
1427         is->pictq_size++;
1428         SDL_UnlockMutex(is->pictq_mutex);
1429     }
1430     return 0;
1431 }
1432
1433 static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
1434 {
1435     int len1 av_unused, got_picture, i;
1436
1437     if (packet_queue_get(&is->videoq, pkt, 1) < 0)
1438         return -1;
1439
1440     if (pkt->data == flush_pkt.data) {
1441         avcodec_flush_buffers(is->video_st->codec);
1442
1443         SDL_LockMutex(is->pictq_mutex);
1444         // Make sure there are no long-delay timers (ideally we should just flush the queue, but that's harder)
1445         for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
1446             is->pictq[i].target_clock= 0;
1447         }
1448         while (is->pictq_size && !is->videoq.abort_request) {
1449             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1450         }
1451         is->video_current_pos = -1;
1452         SDL_UnlockMutex(is->pictq_mutex);
1453
1454         is->frame_last_pts = AV_NOPTS_VALUE;
1455         is->frame_last_delay = 0;
1456         is->frame_timer = (double)av_gettime() / 1000000.0;
1457         is->skip_frames = 1;
1458         is->skip_frames_index = 0;
1459         return 0;
1460     }
1461
1462     len1 = avcodec_decode_video2(is->video_st->codec,
1463                                  frame, &got_picture,
1464                                  pkt);
1465
1466     if (got_picture) {
1467         if (decoder_reorder_pts == -1) {
1468             *pts = frame->best_effort_timestamp;
1469         } else if (decoder_reorder_pts) {
1470             *pts = frame->pkt_pts;
1471         } else {
1472             *pts = frame->pkt_dts;
1473         }
1474
1475         if (*pts == AV_NOPTS_VALUE) {
1476             *pts = 0;
1477         }
1478
1479         is->skip_frames_index += 1;
1480         if(is->skip_frames_index >= is->skip_frames){
1481             is->skip_frames_index -= FFMAX(is->skip_frames, 1.0);
1482             return 1;
1483         }
1484
1485     }
1486     return 0;
1487 }
1488
1489 #if CONFIG_AVFILTER
1490 typedef struct {
1491     VideoState *is;
1492     AVFrame *frame;
1493     int use_dr1;
1494 } FilterPriv;
1495
1496 static int input_get_buffer(AVCodecContext *codec, AVFrame *pic)
1497 {
1498     AVFilterContext *ctx = codec->opaque;
1499     AVFilterBufferRef  *ref;
1500     int perms = AV_PERM_WRITE;
1501     int i, w, h, stride[4];
1502     unsigned edge;
1503     int pixel_size;
1504
1505     av_assert0(codec->flags & CODEC_FLAG_EMU_EDGE);
1506
1507     if (codec->codec->capabilities & CODEC_CAP_NEG_LINESIZES)
1508         perms |= AV_PERM_NEG_LINESIZES;
1509
1510     if(pic->buffer_hints & FF_BUFFER_HINTS_VALID) {
1511         if(pic->buffer_hints & FF_BUFFER_HINTS_READABLE) perms |= AV_PERM_READ;
1512         if(pic->buffer_hints & FF_BUFFER_HINTS_PRESERVE) perms |= AV_PERM_PRESERVE;
1513         if(pic->buffer_hints & FF_BUFFER_HINTS_REUSABLE) perms |= AV_PERM_REUSE2;
1514     }
1515     if(pic->reference) perms |= AV_PERM_READ | AV_PERM_PRESERVE;
1516
1517     w = codec->width;
1518     h = codec->height;
1519
1520     if(av_image_check_size(w, h, 0, codec))
1521         return -1;
1522
1523     avcodec_align_dimensions2(codec, &w, &h, stride);
1524     edge = codec->flags & CODEC_FLAG_EMU_EDGE ? 0 : avcodec_get_edge_width();
1525     w += edge << 1;
1526     h += edge << 1;
1527
1528     if(!(ref = avfilter_get_video_buffer(ctx->outputs[0], perms, w, h)))
1529         return -1;
1530
1531     pixel_size = av_pix_fmt_descriptors[ref->format].comp[0].step_minus1+1;
1532     ref->video->w = codec->width;
1533     ref->video->h = codec->height;
1534     for(i = 0; i < 4; i ++) {
1535         unsigned hshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_w : 0;
1536         unsigned vshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_h : 0;
1537
1538         if (ref->data[i]) {
1539             ref->data[i]    += ((edge * pixel_size) >> hshift) + ((edge * ref->linesize[i]) >> vshift);
1540         }
1541         pic->data[i]     = ref->data[i];
1542         pic->linesize[i] = ref->linesize[i];
1543     }
1544     pic->opaque = ref;
1545     pic->age    = INT_MAX;
1546     pic->type   = FF_BUFFER_TYPE_USER;
1547     pic->reordered_opaque = codec->reordered_opaque;
1548     if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
1549     else           pic->pkt_pts = AV_NOPTS_VALUE;
1550     return 0;
1551 }
1552
1553 static void input_release_buffer(AVCodecContext *codec, AVFrame *pic)
1554 {
1555     memset(pic->data, 0, sizeof(pic->data));
1556     avfilter_unref_buffer(pic->opaque);
1557 }
1558
1559 static int input_reget_buffer(AVCodecContext *codec, AVFrame *pic)
1560 {
1561     AVFilterBufferRef *ref = pic->opaque;
1562
1563     if (pic->data[0] == NULL) {
1564         pic->buffer_hints |= FF_BUFFER_HINTS_READABLE;
1565         return codec->get_buffer(codec, pic);
1566     }
1567
1568     if ((codec->width != ref->video->w) || (codec->height != ref->video->h) ||
1569         (codec->pix_fmt != ref->format)) {
1570         av_log(codec, AV_LOG_ERROR, "Picture properties changed.\n");
1571         return -1;
1572     }
1573
1574     pic->reordered_opaque = codec->reordered_opaque;
1575     if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
1576     else           pic->pkt_pts = AV_NOPTS_VALUE;
1577     return 0;
1578 }
1579
1580 static int input_init(AVFilterContext *ctx, const char *args, void *opaque)
1581 {
1582     FilterPriv *priv = ctx->priv;
1583     AVCodecContext *codec;
1584     if(!opaque) return -1;
1585
1586     priv->is = opaque;
1587     codec    = priv->is->video_st->codec;
1588     codec->opaque = ctx;
1589     if((codec->codec->capabilities & CODEC_CAP_DR1)
1590     ) {
1591         av_assert0(codec->flags & CODEC_FLAG_EMU_EDGE);
1592         priv->use_dr1 = 1;
1593         codec->get_buffer     = input_get_buffer;
1594         codec->release_buffer = input_release_buffer;
1595         codec->reget_buffer   = input_reget_buffer;
1596         codec->thread_safe_callbacks = 1;
1597     }
1598
1599     priv->frame = avcodec_alloc_frame();
1600
1601     return 0;
1602 }
1603
1604 static void input_uninit(AVFilterContext *ctx)
1605 {
1606     FilterPriv *priv = ctx->priv;
1607     av_free(priv->frame);
1608 }
1609
1610 static int input_request_frame(AVFilterLink *link)
1611 {
1612     FilterPriv *priv = link->src->priv;
1613     AVFilterBufferRef *picref;
1614     int64_t pts = 0;
1615     AVPacket pkt;
1616     int ret;
1617
1618     while (!(ret = get_video_frame(priv->is, priv->frame, &pts, &pkt)))
1619         av_free_packet(&pkt);
1620     if (ret < 0)
1621         return -1;
1622
1623     if(priv->use_dr1 && priv->frame->opaque) {
1624         picref = avfilter_ref_buffer(priv->frame->opaque, ~0);
1625     } else {
1626         picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, link->w, link->h);
1627         av_image_copy(picref->data, picref->linesize,
1628                       priv->frame->data, priv->frame->linesize,
1629                       picref->format, link->w, link->h);
1630     }
1631     av_free_packet(&pkt);
1632
1633     avfilter_copy_frame_props(picref, priv->frame);
1634     picref->pts = pts;
1635
1636     avfilter_start_frame(link, picref);
1637     avfilter_draw_slice(link, 0, link->h, 1);
1638     avfilter_end_frame(link);
1639
1640     return 0;
1641 }
1642
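/* advertise only the pixel format of the decoded video stream */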
1643 static int input_query_formats(AVFilterContext *ctx)
1644 {
1645     FilterPriv *priv = ctx->priv;
1646     enum PixelFormat pix_fmts[] = {
1647         priv->is->video_st->codec->pix_fmt, PIX_FMT_NONE
1648     };
1649
1650     avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts));
1651     return 0;
1652 }
1653
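/* propagate the decoder dimensions and the stream time base to the output link */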
1654 static int input_config_props(AVFilterLink *link)
1655 {
1656     FilterPriv *priv  = link->src->priv;
1657     AVCodecContext *c = priv->is->video_st->codec;
1658
1659     link->w = c->width;
1660     link->h = c->height;
1661     link->time_base = priv->is->video_st->time_base;
1662
1663     return 0;
1664 }
1665
1666 static AVFilter input_filter =
1667 {
1668     .name      = "ffplay_input",
1669
1670     .priv_size = sizeof(FilterPriv),
1671
1672     .init      = input_init,
1673     .uninit    = input_uninit,
1674
1675     .query_formats = input_query_formats,
1676
1677     .inputs    = (AVFilterPad[]) {{ .name = NULL }},
1678     .outputs   = (AVFilterPad[]) {{ .name = "default",
1679                                     .type = AVMEDIA_TYPE_VIDEO,
1680                                     .request_frame = input_request_frame,
1681                                     .config_props  = input_config_props, },
1682                                   { .name = NULL }},
1683 };
1684
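/* build the video filter graph: ffplay_input source -> optional user filter chain (-vf) -> ffsink output */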
1685 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters)
1686 {
1687     char sws_flags_str[128];
1688     int ret;
1689     FFSinkContext ffsink_ctx = { .pix_fmt = PIX_FMT_YUV420P };
1690     AVFilterContext *filt_src = NULL, *filt_out = NULL;
1691     snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%d", sws_flags);
1692     graph->scale_sws_opts = av_strdup(sws_flags_str);
1693
1694     if ((ret = avfilter_graph_create_filter(&filt_src, &input_filter, "src",
1695                                             NULL, is, graph)) < 0)
1696         goto the_end;
1697     if ((ret = avfilter_graph_create_filter(&filt_out, &ffsink, "out",
1698                                             NULL, &ffsink_ctx, graph)) < 0)
1699         goto the_end;
1700
1701     if(vfilters) {
1702         AVFilterInOut *outputs = av_malloc(sizeof(AVFilterInOut));
1703         AVFilterInOut *inputs  = av_malloc(sizeof(AVFilterInOut));
1704
1705         outputs->name    = av_strdup("in");
1706         outputs->filter_ctx = filt_src;
1707         outputs->pad_idx = 0;
1708         outputs->next    = NULL;
1709
1710         inputs->name    = av_strdup("out");
1711         inputs->filter_ctx = filt_out;
1712         inputs->pad_idx = 0;
1713         inputs->next    = NULL;
1714
1715         if ((ret = avfilter_graph_parse(graph, vfilters, inputs, outputs, NULL)) < 0)
1716             goto the_end;
1717         av_freep(&vfilters);
1718     } else {
1719         if ((ret = avfilter_link(filt_src, 0, filt_out, 0)) < 0)
1720             goto the_end;
1721     }
1722
1723     if ((ret = avfilter_graph_config(graph, NULL)) < 0)
1724         goto the_end;
1725
1726     is->out_video_filter = filt_out;
1727 the_end:
1728     return ret;
1729 }
1730
1731 #endif  /* CONFIG_AVFILTER */
1732
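/* video decoding thread: pull decoded frames, optionally through the filter graph, and queue them for display */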
1733 static int video_thread(void *arg)
1734 {
1735     VideoState *is = arg;
1736     AVFrame *frame= avcodec_alloc_frame();
1737     int64_t pts_int = AV_NOPTS_VALUE, pos = -1;
1738     double pts;
1739     int ret;
1740
1741 #if CONFIG_AVFILTER
1742     AVFilterGraph *graph = avfilter_graph_alloc();
1743     AVFilterContext *filt_out = NULL;
1744
1745     if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
1746         goto the_end;
1747     filt_out = is->out_video_filter;
1748 #endif
1749
1750     for(;;) {
1751 #if !CONFIG_AVFILTER
1752         AVPacket pkt;
1753 #else
1754         AVFilterBufferRef *picref;
1755         AVRational tb;
1756 #endif
1757         while (is->paused && !is->videoq.abort_request)
1758             SDL_Delay(10);
1759 #if CONFIG_AVFILTER
1760         ret = get_filtered_video_frame(filt_out, frame, &picref, &tb);
1761         if (picref) {
1762             pts_int = picref->pts;
1763             pos     = picref->pos;
1764             frame->opaque = picref;
1765         }
1766
1767         if (av_cmp_q(tb, is->video_st->time_base)) {
1768             av_unused int64_t pts1 = pts_int;
1769             pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
1770             av_dlog(NULL, "video_thread(): "
1771                     "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
1772                     tb.num, tb.den, pts1,
1773                     is->video_st->time_base.num, is->video_st->time_base.den, pts_int);
1774         }
1775 #else
1776         ret = get_video_frame(is, frame, &pts_int, &pkt);
1777         pos = pkt.pos;
1778         av_free_packet(&pkt);
1779 #endif
1780
1781         if (ret < 0) goto the_end;
1782
1783         if (!ret)
1784             continue;
1785
1786         pts = pts_int*av_q2d(is->video_st->time_base);
1787
1788         ret = queue_picture(is, frame, pts, pos);
1789
1790         if (ret < 0)
1791             goto the_end;
1792
1793         if (step)
1794             if (cur_stream)
1795                 stream_toggle_pause(cur_stream);
1796     }
1797  the_end:
1798 #if CONFIG_AVFILTER
1799     avfilter_graph_free(&graph);
1800 #endif
1801     av_free(frame);
1802     return 0;
1803 }
1804
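/* subtitle decoding thread: decode subtitle packets, convert their palettes from RGBA to YUVA and queue the resulting subpictures */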
1805 static int subtitle_thread(void *arg)
1806 {
1807     VideoState *is = arg;
1808     SubPicture *sp;
1809     AVPacket pkt1, *pkt = &pkt1;
1810     int len1 av_unused, got_subtitle;
1811     double pts;
1812     int i, j;
1813     int r, g, b, y, u, v, a;
1814
1815     for(;;) {
1816         while (is->paused && !is->subtitleq.abort_request) {
1817             SDL_Delay(10);
1818         }
1819         if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
1820             break;
1821
1822         if(pkt->data == flush_pkt.data){
1823             avcodec_flush_buffers(is->subtitle_st->codec);
1824             continue;
1825         }
1826         SDL_LockMutex(is->subpq_mutex);
1827         while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
1828                !is->subtitleq.abort_request) {
1829             SDL_CondWait(is->subpq_cond, is->subpq_mutex);
1830         }
1831         SDL_UnlockMutex(is->subpq_mutex);
1832
1833         if (is->subtitleq.abort_request)
1834             goto the_end;
1835
1836         sp = &is->subpq[is->subpq_windex];
1837
1838         /* NOTE: pts is the PTS of the _first_ picture beginning in
1839            this packet, if any */
1840         pts = 0;
1841         if (pkt->pts != AV_NOPTS_VALUE)
1842             pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;
1843
1844         len1 = avcodec_decode_subtitle2(is->subtitle_st->codec,
1845                                     &sp->sub, &got_subtitle,
1846                                     pkt);
1847         if (got_subtitle && sp->sub.format == 0) {
1848             sp->pts = pts;
1849
1850             for (i = 0; i < sp->sub.num_rects; i++)
1851             {
1852                 for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
1853                 {
1854                     RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
1855                     y = RGB_TO_Y_CCIR(r, g, b);
1856                     u = RGB_TO_U_CCIR(r, g, b, 0);
1857                     v = RGB_TO_V_CCIR(r, g, b, 0);
1858                     YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
1859                 }
1860             }
1861
1862             /* now we can update the picture count */
1863             if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
1864                 is->subpq_windex = 0;
1865             SDL_LockMutex(is->subpq_mutex);
1866             is->subpq_size++;
1867             SDL_UnlockMutex(is->subpq_mutex);
1868         }
1869         av_free_packet(pkt);
1870     }
1871  the_end:
1872     return 0;
1873 }
1874
1875 /* copy samples for viewing in the audio display window */
1876 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1877 {
1878     int size, len;
1879
1880     size = samples_size / sizeof(short);
1881     while (size > 0) {
1882         len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1883         if (len > size)
1884             len = size;
1885         memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1886         samples += len;
1887         is->sample_array_index += len;
1888         if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1889             is->sample_array_index = 0;
1890         size -= len;
1891     }
1892 }
1893
1894 /* return the new audio buffer size (samples can be added or deleted
1895    to get better sync if video or the external clock is the master clock) */
1896 static int synchronize_audio(VideoState *is, short *samples,
1897                              int samples_size1, double pts)
1898 {
1899     int n, samples_size;
1900     double ref_clock;
1901
1902     n = 2 * is->audio_st->codec->channels;
1903     samples_size = samples_size1;
1904
1905     /* if not master, then we try to remove or add samples to correct the clock */
1906     if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1907          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1908         double diff, avg_diff;
1909         int wanted_size, min_size, max_size, nb_samples;
1910
1911         ref_clock = get_master_clock(is);
1912         diff = get_audio_clock(is) - ref_clock;
1913
1914         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1915             is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1916             if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1917                 /* not enough measurements to have a correct estimate */
1918                 is->audio_diff_avg_count++;
1919             } else {
1920                 /* estimate the A-V difference */
1921                 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1922
1923                 if (fabs(avg_diff) >= is->audio_diff_threshold) {
1924                     wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
1925                     nb_samples = samples_size / n;
1926
1927                     min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1928                     max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1929                     if (wanted_size < min_size)
1930                         wanted_size = min_size;
1931                     else if (wanted_size > max_size)
1932                         wanted_size = max_size;
1933
1934                     /* add or remove samples to correct the sync */
1935                     if (wanted_size < samples_size) {
1936                         /* remove samples */
1937                         samples_size = wanted_size;
1938                     } else if (wanted_size > samples_size) {
1939                         uint8_t *samples_end, *q;
1940                         int nb;
1941
1942                         /* add samples */
1943                         nb = (wanted_size - samples_size);
1944                         samples_end = (uint8_t *)samples + samples_size - n;
1945                         q = samples_end + n;
1946                         while (nb > 0) {
1947                             memcpy(q, samples_end, n);
1948                             q += n;
1949                             nb -= n;
1950                         }
1951                         samples_size = wanted_size;
1952                     }
1953                 }
1954 #if 0
1955                 printf("diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
1956                        diff, avg_diff, samples_size - samples_size1,
1957                        is->audio_clock, is->video_clock, is->audio_diff_threshold);
1958 #endif
1959             }
1960         } else {
1961             /* too big a difference: may be due to initial PTS errors, so
1962                reset the A-V filter */
1963             is->audio_diff_avg_count = 0;
1964             is->audio_diff_cum = 0;
1965         }
1966     }
1967
1968     return samples_size;
1969 }
1970
1971 /* decode one audio frame and return its uncompressed size */
1972 static int audio_decode_frame(VideoState *is, double *pts_ptr)
1973 {
1974     AVPacket *pkt_temp = &is->audio_pkt_temp;
1975     AVPacket *pkt = &is->audio_pkt;
1976     AVCodecContext *dec= is->audio_st->codec;
1977     int n, len1, data_size;
1978     double pts;
1979
1980     for(;;) {
1981         /* NOTE: the audio packet can contain several frames */
1982         while (pkt_temp->size > 0) {
1983             data_size = sizeof(is->audio_buf1);
1984             len1 = avcodec_decode_audio3(dec,
1985                                         (int16_t *)is->audio_buf1, &data_size,
1986                                         pkt_temp);
1987             if (len1 < 0) {
1988                 /* if error, we skip the frame */
1989                 pkt_temp->size = 0;
1990                 break;
1991             }
1992
1993             pkt_temp->data += len1;
1994             pkt_temp->size -= len1;
1995             if (data_size <= 0)
1996                 continue;
1997
1998             if (dec->sample_fmt != is->audio_src_fmt) {
1999                 if (is->reformat_ctx)
2000                     av_audio_convert_free(is->reformat_ctx);
2001                 is->reformat_ctx= av_audio_convert_alloc(AV_SAMPLE_FMT_S16, 1,
2002                                                          dec->sample_fmt, 1, NULL, 0);
2003                 if (!is->reformat_ctx) {
2004                     fprintf(stderr, "Cannot convert %s sample format to %s sample format\n",
2005                         av_get_sample_fmt_name(dec->sample_fmt),
2006                         av_get_sample_fmt_name(AV_SAMPLE_FMT_S16));
2007                     break;
2008                 }
2009                 is->audio_src_fmt= dec->sample_fmt;
2010             }
2011
2012             if (is->reformat_ctx) {
2013                 const void *ibuf[6]= {is->audio_buf1};
2014                 void *obuf[6]= {is->audio_buf2};
2015                 int istride[6]= {av_get_bits_per_sample_fmt(dec->sample_fmt)/8};
2016                 int ostride[6]= {2};
2017                 int len= data_size/istride[0];
2018                 if (av_audio_convert(is->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) {
2019                     printf("av_audio_convert() failed\n");
2020                     break;
2021                 }
2022                 is->audio_buf= is->audio_buf2;
2023                 /* FIXME: existing code assumes that data_size equals framesize*channels*2,
2024                           remove this legacy cruft */
2025                 data_size= len*2;
2026             }else{
2027                 is->audio_buf= is->audio_buf1;
2028             }
2029
2030             /* derive the frame pts from the audio clock */
2031             pts = is->audio_clock;
2032             *pts_ptr = pts;
2033             n = 2 * dec->channels;
2034             is->audio_clock += (double)data_size /
2035                 (double)(n * dec->sample_rate);
2036 #if defined(DEBUG_SYNC)
2037             {
2038                 static double last_clock;
2039                 printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
2040                        is->audio_clock - last_clock,
2041                        is->audio_clock, pts);
2042                 last_clock = is->audio_clock;
2043             }
2044 #endif
2045             return data_size;
2046         }
2047
2048         /* free the current packet */
2049         if (pkt->data)
2050             av_free_packet(pkt);
2051
2052         if (is->paused || is->audioq.abort_request) {
2053             return -1;
2054         }
2055
2056         /* read next packet */
2057         if (packet_queue_get(&is->audioq, pkt, 1) < 0)
2058             return -1;
2059         if(pkt->data == flush_pkt.data){
2060             avcodec_flush_buffers(dec);
2061             continue;
2062         }
2063
2064         pkt_temp->data = pkt->data;
2065         pkt_temp->size = pkt->size;
2066
2067         /* update the audio clock with the packet pts, if available */
2068         if (pkt->pts != AV_NOPTS_VALUE) {
2069             is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
2070         }
2071     }
2072 }
2073
2074 /* prepare a new audio buffer */
2075 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2076 {
2077     VideoState *is = opaque;
2078     int audio_size, len1;
2079     double pts;
2080
2081     audio_callback_time = av_gettime();
2082
2083     while (len > 0) {
2084         if (is->audio_buf_index >= is->audio_buf_size) {
2085             audio_size = audio_decode_frame(is, &pts);
2086             if (audio_size < 0) {
2087                 /* if error, just output silence */
2088                 is->audio_buf = is->audio_buf1;
2089                 is->audio_buf_size = 1024;
2090                 memset(is->audio_buf, 0, is->audio_buf_size);
2091             } else {
2092                 if (is->show_mode != SHOW_MODE_VIDEO)
2093                     update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2094                 audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
2095                                                pts);
2096                 is->audio_buf_size = audio_size;
2097             }
2098             is->audio_buf_index = 0;
2099         }
2100         len1 = is->audio_buf_size - is->audio_buf_index;
2101         if (len1 > len)
2102             len1 = len;
2103         memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2104         len -= len1;
2105         stream += len1;
2106         is->audio_buf_index += len1;
2107     }
2108 }
2109
2110 /* open a given stream. Return 0 if OK */
2111 static int stream_component_open(VideoState *is, int stream_index)
2112 {
2113     AVFormatContext *ic = is->ic;
2114     AVCodecContext *avctx;
2115     AVCodec *codec;
2116     SDL_AudioSpec wanted_spec, spec;
2117
2118     if (stream_index < 0 || stream_index >= ic->nb_streams)
2119         return -1;
2120     avctx = ic->streams[stream_index]->codec;
2121
2122     /* request a downmix to at most 2 channels for audio output */
2123     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2124         if (avctx->channels > 0) {
2125             avctx->request_channels = FFMIN(2, avctx->channels);
2126         } else {
2127             avctx->request_channels = 2;
2128         }
2129     }
2130
2131     codec = avcodec_find_decoder(avctx->codec_id);
2132     if (!codec)
2133         return -1;
2134
2135     avctx->workaround_bugs = workaround_bugs;
2136     avctx->lowres = lowres;
2137     if(lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
2138     avctx->idct_algo= idct;
2139     if(fast) avctx->flags2 |= CODEC_FLAG2_FAST;
2140     avctx->skip_frame= skip_frame;
2141     avctx->skip_idct= skip_idct;
2142     avctx->skip_loop_filter= skip_loop_filter;
2143     avctx->error_recognition= error_recognition;
2144     avctx->error_concealment= error_concealment;
2145     avctx->thread_count= thread_count;
2146
2147     set_context_opts(avctx, avcodec_opts[avctx->codec_type], 0, codec);
2148
2149     if(codec->capabilities & CODEC_CAP_DR1)
2150         avctx->flags |= CODEC_FLAG_EMU_EDGE;
2151
2152     if (avcodec_open(avctx, codec) < 0)
2153         return -1;
2154
2155     /* prepare audio output */
2156     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2157         if(avctx->sample_rate <= 0 || avctx->channels <= 0){
2158             fprintf(stderr, "Invalid sample rate or channel count\n");
2159             return -1;
2160         }
2161         wanted_spec.freq = avctx->sample_rate;
2162         wanted_spec.format = AUDIO_S16SYS;
2163         wanted_spec.channels = avctx->channels;
2164         wanted_spec.silence = 0;
2165         wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
2166         wanted_spec.callback = sdl_audio_callback;
2167         wanted_spec.userdata = is;
2168         if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2169             fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
2170             return -1;
2171         }
2172         is->audio_hw_buf_size = spec.size;
2173         is->audio_src_fmt= AV_SAMPLE_FMT_S16;
2174     }
2175
2176     ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2177     switch(avctx->codec_type) {
2178     case AVMEDIA_TYPE_AUDIO:
2179         is->audio_stream = stream_index;
2180         is->audio_st = ic->streams[stream_index];
2181         is->audio_buf_size = 0;
2182         is->audio_buf_index = 0;
2183
2184         /* init averaging filter */
2185         is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2186         is->audio_diff_avg_count = 0;
2187         /* since we do not have a precise enough audio FIFO fullness measure,
2188            we correct audio sync only if the error is larger than this threshold */
2189         is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / avctx->sample_rate;
2190
2191         memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
2192         packet_queue_init(&is->audioq);
2193         SDL_PauseAudio(0);
2194         break;
2195     case AVMEDIA_TYPE_VIDEO:
2196         is->video_stream = stream_index;
2197         is->video_st = ic->streams[stream_index];
2198
2199         packet_queue_init(&is->videoq);
2200         is->video_tid = SDL_CreateThread(video_thread, is);
2201         break;
2202     case AVMEDIA_TYPE_SUBTITLE:
2203         is->subtitle_stream = stream_index;
2204         is->subtitle_st = ic->streams[stream_index];
2205         packet_queue_init(&is->subtitleq);
2206
2207         is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
2208         break;
2209     default:
2210         break;
2211     }
2212     return 0;
2213 }
2214
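/* close a stream component opened by stream_component_open() and stop the associated decoding thread */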
2215 static void stream_component_close(VideoState *is, int stream_index)
2216 {
2217     AVFormatContext *ic = is->ic;
2218     AVCodecContext *avctx;
2219
2220     if (stream_index < 0 || stream_index >= ic->nb_streams)
2221         return;
2222     avctx = ic->streams[stream_index]->codec;
2223
2224     switch(avctx->codec_type) {
2225     case AVMEDIA_TYPE_AUDIO:
2226         packet_queue_abort(&is->audioq);
2227
2228         SDL_CloseAudio();
2229
2230         packet_queue_end(&is->audioq);
2231         if (is->reformat_ctx)
2232             av_audio_convert_free(is->reformat_ctx);
2233         is->reformat_ctx = NULL;
2234         break;
2235     case AVMEDIA_TYPE_VIDEO:
2236         packet_queue_abort(&is->videoq);
2237
2238         /* note: we also signal this mutex to make sure we deblock the
2239            video thread in all cases */
2240         SDL_LockMutex(is->pictq_mutex);
2241         SDL_CondSignal(is->pictq_cond);
2242         SDL_UnlockMutex(is->pictq_mutex);
2243
2244         SDL_WaitThread(is->video_tid, NULL);
2245
2246         packet_queue_end(&is->videoq);
2247         break;
2248     case AVMEDIA_TYPE_SUBTITLE:
2249         packet_queue_abort(&is->subtitleq);
2250
2251         /* note: we also signal this mutex to make sure we deblock the
2252            subtitle thread in all cases */
2253         SDL_LockMutex(is->subpq_mutex);
2254         is->subtitle_stream_changed = 1;
2255
2256         SDL_CondSignal(is->subpq_cond);
2257         SDL_UnlockMutex(is->subpq_mutex);
2258
2259         SDL_WaitThread(is->subtitle_tid, NULL);
2260
2261         packet_queue_end(&is->subtitleq);
2262         break;
2263     default:
2264         break;
2265     }
2266
2267     ic->streams[stream_index]->discard = AVDISCARD_ALL;
2268     avcodec_close(avctx);
2269     switch(avctx->codec_type) {
2270     case AVMEDIA_TYPE_AUDIO:
2271         is->audio_st = NULL;
2272         is->audio_stream = -1;
2273         break;
2274     case AVMEDIA_TYPE_VIDEO:
2275         is->video_st = NULL;
2276         is->video_stream = -1;
2277         break;
2278     case AVMEDIA_TYPE_SUBTITLE:
2279         is->subtitle_st = NULL;
2280         is->subtitle_stream = -1;
2281         break;
2282     default:
2283         break;
2284     }
2285 }
2286
2287 /* since we have only one decoding thread, we can use a global
2288    variable instead of a thread local variable */
2289 static VideoState *global_video_state;
2290
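/* interrupt callback: abort blocking I/O as soon as the current stream requests it */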
2291 static int decode_interrupt_cb(void)
2292 {
2293     return (global_video_state && global_video_state->abort_request);
2294 }
2295
2296 /* this thread gets the stream from the disk or the network */
2297 static int read_thread(void *arg)
2298 {
2299     VideoState *is = arg;
2300     AVFormatContext *ic;
2301     int err, i, ret;
2302     int st_index[AVMEDIA_TYPE_NB];
2303     AVPacket pkt1, *pkt = &pkt1;
2304     AVFormatParameters params, *ap = &params;
2305     int eof=0;
2306     int pkt_in_play_range = 0;
2307
2308     ic = avformat_alloc_context();
2309
2310     memset(st_index, -1, sizeof(st_index));
2311     is->video_stream = -1;
2312     is->audio_stream = -1;
2313     is->subtitle_stream = -1;
2314
2315     global_video_state = is;
2316     avio_set_interrupt_cb(decode_interrupt_cb);
2317
2318     memset(ap, 0, sizeof(*ap));
2319
2320     ap->prealloced_context = 1;
2321     ap->width = frame_width;
2322     ap->height= frame_height;
2323     ap->time_base= (AVRational){1, 25};
2324     ap->pix_fmt = frame_pix_fmt;
2325     ic->flags |= AVFMT_FLAG_PRIV_OPT;
2326
2327
2328     err = av_open_input_file(&ic, is->filename, is->iformat, 0, ap);
2329     if (err >= 0) {
2330         set_context_opts(ic, avformat_opts, AV_OPT_FLAG_DECODING_PARAM, NULL);
2331         err = av_demuxer_open(ic, ap);
2332         if(err < 0){
2333             avformat_free_context(ic);
2334             ic= NULL;
2335         }
2336     }
2337     if (err < 0) {
2338         print_error(is->filename, err);
2339         ret = -1;
2340         goto fail;
2341     }
2342     is->ic = ic;
2343
2344     if(genpts)
2345         ic->flags |= AVFMT_FLAG_GENPTS;
2346
2347     err = av_find_stream_info(ic);
2348     if (err < 0) {
2349         fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
2350         ret = -1;
2351         goto fail;
2352     }
2353     if(ic->pb)
2354         ic->pb->eof_reached= 0; //FIXME hack, ffplay maybe should not use url_feof() to test for the end
2355
2356     if(seek_by_bytes<0)
2357         seek_by_bytes= !!(ic->iformat->flags & AVFMT_TS_DISCONT);
2358
2359     /* if seeking requested, we execute it */
2360     if (start_time != AV_NOPTS_VALUE) {
2361         int64_t timestamp;
2362
2363         timestamp = start_time;
2364         /* add the stream start time */
2365         if (ic->start_time != AV_NOPTS_VALUE)
2366             timestamp += ic->start_time;
2367         ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2368         if (ret < 0) {
2369             fprintf(stderr, "%s: could not seek to position %0.3f\n",
2370                     is->filename, (double)timestamp / AV_TIME_BASE);
2371         }
2372     }
2373
2374     for (i = 0; i < ic->nb_streams; i++)
2375         ic->streams[i]->discard = AVDISCARD_ALL;
2376     if (!video_disable)
2377         st_index[AVMEDIA_TYPE_VIDEO] =
2378             av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2379                                 wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2380     if (!audio_disable)
2381         st_index[AVMEDIA_TYPE_AUDIO] =
2382             av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2383                                 wanted_stream[AVMEDIA_TYPE_AUDIO],
2384                                 st_index[AVMEDIA_TYPE_VIDEO],
2385                                 NULL, 0);
2386     if (!video_disable)
2387         st_index[AVMEDIA_TYPE_SUBTITLE] =
2388             av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2389                                 wanted_stream[AVMEDIA_TYPE_SUBTITLE],
2390                                 (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2391                                  st_index[AVMEDIA_TYPE_AUDIO] :
2392                                  st_index[AVMEDIA_TYPE_VIDEO]),
2393                                 NULL, 0);
2394     if (show_status) {
2395         av_dump_format(ic, 0, is->filename, 0);
2396     }
2397
2398     is->show_mode = show_mode;
2399
2400     /* open the streams */
2401     if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2402         stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2403     }
2404
2405     ret=-1;
2406     if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2407         ret= stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2408     }
2409     is->refresh_tid = SDL_CreateThread(refresh_thread, is);
2410     if (is->show_mode == SHOW_MODE_NONE)
2411         is->show_mode = ret >= 0 ? SHOW_MODE_VIDEO : SHOW_MODE_RDFT;
2412
2413     if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2414         stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2415     }
2416
2417     if (is->video_stream < 0 && is->audio_stream < 0) {
2418         fprintf(stderr, "%s: could not open codecs\n", is->filename);
2419         ret = -1;
2420         goto fail;
2421     }
2422
2423     for(;;) {
2424         if (is->abort_request)
2425             break;
2426         if (is->paused != is->last_paused) {
2427             is->last_paused = is->paused;
2428             if (is->paused)
2429                 is->read_pause_return= av_read_pause(ic);
2430             else
2431                 av_read_play(ic);
2432         }
2433 #if CONFIG_RTSP_DEMUXER
2434         if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
2435             /* wait 10 ms to avoid trying to get another packet */
2436             /* XXX: horrible */
2437             SDL_Delay(10);
2438             continue;
2439         }
2440 #endif
2441         if (is->seek_req) {
2442             int64_t seek_target= is->seek_pos;
2443             int64_t seek_min= is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2444             int64_t seek_max= is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2445 //FIXME the +-2 is due to rounding not being done in the correct direction in the generation
2446 //      of the seek_pos/seek_rel variables
2447
2448             ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2449             if (ret < 0) {
2450                 fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
2451             }else{
2452                 if (is->audio_stream >= 0) {
2453                     packet_queue_flush(&is->audioq);
2454                     packet_queue_put(&is->audioq, &flush_pkt);
2455                 }
2456                 if (is->subtitle_stream >= 0) {
2457                     packet_queue_flush(&is->subtitleq);
2458                     packet_queue_put(&is->subtitleq, &flush_pkt);
2459                 }
2460                 if (is->video_stream >= 0) {
2461                     packet_queue_flush(&is->videoq);
2462                     packet_queue_put(&is->videoq, &flush_pkt);
2463                 }
2464             }
2465             is->seek_req = 0;
2466             eof= 0;
2467         }
2468
2469         /* if the queues are full, no need to read more */
2470         if (   is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2471             || (   (is->audioq   .size  > MIN_AUDIOQ_SIZE || is->audio_stream<0)
2472                 && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream<0)
2473                 && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream<0))) {
2474             /* wait 10 ms */
2475             SDL_Delay(10);
2476             continue;
2477         }
2478         if(eof) {
2479             if(is->video_stream >= 0){
2480                 av_init_packet(pkt);
2481                 pkt->data=NULL;
2482                 pkt->size=0;
2483                 pkt->stream_index= is->video_stream;
2484                 packet_queue_put(&is->videoq, pkt);
2485             }
2486             SDL_Delay(10);
2487             if(is->audioq.size + is->videoq.size + is->subtitleq.size ==0){
2488                 if(loop!=1 && (!loop || --loop)){
2489                     stream_seek(cur_stream, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
2490                 }else if(autoexit){
2491                     ret=AVERROR_EOF;
2492                     goto fail;
2493                 }
2494             }
2495             eof=0;
2496             continue;
2497         }
2498         ret = av_read_frame(ic, pkt);
2499         if (ret < 0) {
2500             if (ret == AVERROR_EOF || url_feof(ic->pb))
2501                 eof=1;
2502             if (ic->pb && ic->pb->error)
2503                 break;
2504             SDL_Delay(100); /* wait for user event */
2505             continue;
2506         }
2507         /* check if packet is in play range specified by user, then queue, otherwise discard */
2508         pkt_in_play_range = duration == AV_NOPTS_VALUE ||
2509                 (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
2510                 av_q2d(ic->streams[pkt->stream_index]->time_base) -
2511                 (double)(start_time != AV_NOPTS_VALUE ? start_time : 0)/1000000
2512                 <= ((double)duration/1000000);
2513         if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
2514             packet_queue_put(&is->audioq, pkt);
2515         } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
2516             packet_queue_put(&is->videoq, pkt);
2517         } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
2518             packet_queue_put(&is->subtitleq, pkt);
2519         } else {
2520             av_free_packet(pkt);
2521         }
2522     }
2523     /* wait until the end */
2524     while (!is->abort_request) {
2525         SDL_Delay(100);
2526     }
2527
2528     ret = 0;
2529  fail:
2530     /* disable interrupting */
2531     global_video_state = NULL;
2532
2533     /* close each stream */
2534     if (is->audio_stream >= 0)
2535         stream_component_close(is, is->audio_stream);
2536     if (is->video_stream >= 0)
2537         stream_component_close(is, is->video_stream);
2538     if (is->subtitle_stream >= 0)
2539         stream_component_close(is, is->subtitle_stream);
2540     if (is->ic) {
2541         av_close_input_file(is->ic);
2542         is->ic = NULL; /* safety */
2543     }
2544     avio_set_interrupt_cb(NULL);
2545
2546     if (ret != 0) {
2547         SDL_Event event;
2548
2549         event.type = FF_QUIT_EVENT;
2550         event.user.data1 = is;
2551         SDL_PushEvent(&event);
2552     }
2553     return 0;
2554 }
2555
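/* allocate a VideoState for the given input file and start the read thread */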
2556 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2557 {
2558     VideoState *is;
2559
2560     is = av_mallocz(sizeof(VideoState));
2561     if (!is)
2562         return NULL;
2563     av_strlcpy(is->filename, filename, sizeof(is->filename));
2564     is->iformat = iformat;
2565     is->ytop = 0;
2566     is->xleft = 0;
2567
2568     /* init the picture and subpicture queue synchronization primitives */
2569     is->pictq_mutex = SDL_CreateMutex();
2570     is->pictq_cond = SDL_CreateCond();
2571
2572     is->subpq_mutex = SDL_CreateMutex();
2573     is->subpq_cond = SDL_CreateCond();
2574
2575     is->av_sync_type = av_sync_type;
2576     is->read_tid = SDL_CreateThread(read_thread, is);
2577     if (!is->read_tid) {
2578         av_free(is);
2579         return NULL;
2580     }
2581     return is;
2582 }
2583
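/* switch to the next available stream of the given type (audio, video or subtitle) */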
2584 static void stream_cycle_channel(VideoState *is, int codec_type)
2585 {
2586     AVFormatContext *ic = is->ic;
2587     int start_index, stream_index;
2588     AVStream *st;
2589
2590     if (codec_type == AVMEDIA_TYPE_VIDEO)
2591         start_index = is->video_stream;
2592     else if (codec_type == AVMEDIA_TYPE_AUDIO)
2593         start_index = is->audio_stream;
2594     else
2595         start_index = is->subtitle_stream;
2596     if (start_index < (codec_type == AVMEDIA_TYPE_SUBTITLE ? -1 : 0))
2597         return;
2598     stream_index = start_index;
2599     for(;;) {
2600         if (++stream_index >= is->ic->nb_streams)
2601         {
2602             if (codec_type == AVMEDIA_TYPE_SUBTITLE)
2603             {
2604                 stream_index = -1;
2605                 goto the_end;
2606             } else
2607                 stream_index = 0;
2608         }
2609         if (stream_index == start_index)
2610             return;
2611         st = ic->streams[stream_index];
2612         if (st->codec->codec_type == codec_type) {
2613             /* check that parameters are OK */
2614             switch(codec_type) {
2615             case AVMEDIA_TYPE_AUDIO:
2616                 if (st->codec->sample_rate != 0 &&
2617                     st->codec->channels != 0)
2618                     goto the_end;
2619                 break;
2620             case AVMEDIA_TYPE_VIDEO:
2621             case AVMEDIA_TYPE_SUBTITLE:
2622                 goto the_end;
2623             default:
2624                 break;
2625             }
2626         }
2627     }
2628  the_end:
2629     stream_component_close(is, start_index);
2630     stream_component_open(is, stream_index);
2631 }
2632
2633
2634 static void toggle_full_screen(void)
2635 {
2636     is_full_screen = !is_full_screen;
2637     video_open(cur_stream);
2638 }
2639
2640 static void toggle_pause(void)
2641 {
2642     if (cur_stream)
2643         stream_toggle_pause(cur_stream);
2644     step = 0;
2645 }
2646
2647 static void step_to_next_frame(void)
2648 {
2649     if (cur_stream) {
2650         /* if the stream is paused, unpause it, then step */
2651         if (cur_stream->paused)
2652             stream_toggle_pause(cur_stream);
2653     }
2654     step = 1;
2655 }
2656
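/* cycle through the display modes (video, waves, RDFT) and clear the drawing area */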
2657 static void toggle_audio_display(void)
2658 {
2659     if (cur_stream) {
2660         int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2661         cur_stream->show_mode = (cur_stream->show_mode + 1) % SHOW_MODE_NB;
2662         fill_rectangle(screen,
2663                     cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height,
2664                     bgcolor);
2665         SDL_UpdateRect(screen, cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height);
2666     }
2667 }
2668
2669 /* handle an event sent by the GUI */
2670 static void event_loop(void)
2671 {
2672     SDL_Event event;
2673     double incr, pos, frac;
2674
2675     for(;;) {
2676         double x;
2677         SDL_WaitEvent(&event);
2678         switch(event.type) {
2679         case SDL_KEYDOWN:
2680             if (exit_on_keydown) {
2681                 do_exit();
2682                 break;
2683             }
2684             switch(event.key.keysym.sym) {
2685             case SDLK_ESCAPE:
2686             case SDLK_q:
2687                 do_exit();
2688                 break;
2689             case SDLK_f:
2690                 toggle_full_screen();
2691                 break;
2692             case SDLK_p:
2693             case SDLK_SPACE:
2694                 toggle_pause();
2695                 break;
2696             case SDLK_s: //S: Step to next frame
2697                 step_to_next_frame();
2698                 break;
2699             case SDLK_a:
2700                 if (cur_stream)
2701                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
2702                 break;
2703             case SDLK_v:
2704                 if (cur_stream)
2705                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
2706                 break;
2707             case SDLK_t:
2708                 if (cur_stream)
2709                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
2710                 break;
2711             case SDLK_w:
2712                 toggle_audio_display();
2713                 break;
2714             case SDLK_LEFT:
2715                 incr = -10.0;
2716                 goto do_seek;
2717             case SDLK_RIGHT:
2718                 incr = 10.0;
2719                 goto do_seek;
2720             case SDLK_UP:
2721                 incr = 60.0;
2722                 goto do_seek;
2723             case SDLK_DOWN:
2724                 incr = -60.0;
2725             do_seek:
2726                 if (cur_stream) {
2727                     if (seek_by_bytes) {
2728                         if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos>=0){
2729                             pos= cur_stream->video_current_pos;
2730                         }else if(cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos>=0){
2731                             pos= cur_stream->audio_pkt.pos;
2732                         }else
2733                             pos = avio_tell(cur_stream->ic->pb);
2734                         if (cur_stream->ic->bit_rate)
2735                             incr *= cur_stream->ic->bit_rate / 8.0;
2736                         else
2737                             incr *= 180000.0;
2738                         pos += incr;
2739                         stream_seek(cur_stream, pos, incr, 1);
2740                     } else {
2741                         pos = get_master_clock(cur_stream);
2742                         pos += incr;
2743                         stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
2744                     }
2745                 }
2746                 break;
2747             default:
2748                 break;
2749             }
2750             break;
2751         case SDL_MOUSEBUTTONDOWN:
2752             if (exit_on_mousedown) {
2753                 do_exit();
2754                 break;
2755             }
2756         case SDL_MOUSEMOTION:
2757             if(event.type ==SDL_MOUSEBUTTONDOWN){
2758                 x= event.button.x;
2759             }else{
2760                 if(event.motion.state != SDL_PRESSED)
2761                     break;
2762                 x= event.motion.x;
2763             }
2764             if (cur_stream) {
2765                 if(seek_by_bytes || cur_stream->ic->duration<=0){
2766                     uint64_t size=  avio_size(cur_stream->ic->pb);
2767                     stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
2768                 }else{
2769                     int64_t ts;
2770                     int ns, hh, mm, ss;
2771                     int tns, thh, tmm, tss;
2772                     tns = cur_stream->ic->duration/1000000LL;
2773                     thh = tns/3600;
2774                     tmm = (tns%3600)/60;
2775                     tss = (tns%60);
2776                     frac = x/cur_stream->width;
2777                     ns = frac*tns;
2778                     hh = ns/3600;
2779                     mm = (ns%3600)/60;
2780                     ss = (ns%60);
2781                     fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
2782                             hh, mm, ss, thh, tmm, tss);
2783                     ts = frac*cur_stream->ic->duration;
2784                     if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
2785                         ts += cur_stream->ic->start_time;
2786                     stream_seek(cur_stream, ts, 0, 0);
2787                 }
2788             }
2789             break;
2790         case SDL_VIDEORESIZE:
2791             if (cur_stream) {
2792                 screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
2793                                           SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
2794                 screen_width = cur_stream->width = event.resize.w;
2795                 screen_height= cur_stream->height= event.resize.h;
2796             }
2797             break;
2798         case SDL_QUIT:
2799         case FF_QUIT_EVENT:
2800             do_exit();
2801             break;
2802         case FF_ALLOC_EVENT:
2803             video_open(event.user.data1);
2804             alloc_picture(event.user.data1);
2805             break;
2806         case FF_REFRESH_EVENT:
2807             video_refresh(event.user.data1);
2808             cur_stream->refresh=0;
2809             break;
2810         default:
2811             break;
2812         }
2813     }
2814 }
2815
2816 static int opt_frame_size(const char *opt, const char *arg)
2817 {
2818     if (av_parse_video_size(&frame_width, &frame_height, arg) < 0) {
2819         fprintf(stderr, "Incorrect frame size\n");
2820         return AVERROR(EINVAL);
2821     }
2822     if ((frame_width % 2) != 0 || (frame_height % 2) != 0) {
2823         fprintf(stderr, "Frame size must be a multiple of 2\n");
2824         return AVERROR(EINVAL);
2825     }
2826     return 0;
2827 }
2828
2829 static int opt_width(const char *opt, const char *arg)
2830 {
2831     screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2832     return 0;
2833 }
2834
2835 static int opt_height(const char *opt, const char *arg)
2836 {
2837     screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2838     return 0;
2839 }
2840
2841 static int opt_format(const char *opt, const char *arg)
2842 {
2843     file_iformat = av_find_input_format(arg);
2844     if (!file_iformat) {
2845         fprintf(stderr, "Unknown input format: %s\n", arg);
2846         return AVERROR(EINVAL);
2847     }
2848     return 0;
2849 }
2850
2851 static int opt_frame_pix_fmt(const char *opt, const char *arg)
2852 {
2853     frame_pix_fmt = av_get_pix_fmt(arg);
2854     return 0;
2855 }
2856
2857 static int opt_sync(const char *opt, const char *arg)
2858 {
2859     if (!strcmp(arg, "audio"))
2860         av_sync_type = AV_SYNC_AUDIO_MASTER;
2861     else if (!strcmp(arg, "video"))
2862         av_sync_type = AV_SYNC_VIDEO_MASTER;
2863     else if (!strcmp(arg, "ext"))
2864         av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2865     else {
2866         fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
2867         exit(1);
2868     }
2869     return 0;
2870 }
2871
2872 static int opt_seek(const char *opt, const char *arg)
2873 {
2874     start_time = parse_time_or_die(opt, arg, 1);
2875     return 0;
2876 }
2877
2878 static int opt_duration(const char *opt, const char *arg)
2879 {
2880     duration = parse_time_or_die(opt, arg, 1);
2881     return 0;
2882 }
2883
2884 static int opt_thread_count(const char *opt, const char *arg)
2885 {
2886     thread_count= parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2887 #if !HAVE_THREADS
2888     fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
2889 #endif
2890     return 0;
2891 }
2892
2893 static int opt_show_mode(const char *opt, const char *arg)
2894 {
2895     show_mode = !strcmp(arg, "video") ? SHOW_MODE_VIDEO :
2896                 !strcmp(arg, "waves") ? SHOW_MODE_WAVES :
2897                 !strcmp(arg, "rdft" ) ? SHOW_MODE_RDFT  :
2898                 parse_number_or_die(opt, arg, OPT_INT, 0, SHOW_MODE_NB-1);
2899     return 0;
2900 }
2901
2902 static int opt_input_file(const char *opt, const char *filename)
2903 {
2904     if (input_filename) {
2905         fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
2906                 filename, input_filename);
2907         exit(1);
2908     }
2909     if (!strcmp(filename, "-"))
2910         filename = "pipe:";
2911     input_filename = filename;
2912     return 0;
2913 }
2914
2915 static const OptionDef options[] = {
2916 #include "cmdutils_common_opts.h"
2917     { "x", HAS_ARG, {(void*)opt_width}, "force displayed width", "width" },
2918     { "y", HAS_ARG, {(void*)opt_height}, "force displayed height", "height" },
2919     { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
2920     { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
2921     { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
2922     { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
2923     { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_AUDIO]}, "select desired audio stream", "stream_number" },
2924     { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_VIDEO]}, "select desired video stream", "stream_number" },
2925     { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_SUBTITLE]}, "select desired subtitle stream", "stream_number" },
2926     { "ss", HAS_ARG, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
2927     { "t", HAS_ARG, {(void*)&opt_duration}, "play \"duration\" seconds of audio/video", "duration" },
2928     { "bytes", OPT_INT | HAS_ARG, {(void*)&seek_by_bytes}, "seek by bytes 0=off 1=on -1=auto", "val" },
2929     { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
2930     { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
2931     { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
2932     { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
2933     { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
2934     { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
2935     { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
2936     { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts 0=off 1=on -1=auto", ""},
2937     { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
2938     { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
2939     { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
2940     { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
2941     { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo",  "algo" },
2942     { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_recognition}, "set error detection threshold (0-4)",  "threshold" },
2943     { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options",  "bit_mask" },
2944     { "sync", HAS_ARG | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
2945     { "threads", HAS_ARG | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
2946     { "autoexit", OPT_BOOL | OPT_EXPERT, {(void*)&autoexit}, "exit at the end", "" },
2947     { "exitonkeydown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_keydown}, "exit on key down", "" },
2948     { "exitonmousedown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_mousedown}, "exit on mouse down", "" },
2949     { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&loop}, "set number of times the playback shall be looped", "loop count" },
2950     { "framedrop", OPT_BOOL | OPT_EXPERT, {(void*)&framedrop}, "drop frames when cpu is too slow", "" },
2951     { "window_title", OPT_STRING | HAS_ARG, {(void*)&window_title}, "set window title", "window title" },
2952 #if CONFIG_AVFILTER
2953     { "vf", OPT_STRING | HAS_ARG, {(void*)&vfilters}, "video filters", "filter list" },
2954 #endif
2955     { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, {(void*)&rdftspeed}, "rdft speed", "msecs" },
2956     { "showmode", HAS_ARG, {(void*)opt_show_mode}, "select show mode (0 = video, 1 = waves, 2 = RDFT)", "mode" },
2957     { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
2958     { "i", HAS_ARG, {(void *)opt_input_file}, "read specified file", "input_file"},
2959     { NULL, },
2960 };
2961
2962 static void show_usage(void)
2963 {
2964     printf("Simple media player\n");
2965     printf("usage: ffplay [options] input_file\n");
2966     printf("\n");
2967 }
2968
2969 static void show_help(void)
2970 {
2971     av_log_set_callback(log_callback_help);
2972     show_usage();
2973     show_help_options(options, "Main options:\n",
2974                       OPT_EXPERT, 0);
2975     show_help_options(options, "\nAdvanced options:\n",
2976                       OPT_EXPERT, OPT_EXPERT);
2977     printf("\n");
2978     av_opt_show2(avcodec_opts[0], NULL,
2979                  AV_OPT_FLAG_DECODING_PARAM, 0);
2980     printf("\n");
2981     av_opt_show2(avformat_opts, NULL,
2982                  AV_OPT_FLAG_DECODING_PARAM, 0);
2983 #if !CONFIG_AVFILTER
2984     printf("\n");
2985     av_opt_show2(sws_opts, NULL,
2986                  AV_OPT_FLAG_ENCODING_PARAM, 0);
2987 #endif
2988     printf("\nWhile playing:\n"
2989            "q, ESC              quit\n"
2990            "f                   toggle full screen\n"
2991            "p, SPC              pause\n"
2992            "a                   cycle audio channel\n"
2993            "v                   cycle video channel\n"
2994            "t                   cycle subtitle channel\n"
2995            "w                   cycle audio display modes (video/waves/RDFT)\n"
2996            "s                   activate frame-step mode\n"
2997            "left/right          seek backward/forward 10 seconds\n"
2998            "down/up             seek backward/forward 1 minute\n"
2999            "mouse click         seek to percentage in file corresponding to fraction of width\n"
3000            );
3001 }
3002
3003 /* program entry point */
3004 int main(int argc, char **argv)
3005 {
3006     int flags;
3007
3008     av_log_set_flags(AV_LOG_SKIP_REPEATED);
3009
3010     /* register all codecs, demuxers and protocols */
3011     avcodec_register_all();
3012 #if CONFIG_AVDEVICE
3013     avdevice_register_all();
3014 #endif
3015 #if CONFIG_AVFILTER
3016     avfilter_register_all();
3017 #endif
3018     av_register_all();
3019
3020     init_opts();
3021
3022     show_banner();
3023
3024     parse_options(argc, argv, options, opt_input_file);
3025
3026     if (!input_filename) {
3027         show_usage();
3028         fprintf(stderr, "An input file must be specified\n");
3029         fprintf(stderr, "Use -h to get full help or, even better, run 'man ffplay'\n");
3030         exit(1);
3031     }
3032
3033     if (display_disable) {
3034         video_disable = 1;
3035     }
3036     flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3037 #if !defined(__MINGW32__) && !defined(__APPLE__)
3038     flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
3039 #endif
3040     if (SDL_Init (flags)) {
3041         fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
3042         exit(1);
3043     }
3044
3045     if (!display_disable) {
3046 #if HAVE_SDL_VIDEO_SIZE
3047         const SDL_VideoInfo *vi = SDL_GetVideoInfo();
3048         fs_screen_width = vi->current_w;
3049         fs_screen_height = vi->current_h;
3050 #endif
3051     }
3052
3053     SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
3054     SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3055     SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3056
3057     av_init_packet(&flush_pkt);
3058     flush_pkt.data= "FLUSH";
3059
3060     cur_stream = stream_open(input_filename, file_iformat);
3061
3062     event_loop();
3063
3064     /* never returns */
3065
3066     return 0;
3067 }