ffplay: more precise audio clock based on current time
[ffmpeg.git] / ffplay.c
1 /*
2  * ffplay : Simple Media Player based on the FFmpeg libraries
3  * Copyright (c) 2003 Fabrice Bellard
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21
22 #include "config.h"
23 #include <inttypes.h>
24 #include <math.h>
25 #include <limits.h>
26 #include "libavutil/avstring.h"
27 #include "libavutil/colorspace.h"
28 #include "libavutil/mathematics.h"
29 #include "libavutil/pixdesc.h"
30 #include "libavutil/imgutils.h"
31 #include "libavutil/dict.h"
32 #include "libavutil/parseutils.h"
33 #include "libavutil/samplefmt.h"
34 #include "libavutil/avassert.h"
35 #include "libavformat/avformat.h"
36 #include "libavdevice/avdevice.h"
37 #include "libswscale/swscale.h"
38 #include "libavcodec/audioconvert.h"
39 #include "libavutil/opt.h"
40 #include "libavcodec/avfft.h"
41
42 #if CONFIG_AVFILTER
43 # include "libavfilter/avcodec.h"
44 # include "libavfilter/avfilter.h"
45 # include "libavfilter/avfiltergraph.h"
46 # include "libavfilter/vsink_buffer.h"
47 #endif
48
49 #include <SDL.h>
50 #include <SDL_thread.h>
51
52 #include "cmdutils.h"
53
54 #include <unistd.h>
55 #include <assert.h>
56
57 const char program_name[] = "ffplay";
58 const int program_birth_year = 2003;
59
60 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
61 #define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
62 #define MIN_FRAMES 5
63
64 /* SDL audio buffer size, in samples. Should be small to have precise
65    A/V sync as SDL does not have hardware buffer fullness info. */
66 #define SDL_AUDIO_BUFFER_SIZE 1024
67
68 /* no AV sync correction is done if below the AV sync threshold */
69 #define AV_SYNC_THRESHOLD 0.01
70 /* no AV correction is done if the error is too big */
71 #define AV_NOSYNC_THRESHOLD 10.0
72
73 #define FRAME_SKIP_FACTOR 0.05
74
75 /* maximum audio speed change to get correct sync */
76 #define SAMPLE_CORRECTION_PERCENT_MAX 10
77
78 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
79 #define AUDIO_DIFF_AVG_NB   20
80
81 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
82 #define SAMPLE_ARRAY_SIZE (2*65536)
83
84 static int sws_flags = SWS_BICUBIC;
85
86 typedef struct PacketQueue {
87     AVPacketList *first_pkt, *last_pkt;
88     int nb_packets;
89     int size;
90     int abort_request;
91     SDL_mutex *mutex;
92     SDL_cond *cond;
93 } PacketQueue;
94
95 #define VIDEO_PICTURE_QUEUE_SIZE 2
96 #define SUBPICTURE_QUEUE_SIZE 4
97
98 typedef struct VideoPicture {
99     double pts;                                  ///<presentation time stamp for this picture
100     double target_clock;                         ///<av_gettime() time at which this should be displayed ideally
101     double duration;                             ///<expected duration of the frame
102     int64_t pos;                                 ///<byte position in file
103     SDL_Overlay *bmp;
104     int width, height; /* source height & width */
105     int allocated;
106     enum PixelFormat pix_fmt;
107
108 #if CONFIG_AVFILTER
109     AVFilterBufferRef *picref;
110 #endif
111 } VideoPicture;
112
113 typedef struct SubPicture {
114     double pts; /* presentation time stamp for this picture */
115     AVSubtitle sub;
116 } SubPicture;
117
118 enum {
119     AV_SYNC_AUDIO_MASTER, /* default choice */
120     AV_SYNC_VIDEO_MASTER,
121     AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
122 };
123
124 typedef struct VideoState {
125     SDL_Thread *read_tid;
126     SDL_Thread *video_tid;
127     SDL_Thread *refresh_tid;
128     AVInputFormat *iformat;
129     int no_background;
130     int abort_request;
131     int paused;
132     int last_paused;
133     int seek_req;
134     int seek_flags;
135     int64_t seek_pos;
136     int64_t seek_rel;
137     int read_pause_return;
138     AVFormatContext *ic;
139
140     int audio_stream;
141
142     int av_sync_type;
143     double external_clock; /* external clock base */
144     int64_t external_clock_time;
145
146     double audio_clock;
147     double audio_diff_cum; /* used for AV difference average computation */
148     double audio_diff_avg_coef;
149     double audio_diff_threshold;
150     int audio_diff_avg_count;
151     AVStream *audio_st;
152     PacketQueue audioq;
153     int audio_hw_buf_size;
154     /* samples output by the codec. We reserve more space for A/V sync
155        compensation */
156     DECLARE_ALIGNED(16,uint8_t,audio_buf1)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
157     DECLARE_ALIGNED(16,uint8_t,audio_buf2)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
158     uint8_t *audio_buf;
159     unsigned int audio_buf_size; /* in bytes */
160     int audio_buf_index; /* in bytes */
161     int audio_write_buf_size;
162     AVPacket audio_pkt_temp;
163     AVPacket audio_pkt;
164     enum AVSampleFormat audio_src_fmt;
165     AVAudioConvert *reformat_ctx;
166     double audio_current_pts;
167     double audio_current_pts_drift;
168
169     enum ShowMode {
170         SHOW_MODE_NONE = -1, SHOW_MODE_VIDEO = 0, SHOW_MODE_WAVES, SHOW_MODE_RDFT, SHOW_MODE_NB
171     } show_mode;
172     int16_t sample_array[SAMPLE_ARRAY_SIZE];
173     int sample_array_index;
174     int last_i_start;
175     RDFTContext *rdft;
176     int rdft_bits;
177     FFTSample *rdft_data;
178     int xpos;
179
180     SDL_Thread *subtitle_tid;
181     int subtitle_stream;
182     int subtitle_stream_changed;
183     AVStream *subtitle_st;
184     PacketQueue subtitleq;
185     SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
186     int subpq_size, subpq_rindex, subpq_windex;
187     SDL_mutex *subpq_mutex;
188     SDL_cond *subpq_cond;
189
190     double frame_timer;
191     double frame_last_pts;
192     double frame_last_delay;
193     double video_clock;                          ///<pts of last decoded frame / predicted pts of next decoded frame
194     int video_stream;
195     AVStream *video_st;
196     PacketQueue videoq;
197     double video_current_pts;                    ///<current displayed pts (different from video_clock if frame fifos are used)
198     double video_current_pts_drift;              ///<video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
199     int64_t video_current_pos;                   ///<current displayed file pos
200     VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
201     int pictq_size, pictq_rindex, pictq_windex;
202     SDL_mutex *pictq_mutex;
203     SDL_cond *pictq_cond;
204 #if !CONFIG_AVFILTER
205     struct SwsContext *img_convert_ctx;
206 #endif
207
208     char filename[1024];
209     int width, height, xleft, ytop;
210
211 #if CONFIG_AVFILTER
212     AVFilterContext *out_video_filter;          ///<the last filter in the video chain
213 #endif
214
215     float skip_frames;
216     float skip_frames_index;
217     int refresh;
218 } VideoState;
219
220 static int opt_help(const char *opt, const char *arg);
221
222 /* options specified by the user */
223 static AVInputFormat *file_iformat;
224 static const char *input_filename;
225 static const char *window_title;
226 static int fs_screen_width;
227 static int fs_screen_height;
228 static int screen_width = 0;
229 static int screen_height = 0;
230 static int audio_disable;
231 static int video_disable;
232 static int wanted_stream[AVMEDIA_TYPE_NB]={
233     [AVMEDIA_TYPE_AUDIO]=-1,
234     [AVMEDIA_TYPE_VIDEO]=-1,
235     [AVMEDIA_TYPE_SUBTITLE]=-1,
236 };
237 static int seek_by_bytes=-1;
238 static int display_disable;
239 static int show_status = 1;
240 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
241 static int64_t start_time = AV_NOPTS_VALUE;
242 static int64_t duration = AV_NOPTS_VALUE;
243 static int step = 0;
244 static int thread_count = 1;
245 static int workaround_bugs = 1;
246 static int fast = 0;
247 static int genpts = 0;
248 static int lowres = 0;
249 static int idct = FF_IDCT_AUTO;
250 static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
251 static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
252 static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
253 static int error_recognition = FF_ER_CAREFUL;
254 static int error_concealment = 3;
255 static int decoder_reorder_pts= -1;
256 static int autoexit;
257 static int exit_on_keydown;
258 static int exit_on_mousedown;
259 static int loop=1;
260 static int framedrop=-1;
261 static enum ShowMode show_mode = SHOW_MODE_NONE;
262
263 static int rdftspeed=20;
264 #if CONFIG_AVFILTER
265 static char *vfilters = NULL;
266 #endif
267
268 /* current context */
269 static int is_full_screen;
270 static VideoState *cur_stream;
271 static int64_t audio_callback_time;
272
273 static AVPacket flush_pkt;
274
275 #define FF_ALLOC_EVENT   (SDL_USEREVENT)
276 #define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
277 #define FF_QUIT_EVENT    (SDL_USEREVENT + 2)
278
279 static SDL_Surface *screen;
280
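/* Append pkt to the queue (the flush packet is enqueued without being duplicated).
   Returns 0 on success, -1 on allocation failure. */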
281 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
282 {
283     AVPacketList *pkt1;
284
285     /* duplicate the packet */
286     if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
287         return -1;
288
289     pkt1 = av_malloc(sizeof(AVPacketList));
290     if (!pkt1)
291         return -1;
292     pkt1->pkt = *pkt;
293     pkt1->next = NULL;
294
295
296     SDL_LockMutex(q->mutex);
297
298     if (!q->last_pkt)
300         q->first_pkt = pkt1;
301     else
302         q->last_pkt->next = pkt1;
303     q->last_pkt = pkt1;
304     q->nb_packets++;
305     q->size += pkt1->pkt.size + sizeof(*pkt1);
306     /* XXX: should duplicate packet data in DV case */
307     SDL_CondSignal(q->cond);
308
309     SDL_UnlockMutex(q->mutex);
310     return 0;
311 }
312
313 /* packet queue handling */
314 static void packet_queue_init(PacketQueue *q)
315 {
316     memset(q, 0, sizeof(PacketQueue));
317     q->mutex = SDL_CreateMutex();
318     q->cond = SDL_CreateCond();
319     packet_queue_put(q, &flush_pkt);
320 }
321
322 static void packet_queue_flush(PacketQueue *q)
323 {
324     AVPacketList *pkt, *pkt1;
325
326     SDL_LockMutex(q->mutex);
327     for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
328         pkt1 = pkt->next;
329         av_free_packet(&pkt->pkt);
330         av_freep(&pkt);
331     }
332     q->last_pkt = NULL;
333     q->first_pkt = NULL;
334     q->nb_packets = 0;
335     q->size = 0;
336     SDL_UnlockMutex(q->mutex);
337 }
338
339 static void packet_queue_end(PacketQueue *q)
340 {
341     packet_queue_flush(q);
342     SDL_DestroyMutex(q->mutex);
343     SDL_DestroyCond(q->cond);
344 }
345
346 static void packet_queue_abort(PacketQueue *q)
347 {
348     SDL_LockMutex(q->mutex);
349
350     q->abort_request = 1;
351
352     SDL_CondSignal(q->cond);
353
354     SDL_UnlockMutex(q->mutex);
355 }
356
357 /* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
358 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
359 {
360     AVPacketList *pkt1;
361     int ret;
362
363     SDL_LockMutex(q->mutex);
364
365     for(;;) {
366         if (q->abort_request) {
367             ret = -1;
368             break;
369         }
370
371         pkt1 = q->first_pkt;
372         if (pkt1) {
373             q->first_pkt = pkt1->next;
374             if (!q->first_pkt)
375                 q->last_pkt = NULL;
376             q->nb_packets--;
377             q->size -= pkt1->pkt.size + sizeof(*pkt1);
378             *pkt = pkt1->pkt;
379             av_free(pkt1);
380             ret = 1;
381             break;
382         } else if (!block) {
383             ret = 0;
384             break;
385         } else {
386             SDL_CondWait(q->cond, q->mutex);
387         }
388     }
389     SDL_UnlockMutex(q->mutex);
390     return ret;
391 }
392
393 static inline void fill_rectangle(SDL_Surface *screen,
394                                   int x, int y, int w, int h, int color)
395 {
396     SDL_Rect rect;
397     rect.x = x;
398     rect.y = y;
399     rect.w = w;
400     rect.h = h;
401     SDL_FillRect(screen, &rect, color);
402 }
403
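/* Blend newp over oldp using 8-bit alpha a; newp is passed as a sum of 1<<s
   samples, so the result is again a single 8-bit sample. */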
404 #define ALPHA_BLEND(a, oldp, newp, s)\
405 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
406
407 #define RGBA_IN(r, g, b, a, s)\
408 {\
409     unsigned int v = ((const uint32_t *)(s))[0];\
410     a = (v >> 24) & 0xff;\
411     r = (v >> 16) & 0xff;\
412     g = (v >> 8) & 0xff;\
413     b = v & 0xff;\
414 }
415
416 #define YUVA_IN(y, u, v, a, s, pal)\
417 {\
418     unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
419     a = (val >> 24) & 0xff;\
420     y = (val >> 16) & 0xff;\
421     u = (val >> 8) & 0xff;\
422     v = val & 0xff;\
423 }
424
425 #define YUVA_OUT(d, y, u, v, a)\
426 {\
427     ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
428 }
429
430
431 #define BPP 1
432
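/* Blend one palettized subtitle rectangle (PAL8 data whose palette has already
   been converted to YUVA, see below) onto a YUV420P destination picture,
   clipped to imgw x imgh. */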
433 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
434 {
435     int wrap, wrap3, width2, skip2;
436     int y, u, v, a, u1, v1, a1, w, h;
437     uint8_t *lum, *cb, *cr;
438     const uint8_t *p;
439     const uint32_t *pal;
440     int dstx, dsty, dstw, dsth;
441
442     dstw = av_clip(rect->w, 0, imgw);
443     dsth = av_clip(rect->h, 0, imgh);
444     dstx = av_clip(rect->x, 0, imgw - dstw);
445     dsty = av_clip(rect->y, 0, imgh - dsth);
446     lum = dst->data[0] + dsty * dst->linesize[0];
447     cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
448     cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
449
450     width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
451     skip2 = dstx >> 1;
452     wrap = dst->linesize[0];
453     wrap3 = rect->pict.linesize[0];
454     p = rect->pict.data[0];
455     pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */
456
457     if (dsty & 1) {
458         lum += dstx;
459         cb += skip2;
460         cr += skip2;
461
462         if (dstx & 1) {
463             YUVA_IN(y, u, v, a, p, pal);
464             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
465             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
466             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
467             cb++;
468             cr++;
469             lum++;
470             p += BPP;
471         }
472         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
473             YUVA_IN(y, u, v, a, p, pal);
474             u1 = u;
475             v1 = v;
476             a1 = a;
477             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
478
479             YUVA_IN(y, u, v, a, p + BPP, pal);
480             u1 += u;
481             v1 += v;
482             a1 += a;
483             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
484             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
485             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
486             cb++;
487             cr++;
488             p += 2 * BPP;
489             lum += 2;
490         }
491         if (w) {
492             YUVA_IN(y, u, v, a, p, pal);
493             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
494             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
495             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
496             p++;
497             lum++;
498         }
499         p += wrap3 - dstw * BPP;
500         lum += wrap - dstw - dstx;
501         cb += dst->linesize[1] - width2 - skip2;
502         cr += dst->linesize[2] - width2 - skip2;
503     }
504     for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
505         lum += dstx;
506         cb += skip2;
507         cr += skip2;
508
509         if (dstx & 1) {
510             YUVA_IN(y, u, v, a, p, pal);
511             u1 = u;
512             v1 = v;
513             a1 = a;
514             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
515             p += wrap3;
516             lum += wrap;
517             YUVA_IN(y, u, v, a, p, pal);
518             u1 += u;
519             v1 += v;
520             a1 += a;
521             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
522             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
523             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
524             cb++;
525             cr++;
526             p += -wrap3 + BPP;
527             lum += -wrap + 1;
528         }
529         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
530             YUVA_IN(y, u, v, a, p, pal);
531             u1 = u;
532             v1 = v;
533             a1 = a;
534             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
535
536             YUVA_IN(y, u, v, a, p + BPP, pal);
537             u1 += u;
538             v1 += v;
539             a1 += a;
540             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
541             p += wrap3;
542             lum += wrap;
543
544             YUVA_IN(y, u, v, a, p, pal);
545             u1 += u;
546             v1 += v;
547             a1 += a;
548             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
549
550             YUVA_IN(y, u, v, a, p + BPP, pal);
551             u1 += u;
552             v1 += v;
553             a1 += a;
554             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
555
556             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
557             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
558
559             cb++;
560             cr++;
561             p += -wrap3 + 2 * BPP;
562             lum += -wrap + 2;
563         }
564         if (w) {
565             YUVA_IN(y, u, v, a, p, pal);
566             u1 = u;
567             v1 = v;
568             a1 = a;
569             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
570             p += wrap3;
571             lum += wrap;
572             YUVA_IN(y, u, v, a, p, pal);
573             u1 += u;
574             v1 += v;
575             a1 += a;
576             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
577             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
578             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
579             cb++;
580             cr++;
581             p += -wrap3 + BPP;
582             lum += -wrap + 1;
583         }
584         p += wrap3 + (wrap3 - dstw * BPP);
585         lum += wrap + (wrap - dstw - dstx);
586         cb += dst->linesize[1] - width2 - skip2;
587         cr += dst->linesize[2] - width2 - skip2;
588     }
589     /* handle odd height */
590     if (h) {
591         lum += dstx;
592         cb += skip2;
593         cr += skip2;
594
595         if (dstx & 1) {
596             YUVA_IN(y, u, v, a, p, pal);
597             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
598             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
599             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
600             cb++;
601             cr++;
602             lum++;
603             p += BPP;
604         }
605         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
606             YUVA_IN(y, u, v, a, p, pal);
607             u1 = u;
608             v1 = v;
609             a1 = a;
610             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
611
612             YUVA_IN(y, u, v, a, p + BPP, pal);
613             u1 += u;
614             v1 += v;
615             a1 += a;
616             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
617             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
618             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
619             cb++;
620             cr++;
621             p += 2 * BPP;
622             lum += 2;
623         }
624         if (w) {
625             YUVA_IN(y, u, v, a, p, pal);
626             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
627             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
628             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
629         }
630     }
631 }
632
633 static void free_subpicture(SubPicture *sp)
634 {
635     avsubtitle_free(&sp->sub);
636 }
637
638 static void video_image_display(VideoState *is)
639 {
640     VideoPicture *vp;
641     SubPicture *sp;
642     AVPicture pict;
643     float aspect_ratio;
644     int width, height, x, y;
645     SDL_Rect rect;
646     int i;
647
648     vp = &is->pictq[is->pictq_rindex];
649     if (vp->bmp) {
650 #if CONFIG_AVFILTER
651          if (vp->picref->video->sample_aspect_ratio.num == 0)
652              aspect_ratio = 0;
653          else
654              aspect_ratio = av_q2d(vp->picref->video->sample_aspect_ratio);
655 #else
656
657         /* XXX: use variable in the frame */
658         if (is->video_st->sample_aspect_ratio.num)
659             aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
660         else if (is->video_st->codec->sample_aspect_ratio.num)
661             aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
662         else
663             aspect_ratio = 0;
664 #endif
665         if (aspect_ratio <= 0.0)
666             aspect_ratio = 1.0;
667         aspect_ratio *= (float)vp->width / (float)vp->height;
668
669         if (is->subtitle_st) {
670             if (is->subpq_size > 0) {
671                 sp = &is->subpq[is->subpq_rindex];
672
673                 if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) {
674                     SDL_LockYUVOverlay (vp->bmp);
675
676                     pict.data[0] = vp->bmp->pixels[0];
677                     pict.data[1] = vp->bmp->pixels[2];
678                     pict.data[2] = vp->bmp->pixels[1];
679
680                     pict.linesize[0] = vp->bmp->pitches[0];
681                     pict.linesize[1] = vp->bmp->pitches[2];
682                     pict.linesize[2] = vp->bmp->pitches[1];
683
684                     for (i = 0; i < sp->sub.num_rects; i++)
685                         blend_subrect(&pict, sp->sub.rects[i],
686                                       vp->bmp->w, vp->bmp->h);
687
688                     SDL_UnlockYUVOverlay (vp->bmp);
689                 }
690             }
691         }
692
693
694         /* XXX: we assume the screen has a 1:1 pixel aspect ratio */
695         height = is->height;
696         width = ((int)rint(height * aspect_ratio)) & ~1;
697         if (width > is->width) {
698             width = is->width;
699             height = ((int)rint(width / aspect_ratio)) & ~1;
700         }
701         x = (is->width - width) / 2;
702         y = (is->height - height) / 2;
703         is->no_background = 0;
704         rect.x = is->xleft + x;
705         rect.y = is->ytop  + y;
706         rect.w = FFMAX(width,  1);
707         rect.h = FFMAX(height, 1);
708         SDL_DisplayYUVOverlay(vp->bmp, &rect);
709     }
710 }
711
712 static inline int compute_mod(int a, int b)
713 {
714     return a < 0 ? a%b + b : a%b;
715 }
716
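/* Draw the audio visualization: either the raw waveform (SHOW_MODE_WAVES) or
   one column of an RDFT-based spectrogram that scrolls across the window. */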
717 static void video_audio_display(VideoState *s)
718 {
719     int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
720     int ch, channels, h, h2, bgcolor, fgcolor;
721     int64_t time_diff;
722     int rdft_bits, nb_freq;
723
724     for(rdft_bits=1; (1<<rdft_bits)<2*s->height; rdft_bits++)
725         ;
726     nb_freq= 1<<(rdft_bits-1);
727
728     /* compute display index: center on currently output samples */
729     channels = s->audio_st->codec->channels;
730     nb_display_channels = channels;
731     if (!s->paused) {
732         int data_used= s->show_mode == SHOW_MODE_WAVES ? s->width : (2*nb_freq);
733         n = 2 * channels;
734         delay = s->audio_write_buf_size;
735         delay /= n;
736
737         /* to be more precise, we take into account the time spent since
738            the last buffer computation */
739         if (audio_callback_time) {
740             time_diff = av_gettime() - audio_callback_time;
741             delay -= (time_diff * s->audio_st->codec->sample_rate) / 1000000;
742         }
743
744         delay += 2*data_used;
745         if (delay < data_used)
746             delay = data_used;
747
748         i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
749         if (s->show_mode == SHOW_MODE_WAVES) {
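            /* Scan up to 1000 samples back for the position with the biggest
               drop over the following samples that also straddles a zero
               crossing, so the displayed waveform stays stable between
               refreshes. */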
750             h= INT_MIN;
751             for(i=0; i<1000; i+=channels){
752                 int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
753                 int a= s->sample_array[idx];
754                 int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
755                 int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
756                 int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
757                 int score= a-d;
758                 if(h<score && (b^c)<0){
759                     h= score;
760                     i_start= idx;
761                 }
762             }
763         }
764
765         s->last_i_start = i_start;
766     } else {
767         i_start = s->last_i_start;
768     }
769
770     bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
771     if (s->show_mode == SHOW_MODE_WAVES) {
772         fill_rectangle(screen,
773                        s->xleft, s->ytop, s->width, s->height,
774                        bgcolor);
775
776         fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
777
778         /* total height for one channel */
779         h = s->height / nb_display_channels;
780         /* graph height / 2 */
781         h2 = (h * 9) / 20;
782         for(ch = 0;ch < nb_display_channels; ch++) {
783             i = i_start + ch;
784             y1 = s->ytop + ch * h + (h / 2); /* position of center line */
785             for(x = 0; x < s->width; x++) {
786                 y = (s->sample_array[i] * h2) >> 15;
787                 if (y < 0) {
788                     y = -y;
789                     ys = y1 - y;
790                 } else {
791                     ys = y1;
792                 }
793                 fill_rectangle(screen,
794                                s->xleft + x, ys, 1, y,
795                                fgcolor);
796                 i += channels;
797                 if (i >= SAMPLE_ARRAY_SIZE)
798                     i -= SAMPLE_ARRAY_SIZE;
799             }
800         }
801
802         fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
803
804         for(ch = 1;ch < nb_display_channels; ch++) {
805             y = s->ytop + ch * h;
806             fill_rectangle(screen,
807                            s->xleft, y, s->width, 1,
808                            fgcolor);
809         }
810         SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
811     }else{
812         nb_display_channels= FFMIN(nb_display_channels, 2);
813         if(rdft_bits != s->rdft_bits){
814             av_rdft_end(s->rdft);
815             av_free(s->rdft_data);
816             s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
817             s->rdft_bits= rdft_bits;
818             s->rdft_data= av_malloc(4*nb_freq*sizeof(*s->rdft_data));
819         }
820         {
821             FFTSample *data[2];
822             for(ch = 0;ch < nb_display_channels; ch++) {
823                 data[ch] = s->rdft_data + 2*nb_freq*ch;
824                 i = i_start + ch;
825                 for(x = 0; x < 2*nb_freq; x++) {
826                     double w= (x-nb_freq)*(1.0/nb_freq);
827                     data[ch][x]= s->sample_array[i]*(1.0-w*w);
828                     i += channels;
829                     if (i >= SAMPLE_ARRAY_SIZE)
830                         i -= SAMPLE_ARRAY_SIZE;
831                 }
832                 av_rdft_calc(s->rdft, data[ch]);
833             }
834             // Least efficient way to do this; we should of course access it directly, but it's more than fast enough
835             for(y=0; y<s->height; y++){
836                 double w= 1/sqrt(nb_freq);
837                 int a= sqrt(w*sqrt(data[0][2*y+0]*data[0][2*y+0] + data[0][2*y+1]*data[0][2*y+1]));
838                 int b= (nb_display_channels == 2 ) ? sqrt(w*sqrt(data[1][2*y+0]*data[1][2*y+0]
839                        + data[1][2*y+1]*data[1][2*y+1])) : a;
840                 a= FFMIN(a,255);
841                 b= FFMIN(b,255);
842                 fgcolor = SDL_MapRGB(screen->format, a, b, (a+b)/2);
843
844                 fill_rectangle(screen,
845                             s->xpos, s->height-y, 1, 1,
846                             fgcolor);
847             }
848         }
849         SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
850         s->xpos++;
851         if(s->xpos >= s->width)
852             s->xpos= s->xleft;
853     }
854 }
855
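/* Stop the read and refresh threads and free everything owned by the
   VideoState: queued pictures, sync primitives, the scaler context and
   finally the VideoState itself. */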
856 static void stream_close(VideoState *is)
857 {
858     VideoPicture *vp;
859     int i;
860     /* XXX: use a special url_shutdown call to abort parse cleanly */
861     is->abort_request = 1;
862     SDL_WaitThread(is->read_tid, NULL);
863     SDL_WaitThread(is->refresh_tid, NULL);
864
865     /* free all pictures */
866     for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
867         vp = &is->pictq[i];
868 #if CONFIG_AVFILTER
869         if (vp->picref) {
870             avfilter_unref_buffer(vp->picref);
871             vp->picref = NULL;
872         }
873 #endif
874         if (vp->bmp) {
875             SDL_FreeYUVOverlay(vp->bmp);
876             vp->bmp = NULL;
877         }
878     }
879     SDL_DestroyMutex(is->pictq_mutex);
880     SDL_DestroyCond(is->pictq_cond);
881     SDL_DestroyMutex(is->subpq_mutex);
882     SDL_DestroyCond(is->subpq_cond);
883 #if !CONFIG_AVFILTER
884     if (is->img_convert_ctx)
885         sws_freeContext(is->img_convert_ctx);
886 #endif
887     av_free(is);
888 }
889
890 static void do_exit(void)
891 {
892     if (cur_stream) {
893         stream_close(cur_stream);
894         cur_stream = NULL;
895     }
896     uninit_opts();
897 #if CONFIG_AVFILTER
898     avfilter_uninit();
899 #endif
900     if (show_status)
901         printf("\n");
902     SDL_Quit();
903     av_log(NULL, AV_LOG_QUIET, "%s", "");
904     exit(0);
905 }
906
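/* (Re)open the SDL video surface, choosing its size from the fullscreen mode,
   the -x/-y options, the filter output or the codec dimensions. */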
907 static int video_open(VideoState *is){
908     int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
909     int w,h;
910
911     if(is_full_screen) flags |= SDL_FULLSCREEN;
912     else               flags |= SDL_RESIZABLE;
913
914     if (is_full_screen && fs_screen_width) {
915         w = fs_screen_width;
916         h = fs_screen_height;
917     } else if(!is_full_screen && screen_width){
918         w = screen_width;
919         h = screen_height;
920 #if CONFIG_AVFILTER
921     }else if (is->out_video_filter && is->out_video_filter->inputs[0]){
922         w = is->out_video_filter->inputs[0]->w;
923         h = is->out_video_filter->inputs[0]->h;
924 #else
925     }else if (is->video_st && is->video_st->codec->width){
926         w = is->video_st->codec->width;
927         h = is->video_st->codec->height;
928 #endif
929     } else {
930         w = 640;
931         h = 480;
932     }
933     if(screen && is->width == screen->w && screen->w == w
934        && is->height== screen->h && screen->h == h)
935         return 0;
936
937 #ifndef __APPLE__
938     screen = SDL_SetVideoMode(w, h, 0, flags);
939 #else
940     /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
941     screen = SDL_SetVideoMode(w, h, 24, flags);
942 #endif
943     if (!screen) {
944         fprintf(stderr, "SDL: could not set video mode - exiting\n");
945         do_exit();
946     }
947     if (!window_title)
948         window_title = input_filename;
949     SDL_WM_SetCaption(window_title, window_title);
950
951     is->width = screen->w;
952     is->height = screen->h;
953
954     return 0;
955 }
956
957 /* display the current picture, if any */
958 static void video_display(VideoState *is)
959 {
960     if(!screen)
961         video_open(cur_stream);
962     if (is->audio_st && is->show_mode != SHOW_MODE_VIDEO)
963         video_audio_display(is);
964     else if (is->video_st)
965         video_image_display(is);
966 }
967
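/* Periodically push an FF_REFRESH_EVENT so the main loop calls video_refresh();
   the polling interval depends on whether video or the audio visualization is
   being shown. */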
968 static int refresh_thread(void *opaque)
969 {
970     VideoState *is= opaque;
971     while(!is->abort_request){
972         SDL_Event event;
973         event.type = FF_REFRESH_EVENT;
974         event.user.data1 = opaque;
975         if(!is->refresh){
976             is->refresh=1;
977             SDL_PushEvent(&event);
978         }
979         // FIXME: ideally we should wait for the correct time, but SDL's event passing is so slow it would be silly
980         usleep(is->audio_st && is->show_mode != SHOW_MODE_VIDEO ? rdftspeed*1000 : 5000);
981     }
982     return 0;
983 }
984
985 /* get the current audio clock value */
986 static double get_audio_clock(VideoState *is)
987 {
988     if (is->paused) {
989         return is->audio_current_pts;
990     } else {
991         return is->audio_current_pts_drift + av_gettime() / 1000000.0;
992     }
993 }
994
995 /* get the current video clock value */
996 static double get_video_clock(VideoState *is)
997 {
998     if (is->paused) {
999         return is->video_current_pts;
1000     } else {
1001         return is->video_current_pts_drift + av_gettime() / 1000000.0;
1002     }
1003 }
1004
1005 /* get the current external clock value */
1006 static double get_external_clock(VideoState *is)
1007 {
1008     int64_t ti;
1009     ti = av_gettime();
1010     return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
1011 }
1012
1013 /* get the current master clock value */
1014 static double get_master_clock(VideoState *is)
1015 {
1016     double val;
1017
1018     if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1019         if (is->video_st)
1020             val = get_video_clock(is);
1021         else
1022             val = get_audio_clock(is);
1023     } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1024         if (is->audio_st)
1025             val = get_audio_clock(is);
1026         else
1027             val = get_video_clock(is);
1028     } else {
1029         val = get_external_clock(is);
1030     }
1031     return val;
1032 }
1033
1034 /* seek in the stream */
1035 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1036 {
1037     if (!is->seek_req) {
1038         is->seek_pos = pos;
1039         is->seek_rel = rel;
1040         is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1041         if (seek_by_bytes)
1042             is->seek_flags |= AVSEEK_FLAG_BYTE;
1043         is->seek_req = 1;
1044     }
1045 }
1046
1047 /* pause or resume the video */
1048 static void stream_toggle_pause(VideoState *is)
1049 {
1050     if (is->paused) {
1051         is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
1052         if(is->read_pause_return != AVERROR(ENOSYS)){
1053             is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
1054         }
1055         is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
1056     }
1057     is->paused = !is->paused;
1058 }
1059
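/* Compute the absolute time (in seconds, same epoch as av_gettime()/1e6) at
   which the frame with the given pts should be displayed, stretching or
   shrinking the nominal frame delay to follow the master clock. */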
1060 static double compute_target_time(double frame_current_pts, VideoState *is)
1061 {
1062     double delay, sync_threshold, diff;
1063
1064     /* compute nominal delay */
1065     delay = frame_current_pts - is->frame_last_pts;
1066     if (delay <= 0 || delay >= 10.0) {
1067         /* if incorrect delay, use previous one */
1068         delay = is->frame_last_delay;
1069     } else {
1070         is->frame_last_delay = delay;
1071     }
1072     is->frame_last_pts = frame_current_pts;
1073
1074     /* update delay to follow master synchronisation source */
1075     if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1076          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1077         /* if video is slave, we try to correct big delays by
1078            duplicating or deleting a frame */
1079         diff = get_video_clock(is) - get_master_clock(is);
1080
1081         /* skip or repeat frame. We take into account the
1082            delay to compute the threshold. I still don't know
1083            if it is the best guess */
1084         sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1085         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1086             if (diff <= -sync_threshold)
1087                 delay = 0;
1088             else if (diff >= sync_threshold)
1089                 delay = 2 * delay;
1090         }
1091     }
1092     is->frame_timer += delay;
1093
1094     av_dlog(NULL, "video: delay=%0.3f pts=%0.3f A-V=%f\n",
1095             delay, frame_current_pts, -diff);
1096
1097     return is->frame_timer;
1098 }
1099
1100 /* called to display each frame */
1101 static void video_refresh(void *opaque)
1102 {
1103     VideoState *is = opaque;
1104     VideoPicture *vp;
1105
1106     SubPicture *sp, *sp2;
1107
1108     if (is->video_st) {
1109 retry:
1110         if (is->pictq_size == 0) {
1111             // nothing to do, no picture to display in the queue
1112         } else {
1113             double time= av_gettime()/1000000.0;
1114             double next_target;
1115             /* dequeue the picture */
1116             vp = &is->pictq[is->pictq_rindex];
1117
1118             if(time < vp->target_clock)
1119                 return;
1120             /* update current video pts */
1121             is->video_current_pts = vp->pts;
1122             is->video_current_pts_drift = is->video_current_pts - time;
1123             is->video_current_pos = vp->pos;
1124             if(is->pictq_size > 1){
1125                 VideoPicture *nextvp= &is->pictq[(is->pictq_rindex+1)%VIDEO_PICTURE_QUEUE_SIZE];
1126                 assert(nextvp->target_clock >= vp->target_clock);
1127                 next_target= nextvp->target_clock;
1128             }else{
1129                 next_target= vp->target_clock + vp->duration;
1130             }
1131             if((framedrop>0 || (framedrop && is->audio_st)) && time > next_target){
1132                 is->skip_frames *= 1.0 + FRAME_SKIP_FACTOR;
1133                 if(is->pictq_size > 1 || time > next_target + 0.5){
1134                     /* update queue size and signal for next picture */
1135                     if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1136                         is->pictq_rindex = 0;
1137
1138                     SDL_LockMutex(is->pictq_mutex);
1139                     is->pictq_size--;
1140                     SDL_CondSignal(is->pictq_cond);
1141                     SDL_UnlockMutex(is->pictq_mutex);
1142                     goto retry;
1143                 }
1144             }
1145
1146             if(is->subtitle_st) {
1147                 if (is->subtitle_stream_changed) {
1148                     SDL_LockMutex(is->subpq_mutex);
1149
1150                     while (is->subpq_size) {
1151                         free_subpicture(&is->subpq[is->subpq_rindex]);
1152
1153                         /* update queue size and signal for next picture */
1154                         if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1155                             is->subpq_rindex = 0;
1156
1157                         is->subpq_size--;
1158                     }
1159                     is->subtitle_stream_changed = 0;
1160
1161                     SDL_CondSignal(is->subpq_cond);
1162                     SDL_UnlockMutex(is->subpq_mutex);
1163                 } else {
1164                     if (is->subpq_size > 0) {
1165                         sp = &is->subpq[is->subpq_rindex];
1166
1167                         if (is->subpq_size > 1)
1168                             sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1169                         else
1170                             sp2 = NULL;
1171
1172                         if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1173                                 || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1174                         {
1175                             free_subpicture(sp);
1176
1177                             /* update queue size and signal for next picture */
1178                             if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1179                                 is->subpq_rindex = 0;
1180
1181                             SDL_LockMutex(is->subpq_mutex);
1182                             is->subpq_size--;
1183                             SDL_CondSignal(is->subpq_cond);
1184                             SDL_UnlockMutex(is->subpq_mutex);
1185                         }
1186                     }
1187                 }
1188             }
1189
1190             /* display picture */
1191             if (!display_disable)
1192                 video_display(is);
1193
1194             /* update queue size and signal for next picture */
1195             if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1196                 is->pictq_rindex = 0;
1197
1198             SDL_LockMutex(is->pictq_mutex);
1199             is->pictq_size--;
1200             SDL_CondSignal(is->pictq_cond);
1201             SDL_UnlockMutex(is->pictq_mutex);
1202         }
1203     } else if (is->audio_st) {
1204         /* draw the next audio frame */
1205
1206         /* if there is only an audio stream, display the audio bars (better
1207            than nothing, just to test the implementation) */
1208
1209         /* display picture */
1210         if (!display_disable)
1211             video_display(is);
1212     }
1213     if (show_status) {
1214         static int64_t last_time;
1215         int64_t cur_time;
1216         int aqsize, vqsize, sqsize;
1217         double av_diff;
1218
1219         cur_time = av_gettime();
1220         if (!last_time || (cur_time - last_time) >= 30000) {
1221             aqsize = 0;
1222             vqsize = 0;
1223             sqsize = 0;
1224             if (is->audio_st)
1225                 aqsize = is->audioq.size;
1226             if (is->video_st)
1227                 vqsize = is->videoq.size;
1228             if (is->subtitle_st)
1229                 sqsize = is->subtitleq.size;
1230             av_diff = 0;
1231             if (is->audio_st && is->video_st)
1232                 av_diff = get_audio_clock(is) - get_video_clock(is);
1233             printf("%7.2f A-V:%7.3f s:%3.1f aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
1234                    get_master_clock(is),
1235                    av_diff,
1236                    FFMAX(is->skip_frames-1, 0),
1237                    aqsize / 1024,
1238                    vqsize / 1024,
1239                    sqsize,
1240                    is->video_st ? is->video_st->codec->pts_correction_num_faulty_dts : 0,
1241                    is->video_st ? is->video_st->codec->pts_correction_num_faulty_pts : 0);
1242             fflush(stdout);
1243             last_time = cur_time;
1244         }
1245     }
1246 }
1247
1248 /* allocate a picture (this needs to be done in the main thread to avoid
1249    potential locking problems) */
1250 static void alloc_picture(void *opaque)
1251 {
1252     VideoState *is = opaque;
1253     VideoPicture *vp;
1254
1255     vp = &is->pictq[is->pictq_windex];
1256
1257     if (vp->bmp)
1258         SDL_FreeYUVOverlay(vp->bmp);
1259
1260 #if CONFIG_AVFILTER
1261     if (vp->picref)
1262         avfilter_unref_buffer(vp->picref);
1263     vp->picref = NULL;
1264
1265     vp->width   = is->out_video_filter->inputs[0]->w;
1266     vp->height  = is->out_video_filter->inputs[0]->h;
1267     vp->pix_fmt = is->out_video_filter->inputs[0]->format;
1268 #else
1269     vp->width   = is->video_st->codec->width;
1270     vp->height  = is->video_st->codec->height;
1271     vp->pix_fmt = is->video_st->codec->pix_fmt;
1272 #endif
1273
1274     vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
1275                                    SDL_YV12_OVERLAY,
1276                                    screen);
1277     if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
1278         /* SDL allocates a buffer smaller than requested if the video
1279          * overlay hardware is unable to support the requested size. */
1280         fprintf(stderr, "Error: the video system does not support an image\n"
1281                         "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n"
1282                         "to reduce the image size.\n", vp->width, vp->height );
1283         do_exit();
1284     }
1285
1286     SDL_LockMutex(is->pictq_mutex);
1287     vp->allocated = 1;
1288     SDL_CondSignal(is->pictq_cond);
1289     SDL_UnlockMutex(is->pictq_mutex);
1290 }
1291
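/* Add a decoded frame to the picture queue: derive its pts when missing,
   convert it into the YV12 overlay and record its target display time. */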
1292 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
1293 {
1294     VideoPicture *vp;
1295     double frame_delay, pts = pts1;
1296
1297     /* compute the exact PTS for the picture if it is omitted in the stream
1298      * pts1 is the dts of the pkt / pts of the frame */
1299     if (pts != 0) {
1300         /* update video clock with pts, if present */
1301         is->video_clock = pts;
1302     } else {
1303         pts = is->video_clock;
1304     }
1305     /* update video clock for next frame */
1306     frame_delay = av_q2d(is->video_st->codec->time_base);
1307     /* for MPEG2, the frame can be repeated, so we update the
1308        clock accordingly */
1309     frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1310     is->video_clock += frame_delay;
1311
1312 #if defined(DEBUG_SYNC) && 0
1313     printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
1314            av_get_picture_type_char(src_frame->pict_type), pts, pts1);
1315 #endif
1316
1317     /* wait until we have space to put a new picture */
1318     SDL_LockMutex(is->pictq_mutex);
1319
1320     if(is->pictq_size>=VIDEO_PICTURE_QUEUE_SIZE && !is->refresh)
1321         is->skip_frames= FFMAX(1.0 - FRAME_SKIP_FACTOR, is->skip_frames * (1.0-FRAME_SKIP_FACTOR));
1322
1323     while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1324            !is->videoq.abort_request) {
1325         SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1326     }
1327     SDL_UnlockMutex(is->pictq_mutex);
1328
1329     if (is->videoq.abort_request)
1330         return -1;
1331
1332     vp = &is->pictq[is->pictq_windex];
1333
1334     vp->duration = frame_delay;
1335
1336     /* alloc or resize hardware picture buffer */
1337     if (!vp->bmp ||
1338 #if CONFIG_AVFILTER
1339         vp->width  != is->out_video_filter->inputs[0]->w ||
1340         vp->height != is->out_video_filter->inputs[0]->h) {
1341 #else
1342         vp->width != is->video_st->codec->width ||
1343         vp->height != is->video_st->codec->height) {
1344 #endif
1345         SDL_Event event;
1346
1347         vp->allocated = 0;
1348
1349         /* the allocation must be done in the main thread to avoid
1350            locking problems */
1351         event.type = FF_ALLOC_EVENT;
1352         event.user.data1 = is;
1353         SDL_PushEvent(&event);
1354
1355         /* wait until the picture is allocated */
1356         SDL_LockMutex(is->pictq_mutex);
1357         while (!vp->allocated && !is->videoq.abort_request) {
1358             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1359         }
1360         SDL_UnlockMutex(is->pictq_mutex);
1361
1362         if (is->videoq.abort_request)
1363             return -1;
1364     }
1365
1366     /* if the frame is not skipped, then display it */
1367     if (vp->bmp) {
1368         AVPicture pict;
1369 #if CONFIG_AVFILTER
1370         if(vp->picref)
1371             avfilter_unref_buffer(vp->picref);
1372         vp->picref = src_frame->opaque;
1373 #endif
1374
1375         /* get a pointer to the bitmap */
1376         SDL_LockYUVOverlay (vp->bmp);
1377
1378         memset(&pict,0,sizeof(AVPicture));
1379         pict.data[0] = vp->bmp->pixels[0];
1380         pict.data[1] = vp->bmp->pixels[2];
1381         pict.data[2] = vp->bmp->pixels[1];
1382
1383         pict.linesize[0] = vp->bmp->pitches[0];
1384         pict.linesize[1] = vp->bmp->pitches[2];
1385         pict.linesize[2] = vp->bmp->pitches[1];
1386
1387 #if CONFIG_AVFILTER
1388         //FIXME use direct rendering
1389         av_picture_copy(&pict, (AVPicture *)src_frame,
1390                         vp->pix_fmt, vp->width, vp->height);
1391 #else
1392         sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
1393         is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
1394             vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
1395             PIX_FMT_YUV420P, sws_flags, NULL, NULL, NULL);
1396         if (is->img_convert_ctx == NULL) {
1397             fprintf(stderr, "Cannot initialize the conversion context\n");
1398             exit(1);
1399         }
1400         sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1401                   0, vp->height, pict.data, pict.linesize);
1402 #endif
1403         /* update the bitmap content */
1404         SDL_UnlockYUVOverlay(vp->bmp);
1405
1406         vp->pts = pts;
1407         vp->pos = pos;
1408
1409         /* now we can update the picture count */
1410         if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1411             is->pictq_windex = 0;
1412         SDL_LockMutex(is->pictq_mutex);
1413         vp->target_clock= compute_target_time(vp->pts, is);
1414
1415         is->pictq_size++;
1416         SDL_UnlockMutex(is->pictq_mutex);
1417     }
1418     return 0;
1419 }
1420
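/* Fetch the next packet from the video queue and decode it. Returns 1 when a
   displayable frame is in *frame, 0 when the frame was dropped or no frame was
   produced, and -1 on abort; a flush packet resets the decoder and the picture
   queue. */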
1421 static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
1422 {
1423     int got_picture, i;
1424
1425     if (packet_queue_get(&is->videoq, pkt, 1) < 0)
1426         return -1;
1427
1428     if (pkt->data == flush_pkt.data) {
1429         avcodec_flush_buffers(is->video_st->codec);
1430
1431         SDL_LockMutex(is->pictq_mutex);
1432         // Make sure there are no long-delay timers (ideally we should just flush the queue, but that's harder)
1433         for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
1434             is->pictq[i].target_clock= 0;
1435         }
1436         while (is->pictq_size && !is->videoq.abort_request) {
1437             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1438         }
1439         is->video_current_pos = -1;
1440         SDL_UnlockMutex(is->pictq_mutex);
1441
1442         is->frame_last_pts = AV_NOPTS_VALUE;
1443         is->frame_last_delay = 0;
1444         is->frame_timer = (double)av_gettime() / 1000000.0;
1445         is->skip_frames = 1;
1446         is->skip_frames_index = 0;
1447         return 0;
1448     }
1449
1450     avcodec_decode_video2(is->video_st->codec, frame, &got_picture, pkt);
1451
1452     if (got_picture) {
1453         if (decoder_reorder_pts == -1) {
1454             *pts = frame->best_effort_timestamp;
1455         } else if (decoder_reorder_pts) {
1456             *pts = frame->pkt_pts;
1457         } else {
1458             *pts = frame->pkt_dts;
1459         }
1460
1461         if (*pts == AV_NOPTS_VALUE) {
1462             *pts = 0;
1463         }
1464
1465         is->skip_frames_index += 1;
1466         if(is->skip_frames_index >= is->skip_frames){
1467             is->skip_frames_index -= FFMAX(is->skip_frames, 1.0);
1468             return 1;
1469         }
1470
1471     }
1472     return 0;
1473 }
1474
1475 #if CONFIG_AVFILTER
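/* "ffplay_input": a source filter wrapping the video decoder so decoded frames
   feed the libavfilter graph, optionally rendering directly into filter
   buffers (use_dr1). */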
1476 typedef struct {
1477     VideoState *is;
1478     AVFrame *frame;
1479     int use_dr1;
1480 } FilterPriv;
1481
1482 static int input_get_buffer(AVCodecContext *codec, AVFrame *pic)
1483 {
1484     AVFilterContext *ctx = codec->opaque;
1485     AVFilterBufferRef  *ref;
1486     int perms = AV_PERM_WRITE;
1487     int i, w, h, stride[4];
1488     unsigned edge;
1489     int pixel_size;
1490
1491     av_assert0(codec->flags & CODEC_FLAG_EMU_EDGE);
1492
1493     if (codec->codec->capabilities & CODEC_CAP_NEG_LINESIZES)
1494         perms |= AV_PERM_NEG_LINESIZES;
1495
1496     if(pic->buffer_hints & FF_BUFFER_HINTS_VALID) {
1497         if(pic->buffer_hints & FF_BUFFER_HINTS_READABLE) perms |= AV_PERM_READ;
1498         if(pic->buffer_hints & FF_BUFFER_HINTS_PRESERVE) perms |= AV_PERM_PRESERVE;
1499         if(pic->buffer_hints & FF_BUFFER_HINTS_REUSABLE) perms |= AV_PERM_REUSE2;
1500     }
1501     if(pic->reference) perms |= AV_PERM_READ | AV_PERM_PRESERVE;
1502
1503     w = codec->width;
1504     h = codec->height;
1505
1506     if(av_image_check_size(w, h, 0, codec))
1507         return -1;
1508
1509     avcodec_align_dimensions2(codec, &w, &h, stride);
1510     edge = codec->flags & CODEC_FLAG_EMU_EDGE ? 0 : avcodec_get_edge_width();
1511     w += edge << 1;
1512     h += edge << 1;
1513
1514     if(!(ref = avfilter_get_video_buffer(ctx->outputs[0], perms, w, h)))
1515         return -1;
1516
1517     pixel_size = av_pix_fmt_descriptors[ref->format].comp[0].step_minus1+1;
1518     ref->video->w = codec->width;
1519     ref->video->h = codec->height;
1520     for(i = 0; i < 4; i ++) {
1521         unsigned hshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_w : 0;
1522         unsigned vshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_h : 0;
1523
1524         if (ref->data[i]) {
1525             ref->data[i]    += ((edge * pixel_size) >> hshift) + ((edge * ref->linesize[i]) >> vshift);
1526         }
1527         pic->data[i]     = ref->data[i];
1528         pic->linesize[i] = ref->linesize[i];
1529     }
1530     pic->opaque = ref;
1531     pic->age    = INT_MAX;
1532     pic->type   = FF_BUFFER_TYPE_USER;
1533     pic->reordered_opaque = codec->reordered_opaque;
1534     if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
1535     else           pic->pkt_pts = AV_NOPTS_VALUE;
1536     return 0;
1537 }
1538
1539 static void input_release_buffer(AVCodecContext *codec, AVFrame *pic)
1540 {
1541     memset(pic->data, 0, sizeof(pic->data));
1542     avfilter_unref_buffer(pic->opaque);
1543 }
1544
1545 static int input_reget_buffer(AVCodecContext *codec, AVFrame *pic)
1546 {
1547     AVFilterBufferRef *ref = pic->opaque;
1548
1549     if (pic->data[0] == NULL) {
1550         pic->buffer_hints |= FF_BUFFER_HINTS_READABLE;
1551         return codec->get_buffer(codec, pic);
1552     }
1553
1554     if ((codec->width != ref->video->w) || (codec->height != ref->video->h) ||
1555         (codec->pix_fmt != ref->format)) {
1556         av_log(codec, AV_LOG_ERROR, "Picture properties changed.\n");
1557         return -1;
1558     }
1559
1560     pic->reordered_opaque = codec->reordered_opaque;
1561     if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
1562     else           pic->pkt_pts = AV_NOPTS_VALUE;
1563     return 0;
1564 }
1565
1566 static int input_init(AVFilterContext *ctx, const char *args, void *opaque)
1567 {
1568     FilterPriv *priv = ctx->priv;
1569     AVCodecContext *codec;
1570     if(!opaque) return -1;
1571
1572     priv->is = opaque;
1573     codec    = priv->is->video_st->codec;
1574     codec->opaque = ctx;
1575     if((codec->codec->capabilities & CODEC_CAP_DR1)
1576     ) {
1577         av_assert0(codec->flags & CODEC_FLAG_EMU_EDGE);
1578         priv->use_dr1 = 1;
1579         codec->get_buffer     = input_get_buffer;
1580         codec->release_buffer = input_release_buffer;
1581         codec->reget_buffer   = input_reget_buffer;
1582         codec->thread_safe_callbacks = 1;
1583     }
1584
1585     priv->frame = avcodec_alloc_frame();
1586
1587     return 0;
1588 }
1589
1590 static void input_uninit(AVFilterContext *ctx)
1591 {
1592     FilterPriv *priv = ctx->priv;
1593     av_free(priv->frame);
1594 }
1595
1596 static int input_request_frame(AVFilterLink *link)
1597 {
1598     FilterPriv *priv = link->src->priv;
1599     AVFilterBufferRef *picref;
1600     int64_t pts = 0;
1601     AVPacket pkt;
1602     int ret;
1603
1604     while (!(ret = get_video_frame(priv->is, priv->frame, &pts, &pkt)))
1605         av_free_packet(&pkt);
1606     if (ret < 0)
1607         return -1;
1608
1609     if(priv->use_dr1 && priv->frame->opaque) {
1610         picref = avfilter_ref_buffer(priv->frame->opaque, ~0);
1611     } else {
1612         picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, link->w, link->h);
1613         av_image_copy(picref->data, picref->linesize,
1614                       priv->frame->data, priv->frame->linesize,
1615                       picref->format, link->w, link->h);
1616     }
1617     av_free_packet(&pkt);
1618
1619     avfilter_copy_frame_props(picref, priv->frame);
1620     picref->pts = pts;
1621
1622     avfilter_start_frame(link, picref);
1623     avfilter_draw_slice(link, 0, link->h, 1);
1624     avfilter_end_frame(link);
1625
1626     return 0;
1627 }
1628
1629 static int input_query_formats(AVFilterContext *ctx)
1630 {
1631     FilterPriv *priv = ctx->priv;
1632     enum PixelFormat pix_fmts[] = {
1633         priv->is->video_st->codec->pix_fmt, PIX_FMT_NONE
1634     };
1635
1636     avfilter_set_common_pixel_formats(ctx, avfilter_make_format_list(pix_fmts));
1637     return 0;
1638 }
1639
1640 static int input_config_props(AVFilterLink *link)
1641 {
1642     FilterPriv *priv  = link->src->priv;
1643     AVCodecContext *c = priv->is->video_st->codec;
1644
1645     link->w = c->width;
1646     link->h = c->height;
1647     link->sample_aspect_ratio = priv->is->video_st->sample_aspect_ratio;
1648     link->time_base = priv->is->video_st->time_base;
1649
1650     return 0;
1651 }
1652
1653 static AVFilter input_filter =
1654 {
1655     .name      = "ffplay_input",
1656
1657     .priv_size = sizeof(FilterPriv),
1658
1659     .init      = input_init,
1660     .uninit    = input_uninit,
1661
1662     .query_formats = input_query_formats,
1663
1664     .inputs    = (AVFilterPad[]) {{ .name = NULL }},
1665     .outputs   = (AVFilterPad[]) {{ .name = "default",
1666                                     .type = AVMEDIA_TYPE_VIDEO,
1667                                     .request_frame = input_request_frame,
1668                                     .config_props  = input_config_props, },
1669                                   { .name = NULL }},
1670 };
1671
1672 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters)
1673 {
1674     char sws_flags_str[128];
1675     int ret;
1676     enum PixelFormat pix_fmts[] = { PIX_FMT_YUV420P, PIX_FMT_NONE };
1677     AVFilterContext *filt_src = NULL, *filt_out = NULL;
1678     snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%d", sws_flags);
1679     graph->scale_sws_opts = av_strdup(sws_flags_str);
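         /* graph layout: ffplay_input (decoded frames) -> optional user filter
            chain given with -vf -> buffersink accepting only yuv420p, the
            pixel format the rest of the display path expects */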
1680
1681     if ((ret = avfilter_graph_create_filter(&filt_src, &input_filter, "src",
1682                                             NULL, is, graph)) < 0)
1683         return ret;
1684     if ((ret = avfilter_graph_create_filter(&filt_out, avfilter_get_by_name("buffersink"), "out",
1685                                             NULL, pix_fmts, graph)) < 0)
1686         return ret;
1687
1688     if(vfilters) {
1689         AVFilterInOut *outputs = avfilter_inout_alloc();
1690         AVFilterInOut *inputs  = avfilter_inout_alloc();
1691
1692         outputs->name    = av_strdup("in");
1693         outputs->filter_ctx = filt_src;
1694         outputs->pad_idx = 0;
1695         outputs->next    = NULL;
1696
1697         inputs->name    = av_strdup("out");
1698         inputs->filter_ctx = filt_out;
1699         inputs->pad_idx = 0;
1700         inputs->next    = NULL;
1701
1702         if ((ret = avfilter_graph_parse(graph, vfilters, &inputs, &outputs, NULL)) < 0)
1703             return ret;
1704         av_freep(&vfilters);
1705     } else {
1706         if ((ret = avfilter_link(filt_src, 0, filt_out, 0)) < 0)
1707             return ret;
1708     }
1709
1710     if ((ret = avfilter_graph_config(graph, NULL)) < 0)
1711         return ret;
1712
1713     is->out_video_filter = filt_out;
1714
1715     return ret;
1716 }
1717
1718 #endif  /* CONFIG_AVFILTER */
1719
1720 static int video_thread(void *arg)
1721 {
1722     VideoState *is = arg;
1723     AVFrame *frame= avcodec_alloc_frame();
1724     int64_t pts_int = AV_NOPTS_VALUE, pos = -1;
1725     double pts;
1726     int ret;
1727
1728 #if CONFIG_AVFILTER
1729     AVFilterGraph *graph = avfilter_graph_alloc();
1730     AVFilterContext *filt_out = NULL;
1731
1732     if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
1733         goto the_end;
1734     filt_out = is->out_video_filter;
1735 #endif
1736
1737     for(;;) {
1738 #if !CONFIG_AVFILTER
1739         AVPacket pkt;
1740 #else
1741         AVFilterBufferRef *picref;
1742         AVRational tb = filt_out->inputs[0]->time_base;
1743 #endif
1744         while (is->paused && !is->videoq.abort_request)
1745             SDL_Delay(10);
1746 #if CONFIG_AVFILTER
1747         ret = av_vsink_buffer_get_video_buffer_ref(filt_out, &picref, 0);
1748         if (picref) {
1749             avfilter_fill_frame_from_video_buffer_ref(frame, picref);
1750             pts_int = picref->pts;
1751             pos     = picref->pos;
1752             frame->opaque = picref;
1753         }
1754
1755         if (av_cmp_q(tb, is->video_st->time_base)) {
1756             av_unused int64_t pts1 = pts_int;
1757             pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
1758             av_dlog(NULL, "video_thread(): "
1759                     "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
1760                     tb.num, tb.den, pts1,
1761                     is->video_st->time_base.num, is->video_st->time_base.den, pts_int);
1762         }
1763 #else
1764         ret = get_video_frame(is, frame, &pts_int, &pkt);
1765         pos = pkt.pos;
1766         av_free_packet(&pkt);
1767 #endif
1768
1769         if (ret < 0) goto the_end;
1770
     #if CONFIG_AVFILTER
1771         if (!picref)
1772             continue;
     #endif
1773
1774         pts = pts_int*av_q2d(is->video_st->time_base);
1775
1776         ret = queue_picture(is, frame, pts, pos);
1777
1778         if (ret < 0)
1779             goto the_end;
1780
1781         if (step)
1782             if (cur_stream)
1783                 stream_toggle_pause(cur_stream);
1784     }
1785  the_end:
1786 #if CONFIG_AVFILTER
1787     avfilter_graph_free(&graph);
1788 #endif
1789     av_free(frame);
1790     return 0;
1791 }
1792
1793 static int subtitle_thread(void *arg)
1794 {
1795     VideoState *is = arg;
1796     SubPicture *sp;
1797     AVPacket pkt1, *pkt = &pkt1;
1798     int got_subtitle;
1799     double pts;
1800     int i, j;
1801     int r, g, b, y, u, v, a;
1802
1803     for(;;) {
1804         while (is->paused && !is->subtitleq.abort_request) {
1805             SDL_Delay(10);
1806         }
1807         if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
1808             break;
1809
1810         if(pkt->data == flush_pkt.data){
1811             avcodec_flush_buffers(is->subtitle_st->codec);
1812             continue;
1813         }
1814         SDL_LockMutex(is->subpq_mutex);
1815         while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
1816                !is->subtitleq.abort_request) {
1817             SDL_CondWait(is->subpq_cond, is->subpq_mutex);
1818         }
1819         SDL_UnlockMutex(is->subpq_mutex);
1820
1821         if (is->subtitleq.abort_request)
1822             return 0;
1823
1824         sp = &is->subpq[is->subpq_windex];
1825
1826         /* NOTE: pts is the PTS of the _first_ subtitle picture beginning
1827            in this packet, if any */
1828         pts = 0;
1829         if (pkt->pts != AV_NOPTS_VALUE)
1830             pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;
1831
1832         avcodec_decode_subtitle2(is->subtitle_st->codec, &sp->sub,
1833                                  &got_subtitle, pkt);
1834
1835         if (got_subtitle && sp->sub.format == 0) {
1836             sp->pts = pts;
1837
1838             for (i = 0; i < sp->sub.num_rects; i++)
1839             {
1840                 for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
1841                 {
1842                     RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
1843                     y = RGB_TO_Y_CCIR(r, g, b);
1844                     u = RGB_TO_U_CCIR(r, g, b, 0);
1845                     v = RGB_TO_V_CCIR(r, g, b, 0);
1846                     YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
1847                 }
1848             }
1849
1850             /* now we can update the picture count */
1851             if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
1852                 is->subpq_windex = 0;
1853             SDL_LockMutex(is->subpq_mutex);
1854             is->subpq_size++;
1855             SDL_UnlockMutex(is->subpq_mutex);
1856         }
1857         av_free_packet(pkt);
1858     }
1859     return 0;
1860 }
1861
1862 /* copy samples for viewing in the audio display (waveform / RDFT) window */
1863 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1864 {
1865     int size, len;
1866
1867     size = samples_size / sizeof(short);
1868     while (size > 0) {
1869         len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1870         if (len > size)
1871             len = size;
1872         memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1873         samples += len;
1874         is->sample_array_index += len;
1875         if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1876             is->sample_array_index = 0;
1877         size -= len;
1878     }
1879 }
1880
1881 /* return the new audio buffer size (samples can be added or deleted
1882    to get better sync if video or an external clock is the master) */
1883 static int synchronize_audio(VideoState *is, short *samples,
1884                              int samples_size1, double pts)
1885 {
1886     int n, samples_size;
1887     double ref_clock;
1888
1889     n = 2 * is->audio_st->codec->channels;
1890     samples_size = samples_size1;
1891
1892     /* if not master, then we try to remove or add samples to correct the clock */
1893     if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1894          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1895         double diff, avg_diff;
1896         int wanted_size, min_size, max_size, nb_samples;
1897
1898         ref_clock = get_master_clock(is);
1899         diff = get_audio_clock(is) - ref_clock;
1900
1901         if (diff < AV_NOSYNC_THRESHOLD) {
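                  /* exponentially weighted running sum of the A-V difference:
                     cum = diff + coef * cum.  The weights 1, coef, coef^2, ...
                     sum to 1/(1-coef), so multiplying cum by (1-coef) below
                     yields the average difference. */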
1902             is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1903             if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1904                 /* not enough measurements to have a correct estimate */
1905                 is->audio_diff_avg_count++;
1906             } else {
1907                 /* estimate the A-V difference */
1908                 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1909
1910                 if (fabs(avg_diff) >= is->audio_diff_threshold) {
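                          /* diff seconds of error correspond to
                             diff * sample_rate sample frames, i.e.
                             diff * sample_rate * n bytes to add or drop */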
1911                     wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
1912                     nb_samples = samples_size / n;
1913
1914                     min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1915                     max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1916                     if (wanted_size < min_size)
1917                         wanted_size = min_size;
1918                     else if (wanted_size > max_size)
1919                         wanted_size = max_size;
1920
1921                     /* add or remove samples to correct the synchronization */
1922                     if (wanted_size < samples_size) {
1923                         /* remove samples */
1924                         samples_size = wanted_size;
1925                     } else if (wanted_size > samples_size) {
1926                         uint8_t *samples_end, *q;
1927                         int nb;
1928
1929                         /* add samples */
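                              /* stretch the buffer by repeating its last
                                 sample frame until wanted_size bytes are filled */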
1930                         nb = wanted_size - samples_size;
1931                         samples_end = (uint8_t *)samples + samples_size - n;
1932                         q = samples_end + n;
1933                         while (nb > 0) {
1934                             memcpy(q, samples_end, n);
1935                             q += n;
1936                             nb -= n;
1937                         }
1938                         samples_size = wanted_size;
1939                     }
1940                 }
1941 #if 0
1942                 printf("diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
1943                        diff, avg_diff, samples_size - samples_size1,
1944                        is->audio_clock, is->video_clock, is->audio_diff_threshold);
1945 #endif
1946             }
1947         } else {
1948             /* the difference is too big: it may be due to initial PTS
1949                errors, so reset the A-V filter */
1950             is->audio_diff_avg_count = 0;
1951             is->audio_diff_cum = 0;
1952         }
1953     }
1954
1955     return samples_size;
1956 }
1957
1958 /* decode one audio frame and return its uncompressed size */
1959 static int audio_decode_frame(VideoState *is, double *pts_ptr)
1960 {
1961     AVPacket *pkt_temp = &is->audio_pkt_temp;
1962     AVPacket *pkt = &is->audio_pkt;
1963     AVCodecContext *dec= is->audio_st->codec;
1964     int n, len1, data_size;
1965     double pts;
1966
1967     for(;;) {
1968         /* NOTE: the audio packet can contain several frames */
1969         while (pkt_temp->size > 0) {
1970             data_size = sizeof(is->audio_buf1);
1971             len1 = avcodec_decode_audio3(dec,
1972                                         (int16_t *)is->audio_buf1, &data_size,
1973                                         pkt_temp);
1974             if (len1 < 0) {
1975                 /* if error, we skip the frame */
1976                 pkt_temp->size = 0;
1977                 break;
1978             }
1979
1980             pkt_temp->data += len1;
1981             pkt_temp->size -= len1;
1982             if (data_size <= 0)
1983                 continue;
1984
1985             if (dec->sample_fmt != is->audio_src_fmt) {
1986                 if (is->reformat_ctx)
1987                     av_audio_convert_free(is->reformat_ctx);
1988                 is->reformat_ctx= av_audio_convert_alloc(AV_SAMPLE_FMT_S16, 1,
1989                                                          dec->sample_fmt, 1, NULL, 0);
1990                 if (!is->reformat_ctx) {
1991                     fprintf(stderr, "Cannot convert %s sample format to %s sample format\n",
1992                         av_get_sample_fmt_name(dec->sample_fmt),
1993                         av_get_sample_fmt_name(AV_SAMPLE_FMT_S16));
1994                         break;
1995                 }
1996                 is->audio_src_fmt= dec->sample_fmt;
1997             }
1998
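                  /* SDL audio was opened with an S16 format, so anything the
                     decoder produced in another sample format is converted to
                     packed s16 here (ostride 2 = bytes per output sample) */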
1999             if (is->reformat_ctx) {
2000                 const void *ibuf[6]= {is->audio_buf1};
2001                 void *obuf[6]= {is->audio_buf2};
2002                 int istride[6]= {av_get_bytes_per_sample(dec->sample_fmt)};
2003                 int ostride[6]= {2};
2004                 int len= data_size/istride[0];
2005                 if (av_audio_convert(is->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) {
2006                     printf("av_audio_convert() failed\n");
2007                     break;
2008                 }
2009                 is->audio_buf= is->audio_buf2;
2010                 /* FIXME: existing code assumes that data_size equals framesize*channels*2;
2011                           remove this legacy cruft */
2012                 data_size= len*2;
2013             }else{
2014                 is->audio_buf= is->audio_buf1;
2015             }
2016
2017             /* the pts is derived from the running audio clock, which is
                    resynchronized from the packet pts whenever a new packet is read */
2018             pts = is->audio_clock;
2019             *pts_ptr = pts;
2020             n = 2 * dec->channels;
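                  /* one sample frame is n = 2 * channels bytes of s16 data, so
                     data_size bytes advance the clock by
                     data_size / (n * sample_rate) seconds */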
2021             is->audio_clock += (double)data_size /
2022                 (double)(n * dec->sample_rate);
2023 #ifdef DEBUG
2024             {
2025                 static double last_clock;
2026                 printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
2027                        is->audio_clock - last_clock,
2028                        is->audio_clock, pts);
2029                 last_clock = is->audio_clock;
2030             }
2031 #endif
2032             return data_size;
2033         }
2034
2035         /* free the current packet */
2036         if (pkt->data)
2037             av_free_packet(pkt);
2038
2039         if (is->paused || is->audioq.abort_request) {
2040             return -1;
2041         }
2042
2043         /* read next packet */
2044         if (packet_queue_get(&is->audioq, pkt, 1) < 0)
2045             return -1;
2046         if(pkt->data == flush_pkt.data){
2047             avcodec_flush_buffers(dec);
2048             continue;
2049         }
2050
2051         pkt_temp->data = pkt->data;
2052         pkt_temp->size = pkt->size;
2053
2054         /* update the audio clock with the packet pts, if available */
2055         if (pkt->pts != AV_NOPTS_VALUE) {
2056             is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
2057         }
2058     }
2059 }
2060
2061 /* prepare a new audio buffer */
2062 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2063 {
2064     VideoState *is = opaque;
2065     int audio_size, len1;
2066     int bytes_per_sec;
2067     double pts;
2068
2069     audio_callback_time = av_gettime();
2070
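         /* keep decoding until the len bytes requested by SDL are filled; on a
            decoding error, output silence so the audio device is not starved */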
2071     while (len > 0) {
2072         if (is->audio_buf_index >= is->audio_buf_size) {
2073            audio_size = audio_decode_frame(is, &pts);
2074            if (audio_size < 0) {
2075                 /* if error, just output silence */
2076                is->audio_buf = is->audio_buf1;
2077                is->audio_buf_size = 1024;
2078                memset(is->audio_buf, 0, is->audio_buf_size);
2079            } else {
2080                if (is->show_mode != SHOW_MODE_VIDEO)
2081                    update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2082                audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
2083                                               pts);
2084                is->audio_buf_size = audio_size;
2085            }
2086            is->audio_buf_index = 0;
2087         }
2088         len1 = is->audio_buf_size - is->audio_buf_index;
2089         if (len1 > len)
2090             len1 = len;
2091         memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2092         len -= len1;
2093         stream += len1;
2094         is->audio_buf_index += len1;
2095     }
2096     bytes_per_sec = is->audio_st->codec->sample_rate *
2097             2 * is->audio_st->codec->channels;
2098     is->audio_write_buf_size = is->audio_buf_size - is->audio_buf_index;
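         /* compute the pts of the sample currently being played: audio_clock
            marks the end of the decoded data, so subtract what is still queued
            ahead of the DAC (the hardware buffer, assumed below to hold two
            periods, plus the not yet written part of our own buffer).  The
            drift against the callback time is stored so that the audio clock
            can later be extrapolated from the current time. */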
2099     /* Let's assume the audio driver that is used by SDL has two periods. */
2100     is->audio_current_pts = is->audio_clock - (double)(2 * is->audio_hw_buf_size + is->audio_write_buf_size) / bytes_per_sec;
2101     is->audio_current_pts_drift = is->audio_current_pts - audio_callback_time / 1000000.0;
2102 }
2103
2104 /* open a given stream. Return 0 if OK */
2105 static int stream_component_open(VideoState *is, int stream_index)
2106 {
2107     AVFormatContext *ic = is->ic;
2108     AVCodecContext *avctx;
2109     AVCodec *codec;
2110     SDL_AudioSpec wanted_spec, spec;
2111     AVDictionary *opts;
2112     AVDictionaryEntry *t = NULL;
2113
2114     if (stream_index < 0 || stream_index >= ic->nb_streams)
2115         return -1;
2116     avctx = ic->streams[stream_index]->codec;
2117
2118     opts = filter_codec_opts(codec_opts, avctx->codec_id, ic, ic->streams[stream_index]);
2119
2120     /* prepare audio output */
2121     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2122         if (avctx->channels > 0) {
2123             avctx->request_channels = FFMIN(2, avctx->channels);
2124         } else {
2125             avctx->request_channels = 2;
2126         }
2127     }
2128
2129     codec = avcodec_find_decoder(avctx->codec_id);
2130     if (!codec)
2131         return -1;
2132
2133     avctx->workaround_bugs = workaround_bugs;
2134     avctx->lowres = lowres;
2135     if(lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
2136     avctx->idct_algo= idct;
2137     if(fast) avctx->flags2 |= CODEC_FLAG2_FAST;
2138     avctx->skip_frame= skip_frame;
2139     avctx->skip_idct= skip_idct;
2140     avctx->skip_loop_filter= skip_loop_filter;
2141     avctx->error_recognition= error_recognition;
2142     avctx->error_concealment= error_concealment;
2143     avctx->thread_count= thread_count;
2144
2145     if(codec->capabilities & CODEC_CAP_DR1)
2146         avctx->flags |= CODEC_FLAG_EMU_EDGE;
2147
2148     if (!codec ||
2149         avcodec_open2(avctx, codec, &opts) < 0)
2150         return -1;
2151     if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2152         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2153         return AVERROR_OPTION_NOT_FOUND;
2154     }
2155
2156     /* prepare audio output */
2157     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2158         if(avctx->sample_rate <= 0 || avctx->channels <= 0){
2159             fprintf(stderr, "Invalid sample rate or channel count\n");
2160             return -1;
2161         }
2162         wanted_spec.freq = avctx->sample_rate;
2163         wanted_spec.format = AUDIO_S16SYS;
2164         wanted_spec.channels = avctx->channels;
2165         wanted_spec.silence = 0;
2166         wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
2167         wanted_spec.callback = sdl_audio_callback;
2168         wanted_spec.userdata = is;
2169         if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2170             fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
2171             return -1;
2172         }
2173         is->audio_hw_buf_size = spec.size;
2174         is->audio_src_fmt= AV_SAMPLE_FMT_S16;
2175     }
2176
2177     ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2178     switch(avctx->codec_type) {
2179     case AVMEDIA_TYPE_AUDIO:
2180         is->audio_stream = stream_index;
2181         is->audio_st = ic->streams[stream_index];
2182         is->audio_buf_size = 0;
2183         is->audio_buf_index = 0;
2184
2185         /* init averaging filter */
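              /* the coefficient is chosen so that coef^AUDIO_DIFF_AVG_NB == 0.01,
                 i.e. a measurement older than AUDIO_DIFF_AVG_NB updates weighs
                 only about 1% in the running average used by synchronize_audio() */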
2186         is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2187         is->audio_diff_avg_count = 0;
2188         /* since we do not have a precise enough audio FIFO fullness measure,
2189            we correct audio sync only if the error is larger than this threshold */
2190         is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / avctx->sample_rate;
2191
2192         memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
2193         packet_queue_init(&is->audioq);
2194         SDL_PauseAudio(0);
2195         break;
2196     case AVMEDIA_TYPE_VIDEO:
2197         is->video_stream = stream_index;
2198         is->video_st = ic->streams[stream_index];
2199
2200         packet_queue_init(&is->videoq);
2201         is->video_tid = SDL_CreateThread(video_thread, is);
2202         break;
2203     case AVMEDIA_TYPE_SUBTITLE:
2204         is->subtitle_stream = stream_index;
2205         is->subtitle_st = ic->streams[stream_index];
2206         packet_queue_init(&is->subtitleq);
2207
2208         is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
2209         break;
2210     default:
2211         break;
2212     }
2213     return 0;
2214 }
2215
2216 static void stream_component_close(VideoState *is, int stream_index)
2217 {
2218     AVFormatContext *ic = is->ic;
2219     AVCodecContext *avctx;
2220
2221     if (stream_index < 0 || stream_index >= ic->nb_streams)
2222         return;
2223     avctx = ic->streams[stream_index]->codec;
2224
2225     switch(avctx->codec_type) {
2226     case AVMEDIA_TYPE_AUDIO:
2227         packet_queue_abort(&is->audioq);
2228
2229         SDL_CloseAudio();
2230
2231         packet_queue_end(&is->audioq);
2232         if (is->reformat_ctx)
2233             av_audio_convert_free(is->reformat_ctx);
2234         is->reformat_ctx = NULL;
2235         break;
2236     case AVMEDIA_TYPE_VIDEO:
2237         packet_queue_abort(&is->videoq);
2238
2239         /* note: we also signal this mutex to make sure we deblock the
2240            video thread in all cases */
2241         SDL_LockMutex(is->pictq_mutex);
2242         SDL_CondSignal(is->pictq_cond);
2243         SDL_UnlockMutex(is->pictq_mutex);
2244
2245         SDL_WaitThread(is->video_tid, NULL);
2246
2247         packet_queue_end(&is->videoq);
2248         break;
2249     case AVMEDIA_TYPE_SUBTITLE:
2250         packet_queue_abort(&is->subtitleq);
2251
2252         /* note: we also signal this mutex to make sure we deblock the
2253            subtitle thread in all cases */
2254         SDL_LockMutex(is->subpq_mutex);
2255         is->subtitle_stream_changed = 1;
2256
2257         SDL_CondSignal(is->subpq_cond);
2258         SDL_UnlockMutex(is->subpq_mutex);
2259
2260         SDL_WaitThread(is->subtitle_tid, NULL);
2261
2262         packet_queue_end(&is->subtitleq);
2263         break;
2264     default:
2265         break;
2266     }
2267
2268     ic->streams[stream_index]->discard = AVDISCARD_ALL;
2269     avcodec_close(avctx);
2270     switch(avctx->codec_type) {
2271     case AVMEDIA_TYPE_AUDIO:
2272         is->audio_st = NULL;
2273         is->audio_stream = -1;
2274         break;
2275     case AVMEDIA_TYPE_VIDEO:
2276         is->video_st = NULL;
2277         is->video_stream = -1;
2278         break;
2279     case AVMEDIA_TYPE_SUBTITLE:
2280         is->subtitle_st = NULL;
2281         is->subtitle_stream = -1;
2282         break;
2283     default:
2284         break;
2285     }
2286 }
2287
2288 /* since we have only one decoding thread, we can use a global
2289    variable instead of a thread local variable */
2290 static VideoState *global_video_state;
2291
2292 static int decode_interrupt_cb(void)
2293 {
2294     return (global_video_state && global_video_state->abort_request);
2295 }
2296
2297 /* this thread gets the stream from the disk or the network */
2298 static int read_thread(void *arg)
2299 {
2300     VideoState *is = arg;
2301     AVFormatContext *ic = NULL;
2302     int err, i, ret;
2303     int st_index[AVMEDIA_TYPE_NB];
2304     AVPacket pkt1, *pkt = &pkt1;
2305     int eof=0;
2306     int pkt_in_play_range = 0;
2307     AVDictionaryEntry *t;
2308     AVDictionary **opts;
2309     int orig_nb_streams;
2310
2311     memset(st_index, -1, sizeof(st_index));
2312     is->video_stream = -1;
2313     is->audio_stream = -1;
2314     is->subtitle_stream = -1;
2315
2316     global_video_state = is;
2317     avio_set_interrupt_cb(decode_interrupt_cb);
2318
2319     err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
2320     if (err < 0) {
2321         print_error(is->filename, err);
2322         ret = -1;
2323         goto fail;
2324     }
2325     if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2326         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2327         ret = AVERROR_OPTION_NOT_FOUND;
2328         goto fail;
2329     }
2330     is->ic = ic;
2331
2332     if(genpts)
2333         ic->flags |= AVFMT_FLAG_GENPTS;
2334
2335     opts = setup_find_stream_info_opts(ic, codec_opts);
2336     orig_nb_streams = ic->nb_streams;
2337
2338     err = avformat_find_stream_info(ic, opts);
2339     if (err < 0) {
2340         fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
2341         ret = -1;
2342         goto fail;
2343     }
2344     for (i = 0; i < orig_nb_streams; i++)
2345         av_dict_free(&opts[i]);
2346     av_freep(&opts);
2347
2348     if(ic->pb)
2349         ic->pb->eof_reached= 0; //FIXME hack, ffplay maybe should not use url_feof() to test for the end
2350
2351     if(seek_by_bytes<0)
2352         seek_by_bytes= !!(ic->iformat->flags & AVFMT_TS_DISCONT);
2353
2354     /* if seeking requested, we execute it */
2355     if (start_time != AV_NOPTS_VALUE) {
2356         int64_t timestamp;
2357
2358         timestamp = start_time;
2359         /* add the stream start time */
2360         if (ic->start_time != AV_NOPTS_VALUE)
2361             timestamp += ic->start_time;
2362         ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2363         if (ret < 0) {
2364             fprintf(stderr, "%s: could not seek to position %0.3f\n",
2365                     is->filename, (double)timestamp / AV_TIME_BASE);
2366         }
2367     }
2368
2369     for (i = 0; i < ic->nb_streams; i++)
2370         ic->streams[i]->discard = AVDISCARD_ALL;
2371     if (!video_disable)
2372         st_index[AVMEDIA_TYPE_VIDEO] =
2373             av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2374                                 wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2375     if (!audio_disable)
2376         st_index[AVMEDIA_TYPE_AUDIO] =
2377             av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2378                                 wanted_stream[AVMEDIA_TYPE_AUDIO],
2379                                 st_index[AVMEDIA_TYPE_VIDEO],
2380                                 NULL, 0);
2381     if (!video_disable)
2382         st_index[AVMEDIA_TYPE_SUBTITLE] =
2383             av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2384                                 wanted_stream[AVMEDIA_TYPE_SUBTITLE],
2385                                 (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2386                                  st_index[AVMEDIA_TYPE_AUDIO] :
2387                                  st_index[AVMEDIA_TYPE_VIDEO]),
2388                                 NULL, 0);
2389     if (show_status) {
2390         av_dump_format(ic, 0, is->filename, 0);
2391     }
2392
2393     is->show_mode = show_mode;
2394
2395     /* open the streams */
2396     if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2397         stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2398     }
2399
2400     ret=-1;
2401     if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2402         ret= stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2403     }
2404     is->refresh_tid = SDL_CreateThread(refresh_thread, is);
2405     if (is->show_mode == SHOW_MODE_NONE)
2406         is->show_mode = ret >= 0 ? SHOW_MODE_VIDEO : SHOW_MODE_RDFT;
2407
2408     if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2409         stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2410     }
2411
2412     if (is->video_stream < 0 && is->audio_stream < 0) {
2413         fprintf(stderr, "%s: could not open codecs\n", is->filename);
2414         ret = -1;
2415         goto fail;
2416     }
2417
2418     for(;;) {
2419         if (is->abort_request)
2420             break;
2421         if (is->paused != is->last_paused) {
2422             is->last_paused = is->paused;
2423             if (is->paused)
2424                 is->read_pause_return= av_read_pause(ic);
2425             else
2426                 av_read_play(ic);
2427         }
2428 #if CONFIG_RTSP_DEMUXER
2429         if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
2430             /* wait 10 ms to avoid trying to get another packet */
2431             /* XXX: horrible */
2432             SDL_Delay(10);
2433             continue;
2434         }
2435 #endif
2436         if (is->seek_req) {
2437             int64_t seek_target= is->seek_pos;
2438             int64_t seek_min= is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2439             int64_t seek_max= is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2440 //FIXME the +-2 is there because rounding is not done in the correct direction when
2441 //      the seek_pos/seek_rel variables are generated
2442
2443             ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2444             if (ret < 0) {
2445                 fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
2446             }else{
2447                 if (is->audio_stream >= 0) {
2448                     packet_queue_flush(&is->audioq);
2449                     packet_queue_put(&is->audioq, &flush_pkt);
2450                 }
2451                 if (is->subtitle_stream >= 0) {
2452                     packet_queue_flush(&is->subtitleq);
2453                     packet_queue_put(&is->subtitleq, &flush_pkt);
2454                 }
2455                 if (is->video_stream >= 0) {
2456                     packet_queue_flush(&is->videoq);
2457                     packet_queue_put(&is->videoq, &flush_pkt);
2458                 }
2459             }
2460             is->seek_req = 0;
2461             eof= 0;
2462         }
2463
2464         /* if the queues are full, no need to read more */
2465         if (   is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2466             || (   (is->audioq   .size  > MIN_AUDIOQ_SIZE || is->audio_stream<0)
2467                 && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream<0)
2468                 && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream<0))) {
2469             /* wait 10 ms */
2470             SDL_Delay(10);
2471             continue;
2472         }
2473         if(eof) {
2474             if(is->video_stream >= 0){
2475                 av_init_packet(pkt);
2476                 pkt->data=NULL;
2477                 pkt->size=0;
2478                 pkt->stream_index= is->video_stream;
2479                 packet_queue_put(&is->videoq, pkt);
2480             }
2481             SDL_Delay(10);
2482             if(is->audioq.size + is->videoq.size + is->subtitleq.size ==0){
2483                 if(loop!=1 && (!loop || --loop)){
2484                     stream_seek(cur_stream, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
2485                 }else if(autoexit){
2486                     ret=AVERROR_EOF;
2487                     goto fail;
2488                 }
2489             }
2490             eof=0;
2491             continue;
2492         }
2493         ret = av_read_frame(ic, pkt);
2494         if (ret < 0) {
2495             if (ret == AVERROR_EOF || url_feof(ic->pb))
2496                 eof=1;
2497             if (ic->pb && ic->pb->error)
2498                 break;
2499             SDL_Delay(100); /* wait for user event */
2500             continue;
2501         }
2502         /* check if packet is in play range specified by user, then queue, otherwise discard */
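              /* i.e. convert the packet pts to seconds relative to the stream
                 start, subtract the -ss start time and compare the result
                 against the -t duration (both given in AV_TIME_BASE units) */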
2503         pkt_in_play_range = duration == AV_NOPTS_VALUE ||
2504                 (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
2505                 av_q2d(ic->streams[pkt->stream_index]->time_base) -
2506                 (double)(start_time != AV_NOPTS_VALUE ? start_time : 0)/1000000
2507                 <= ((double)duration/1000000);
2508         if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
2509             packet_queue_put(&is->audioq, pkt);
2510         } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
2511             packet_queue_put(&is->videoq, pkt);
2512         } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
2513             packet_queue_put(&is->subtitleq, pkt);
2514         } else {
2515             av_free_packet(pkt);
2516         }
2517     }
2518     /* wait until the end */
2519     while (!is->abort_request) {
2520         SDL_Delay(100);
2521     }
2522
2523     ret = 0;
2524  fail:
2525     /* disable interrupting */
2526     global_video_state = NULL;
2527
2528     /* close each stream */
2529     if (is->audio_stream >= 0)
2530         stream_component_close(is, is->audio_stream);
2531     if (is->video_stream >= 0)
2532         stream_component_close(is, is->video_stream);
2533     if (is->subtitle_stream >= 0)
2534         stream_component_close(is, is->subtitle_stream);
2535     if (is->ic) {
2536         av_close_input_file(is->ic);
2537         is->ic = NULL; /* safety */
2538     }
2539     avio_set_interrupt_cb(NULL);
2540
2541     if (ret != 0) {
2542         SDL_Event event;
2543
2544         event.type = FF_QUIT_EVENT;
2545         event.user.data1 = is;
2546         SDL_PushEvent(&event);
2547     }
2548     return 0;
2549 }
2550
2551 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2552 {
2553     VideoState *is;
2554
2555     is = av_mallocz(sizeof(VideoState));
2556     if (!is)
2557         return NULL;
2558     av_strlcpy(is->filename, filename, sizeof(is->filename));
2559     is->iformat = iformat;
2560     is->ytop = 0;
2561     is->xleft = 0;
2562
2563     /* start video display */
2564     is->pictq_mutex = SDL_CreateMutex();
2565     is->pictq_cond = SDL_CreateCond();
2566
2567     is->subpq_mutex = SDL_CreateMutex();
2568     is->subpq_cond = SDL_CreateCond();
2569
2570     is->av_sync_type = av_sync_type;
2571     is->read_tid = SDL_CreateThread(read_thread, is);
2572     if (!is->read_tid) {
2573         av_free(is);
2574         return NULL;
2575     }
2576     return is;
2577 }
2578
2579 static void stream_cycle_channel(VideoState *is, int codec_type)
2580 {
2581     AVFormatContext *ic = is->ic;
2582     int start_index, stream_index;
2583     AVStream *st;
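         /* walk the streams circularly, starting after the one currently in
            use, and switch to the next usable stream of the same type; for
            subtitles, wrapping past the last stream selects "no subtitle"
            (index -1) */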
2584
2585     if (codec_type == AVMEDIA_TYPE_VIDEO)
2586         start_index = is->video_stream;
2587     else if (codec_type == AVMEDIA_TYPE_AUDIO)
2588         start_index = is->audio_stream;
2589     else
2590         start_index = is->subtitle_stream;
2591     if (start_index < (codec_type == AVMEDIA_TYPE_SUBTITLE ? -1 : 0))
2592         return;
2593     stream_index = start_index;
2594     for(;;) {
2595         if (++stream_index >= is->ic->nb_streams)
2596         {
2597             if (codec_type == AVMEDIA_TYPE_SUBTITLE)
2598             {
2599                 stream_index = -1;
2600                 goto the_end;
2601             } else
2602                 stream_index = 0;
2603         }
2604         if (stream_index == start_index)
2605             return;
2606         st = ic->streams[stream_index];
2607         if (st->codec->codec_type == codec_type) {
2608             /* check that parameters are OK */
2609             switch(codec_type) {
2610             case AVMEDIA_TYPE_AUDIO:
2611                 if (st->codec->sample_rate != 0 &&
2612                     st->codec->channels != 0)
2613                     goto the_end;
2614                 break;
2615             case AVMEDIA_TYPE_VIDEO:
2616             case AVMEDIA_TYPE_SUBTITLE:
2617                 goto the_end;
2618             default:
2619                 break;
2620             }
2621         }
2622     }
2623  the_end:
2624     stream_component_close(is, start_index);
2625     stream_component_open(is, stream_index);
2626 }
2627
2628
2629 static void toggle_full_screen(void)
2630 {
2631     is_full_screen = !is_full_screen;
2632     video_open(cur_stream);
2633 }
2634
2635 static void toggle_pause(void)
2636 {
2637     if (cur_stream)
2638         stream_toggle_pause(cur_stream);
2639     step = 0;
2640 }
2641
2642 static void step_to_next_frame(void)
2643 {
2644     if (cur_stream) {
2645         /* if the stream is paused unpause it, then step */
2646         if (cur_stream->paused)
2647             stream_toggle_pause(cur_stream);
2648     }
2649     step = 1;
2650 }
2651
2652 static void toggle_audio_display(void)
2653 {
2654     if (cur_stream) {
2655         int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2656         cur_stream->show_mode = (cur_stream->show_mode + 1) % SHOW_MODE_NB;
2657         fill_rectangle(screen,
2658                     cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height,
2659                     bgcolor);
2660         SDL_UpdateRect(screen, cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height);
2661     }
2662 }
2663
2664 /* handle an event sent by the GUI */
2665 static void event_loop(void)
2666 {
2667     SDL_Event event;
2668     double incr, pos, frac;
2669
2670     for(;;) {
2671         double x;
2672         SDL_WaitEvent(&event);
2673         switch(event.type) {
2674         case SDL_KEYDOWN:
2675             if (exit_on_keydown) {
2676                 do_exit();
2677                 break;
2678             }
2679             switch(event.key.keysym.sym) {
2680             case SDLK_ESCAPE:
2681             case SDLK_q:
2682                 do_exit();
2683                 break;
2684             case SDLK_f:
2685                 toggle_full_screen();
2686                 break;
2687             case SDLK_p:
2688             case SDLK_SPACE:
2689                 toggle_pause();
2690                 break;
2691             case SDLK_s: //S: Step to next frame
2692                 step_to_next_frame();
2693                 break;
2694             case SDLK_a:
2695                 if (cur_stream)
2696                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
2697                 break;
2698             case SDLK_v:
2699                 if (cur_stream)
2700                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
2701                 break;
2702             case SDLK_t:
2703                 if (cur_stream)
2704                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
2705                 break;
2706             case SDLK_w:
2707                 toggle_audio_display();
2708                 break;
2709             case SDLK_LEFT:
2710                 incr = -10.0;
2711                 goto do_seek;
2712             case SDLK_RIGHT:
2713                 incr = 10.0;
2714                 goto do_seek;
2715             case SDLK_UP:
2716                 incr = 60.0;
2717                 goto do_seek;
2718             case SDLK_DOWN:
2719                 incr = -60.0;
2720             do_seek:
2721                 if (cur_stream) {
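                          /* in byte-seek mode the +-10s/60s increments are
                             converted to a byte offset using the stream bit
                             rate (or a rough 180000 bytes/s fallback);
                             otherwise seek relative to the master clock */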
2722                     if (seek_by_bytes) {
2723                         if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos>=0){
2724                             pos= cur_stream->video_current_pos;
2725                         }else if(cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos>=0){
2726                             pos= cur_stream->audio_pkt.pos;
2727                         }else
2728                             pos = avio_tell(cur_stream->ic->pb);
2729                         if (cur_stream->ic->bit_rate)
2730                             incr *= cur_stream->ic->bit_rate / 8.0;
2731                         else
2732                             incr *= 180000.0;
2733                         pos += incr;
2734                         stream_seek(cur_stream, pos, incr, 1);
2735                     } else {
2736                         pos = get_master_clock(cur_stream);
2737                         pos += incr;
2738                         stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
2739                     }
2740                 }
2741                 break;
2742             default:
2743                 break;
2744             }
2745             break;
2746         case SDL_MOUSEBUTTONDOWN:
2747             if (exit_on_mousedown) {
2748                 do_exit();
2749                 break;
2750             }
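              /* fall through: a button press is handled like mouse motion below */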
2751         case SDL_MOUSEMOTION:
2752             if(event.type ==SDL_MOUSEBUTTONDOWN){
2753                 x= event.button.x;
2754             }else{
2755                 if(event.motion.state != SDL_PRESSED)
2756                     break;
2757                 x= event.motion.x;
2758             }
2759             if (cur_stream) {
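                  /* map the click/drag x position to a fraction of the window
                     width and seek to that fraction of the file size (in
                     byte-seek mode) or of the total duration */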
2760                 if(seek_by_bytes || cur_stream->ic->duration<=0){
2761                     uint64_t size=  avio_size(cur_stream->ic->pb);
2762                     stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
2763                 }else{
2764                     int64_t ts;
2765                     int ns, hh, mm, ss;
2766                     int tns, thh, tmm, tss;
2767                     tns = cur_stream->ic->duration/1000000LL;
2768                     thh = tns/3600;
2769                     tmm = (tns%3600)/60;
2770                     tss = (tns%60);
2771                     frac = x/cur_stream->width;
2772                     ns = frac*tns;
2773                     hh = ns/3600;
2774                     mm = (ns%3600)/60;
2775                     ss = (ns%60);
2776                     fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
2777                             hh, mm, ss, thh, tmm, tss);
2778                     ts = frac*cur_stream->ic->duration;
2779                     if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
2780                         ts += cur_stream->ic->start_time;
2781                     stream_seek(cur_stream, ts, 0, 0);
2782                 }
2783             }
2784             break;
2785         case SDL_VIDEORESIZE:
2786             if (cur_stream) {
2787                 screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
2788                                           SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
2789                 screen_width = cur_stream->width = event.resize.w;
2790                 screen_height= cur_stream->height= event.resize.h;
2791             }
2792             break;
2793         case SDL_QUIT:
2794         case FF_QUIT_EVENT:
2795             do_exit();
2796             break;
2797         case FF_ALLOC_EVENT:
2798             video_open(event.user.data1);
2799             alloc_picture(event.user.data1);
2800             break;
2801         case FF_REFRESH_EVENT:
2802             video_refresh(event.user.data1);
2803             cur_stream->refresh=0;
2804             break;
2805         default:
2806             break;
2807         }
2808     }
2809 }
2810
2811 static int opt_frame_size(const char *opt, const char *arg)
2812 {
2813     av_log(NULL, AV_LOG_WARNING, "Option -s is deprecated, use -video_size.\n");
2814     return opt_default("video_size", arg);
2815 }
2816
2817 static int opt_width(const char *opt, const char *arg)
2818 {
2819     screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2820     return 0;
2821 }
2822
2823 static int opt_height(const char *opt, const char *arg)
2824 {
2825     screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2826     return 0;
2827 }
2828
2829 static int opt_format(const char *opt, const char *arg)
2830 {
2831     file_iformat = av_find_input_format(arg);
2832     if (!file_iformat) {
2833         fprintf(stderr, "Unknown input format: %s\n", arg);
2834         return AVERROR(EINVAL);
2835     }
2836     return 0;
2837 }
2838
2839 static int opt_frame_pix_fmt(const char *opt, const char *arg)
2840 {
2841     av_log(NULL, AV_LOG_WARNING, "Option -pix_fmt is deprecated, use -pixel_format.\n");
2842     return opt_default("pixel_format", arg);
2843 }
2844
2845 static int opt_sync(const char *opt, const char *arg)
2846 {
2847     if (!strcmp(arg, "audio"))
2848         av_sync_type = AV_SYNC_AUDIO_MASTER;
2849     else if (!strcmp(arg, "video"))
2850         av_sync_type = AV_SYNC_VIDEO_MASTER;
2851     else if (!strcmp(arg, "ext"))
2852         av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2853     else {
2854         fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
2855         exit(1);
2856     }
2857     return 0;
2858 }
2859
2860 static int opt_seek(const char *opt, const char *arg)
2861 {
2862     start_time = parse_time_or_die(opt, arg, 1);
2863     return 0;
2864 }
2865
2866 static int opt_duration(const char *opt, const char *arg)
2867 {
2868     duration = parse_time_or_die(opt, arg, 1);
2869     return 0;
2870 }
2871
2872 static int opt_thread_count(const char *opt, const char *arg)
2873 {
2874     thread_count= parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2875 #if !HAVE_THREADS
2876     fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
2877 #endif
2878     return 0;
2879 }
2880
2881 static int opt_show_mode(const char *opt, const char *arg)
2882 {
2883     show_mode = !strcmp(arg, "video") ? SHOW_MODE_VIDEO :
2884                 !strcmp(arg, "waves") ? SHOW_MODE_WAVES :
2885                 !strcmp(arg, "rdft" ) ? SHOW_MODE_RDFT  :
2886                 parse_number_or_die(opt, arg, OPT_INT, 0, SHOW_MODE_NB-1);
2887     return 0;
2888 }
2889
2890 static int opt_input_file(const char *opt, const char *filename)
2891 {
2892     if (input_filename) {
2893         fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
2894                 filename, input_filename);
2895         exit(1);
2896     }
2897     if (!strcmp(filename, "-"))
2898         filename = "pipe:";
2899     input_filename = filename;
2900     return 0;
2901 }
2902
2903 static const OptionDef options[] = {
2904 #include "cmdutils_common_opts.h"
2905     { "x", HAS_ARG, {(void*)opt_width}, "force displayed width", "width" },
2906     { "y", HAS_ARG, {(void*)opt_height}, "force displayed height", "height" },
2907     { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
2908     { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
2909     { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
2910     { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
2911     { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_AUDIO]}, "select desired audio stream", "stream_number" },
2912     { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_VIDEO]}, "select desired video stream", "stream_number" },
2913     { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_SUBTITLE]}, "select desired subtitle stream", "stream_number" },
2914     { "ss", HAS_ARG, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
2915     { "t", HAS_ARG, {(void*)&opt_duration}, "play \"duration\" seconds of audio/video", "duration" },
2916     { "bytes", OPT_INT | HAS_ARG, {(void*)&seek_by_bytes}, "seek by bytes 0=off 1=on -1=auto", "val" },
2917     { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
2918     { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
2919     { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
2920     { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
2921     { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
2922     { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
2923     { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
2924     { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts 0=off 1=on -1=auto", ""},
2925     { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
2926     { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
2927     { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
2928     { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
2929     { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo",  "algo" },
2930     { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_recognition}, "set error detection threshold (0-4)",  "threshold" },
2931     { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options",  "bit_mask" },
2932     { "sync", HAS_ARG | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
2933     { "threads", HAS_ARG | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
2934     { "autoexit", OPT_BOOL | OPT_EXPERT, {(void*)&autoexit}, "exit at the end", "" },
2935     { "exitonkeydown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_keydown}, "exit on key down", "" },
2936     { "exitonmousedown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_mousedown}, "exit on mouse down", "" },
2937     { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&loop}, "set number of times the playback shall be looped", "loop count" },
2938     { "framedrop", OPT_BOOL | OPT_EXPERT, {(void*)&framedrop}, "drop frames when cpu is too slow", "" },
2939     { "window_title", OPT_STRING | HAS_ARG, {(void*)&window_title}, "set window title", "window title" },
2940 #if CONFIG_AVFILTER
2941     { "vf", OPT_STRING | HAS_ARG, {(void*)&vfilters}, "video filters", "filter list" },
2942 #endif
2943     { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, {(void*)&rdftspeed}, "rdft speed", "msecs" },
2944     { "showmode", HAS_ARG, {(void*)opt_show_mode}, "select show mode (0 = video, 1 = waves, 2 = RDFT)", "mode" },
2945     { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
2946     { "i", HAS_ARG, {(void *)opt_input_file}, "read specified file", "input_file"},
2947     { NULL, },
2948 };
2949
2950 static void show_usage(void)
2951 {
2952     printf("Simple media player\n");
2953     printf("usage: %s [options] input_file\n", program_name);
2954     printf("\n");
2955 }
2956
2957 static int opt_help(const char *opt, const char *arg)
2958 {
2959     av_log_set_callback(log_callback_help);
2960     show_usage();
2961     show_help_options(options, "Main options:\n",
2962                       OPT_EXPERT, 0);
2963     show_help_options(options, "\nAdvanced options:\n",
2964                       OPT_EXPERT, OPT_EXPERT);
2965     printf("\n");
2966     av_opt_show2(avcodec_opts[0], NULL,
2967                  AV_OPT_FLAG_DECODING_PARAM, 0);
2968     printf("\n");
2969     av_opt_show2(avformat_opts, NULL,
2970                  AV_OPT_FLAG_DECODING_PARAM, 0);
2971 #if !CONFIG_AVFILTER
2972     printf("\n");
2973     av_opt_show2(sws_opts, NULL,
2974                  AV_OPT_FLAG_ENCODING_PARAM, 0);
2975 #endif
2976     printf("\nWhile playing:\n"
2977            "q, ESC              quit\n"
2978            "f                   toggle full screen\n"
2979            "p, SPC              pause\n"
2980            "a                   cycle audio channel\n"
2981            "v                   cycle video channel\n"
2982            "t                   cycle subtitle channel\n"
2983            "w                   show audio waves\n"
2984            "s                   activate frame-step mode\n"
2985            "left/right          seek backward/forward 10 seconds\n"
2986            "down/up             seek backward/forward 1 minute\n"
2987            "mouse click         seek to percentage in file corresponding to fraction of width\n"
2988            );
2989     return 0;
2990 }
2991
2992 /* program entry point */
2993 int main(int argc, char **argv)
2994 {
2995     int flags;
2996
2997     av_log_set_flags(AV_LOG_SKIP_REPEATED);
2998
2999     /* register all codecs, demux and protocols */
3000     avcodec_register_all();
3001 #if CONFIG_AVDEVICE
3002     avdevice_register_all();
3003 #endif
3004 #if CONFIG_AVFILTER
3005     avfilter_register_all();
3006 #endif
3007     av_register_all();
3008
3009     init_opts();
3010
3011     show_banner();
3012
3013     parse_options(argc, argv, options, opt_input_file);
3014
3015     if (!input_filename) {
3016         show_usage();
3017         fprintf(stderr, "An input file must be specified\n");
3018         fprintf(stderr, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
3019         exit(1);
3020     }
3021
3022     if (display_disable) {
3023         video_disable = 1;
3024     }
3025     flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3026     if (audio_disable)
3027         flags &= ~SDL_INIT_AUDIO;
3028 #if !defined(__MINGW32__) && !defined(__APPLE__)
3029     flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
3030 #endif
3031     if (SDL_Init (flags)) {
3032         fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
3033         fprintf(stderr, "(Did you set the DISPLAY variable?)\n");
3034         exit(1);
3035     }
3036
3037     if (!display_disable) {
3038 #if HAVE_SDL_VIDEO_SIZE
3039         const SDL_VideoInfo *vi = SDL_GetVideoInfo();
3040         fs_screen_width = vi->current_w;
3041         fs_screen_height = vi->current_h;
3042 #endif
3043     }
3044
3045     SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
3046     SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3047     SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3048
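         /* flush_pkt is a sentinel packet: it is pushed into the packet queues
            after a seek so that the decoding threads know to call
            avcodec_flush_buffers() before handling the following data */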
3049     av_init_packet(&flush_pkt);
3050     flush_pkt.data = (uint8_t *)"FLUSH";
3051
3052     cur_stream = stream_open(input_filename, file_iformat);
3053
3054     event_loop();
3055
3056     /* never returns */
3057
3058     return 0;
3059 }