ffplay: get rid of the global cur_stream variable
1 /*
2  * ffplay : Simple Media Player based on the FFmpeg libraries
3  * Copyright (c) 2003 Fabrice Bellard
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21
22 #include "config.h"
23 #include <inttypes.h>
24 #include <math.h>
25 #include <limits.h>
26 #include "libavutil/avstring.h"
27 #include "libavutil/colorspace.h"
28 #include "libavutil/mathematics.h"
29 #include "libavutil/pixdesc.h"
30 #include "libavutil/imgutils.h"
31 #include "libavutil/dict.h"
32 #include "libavutil/parseutils.h"
33 #include "libavutil/samplefmt.h"
34 #include "libavutil/avassert.h"
35 #include "libavformat/avformat.h"
36 #include "libavdevice/avdevice.h"
37 #include "libswscale/swscale.h"
38 #include "libavcodec/audioconvert.h"
39 #include "libavutil/opt.h"
40 #include "libavcodec/avfft.h"
41
42 #if CONFIG_AVFILTER
43 # include "libavfilter/avcodec.h"
44 # include "libavfilter/avfilter.h"
45 # include "libavfilter/avfiltergraph.h"
46 # include "libavfilter/vsink_buffer.h"
47 #endif
48
49 #include <SDL.h>
50 #include <SDL_thread.h>
51
52 #include "cmdutils.h"
53
54 #include <unistd.h>
55 #include <assert.h>
56
57 const char program_name[] = "ffplay";
58 const int program_birth_year = 2003;
59
60 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
61 #define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
62 #define MIN_FRAMES 5
63
64 /* SDL audio buffer size, in samples. Should be small to have precise
65    A/V sync as SDL does not have hardware buffer fullness info. */
66 #define SDL_AUDIO_BUFFER_SIZE 1024
67
68 /* no AV sync correction is done if below the AV sync threshold */
69 #define AV_SYNC_THRESHOLD 0.01
70 /* no AV correction is done if too big error */
71 #define AV_NOSYNC_THRESHOLD 10.0
72
73 #define FRAME_SKIP_FACTOR 0.05
74
75 /* maximum audio speed change to get correct sync */
76 #define SAMPLE_CORRECTION_PERCENT_MAX 10
77
78 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
79 #define AUDIO_DIFF_AVG_NB   20
80
81 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
82 #define SAMPLE_ARRAY_SIZE (2*65536)
83
84 static int sws_flags = SWS_BICUBIC;
85
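/* Thread-safe FIFO of demuxed AVPackets, protected by an SDL mutex and condition variable. */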
86 typedef struct PacketQueue {
87     AVPacketList *first_pkt, *last_pkt;
88     int nb_packets;
89     int size;
90     int abort_request;
91     SDL_mutex *mutex;
92     SDL_cond *cond;
93 } PacketQueue;
94
95 #define VIDEO_PICTURE_QUEUE_SIZE 2
96 #define SUBPICTURE_QUEUE_SIZE 4
97
98 typedef struct VideoPicture {
99     double pts;                                  ///<presentation time stamp for this picture
100     double target_clock;                         ///<av_gettime() time at which this should be displayed ideally
101     double duration;                             ///<expected duration of the frame
102     int64_t pos;                                 ///<byte position in file
103     SDL_Overlay *bmp;
104     int width, height; /* source height & width */
105     int allocated;
106     enum PixelFormat pix_fmt;
107
108 #if CONFIG_AVFILTER
109     AVFilterBufferRef *picref;
110 #endif
111 } VideoPicture;
112
113 typedef struct SubPicture {
114     double pts; /* presentation time stamp for this picture */
115     AVSubtitle sub;
116 } SubPicture;
117
118 enum {
119     AV_SYNC_AUDIO_MASTER, /* default choice */
120     AV_SYNC_VIDEO_MASTER,
121     AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
122 };
123
124 typedef struct VideoState {
125     SDL_Thread *read_tid;
126     SDL_Thread *video_tid;
127     SDL_Thread *refresh_tid;
128     AVInputFormat *iformat;
129     int no_background;
130     int abort_request;
131     int paused;
132     int last_paused;
133     int seek_req;
134     int seek_flags;
135     int64_t seek_pos;
136     int64_t seek_rel;
137     int read_pause_return;
138     AVFormatContext *ic;
139
140     int audio_stream;
141
142     int av_sync_type;
143     double external_clock; /* external clock base */
144     int64_t external_clock_time;
145
146     double audio_clock;
147     double audio_diff_cum; /* used for AV difference average computation */
148     double audio_diff_avg_coef;
149     double audio_diff_threshold;
150     int audio_diff_avg_count;
151     AVStream *audio_st;
152     PacketQueue audioq;
153     int audio_hw_buf_size;
154     /* samples output by the codec. we reserve more space for avsync
155        compensation */
156     DECLARE_ALIGNED(16,uint8_t,audio_buf1)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
157     DECLARE_ALIGNED(16,uint8_t,audio_buf2)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
158     uint8_t *audio_buf;
159     unsigned int audio_buf_size; /* in bytes */
160     int audio_buf_index; /* in bytes */
161     int audio_write_buf_size;
162     AVPacket audio_pkt_temp;
163     AVPacket audio_pkt;
164     enum AVSampleFormat audio_src_fmt;
165     AVAudioConvert *reformat_ctx;
166     double audio_current_pts;
167     double audio_current_pts_drift;
168
169     enum ShowMode {
170         SHOW_MODE_NONE = -1, SHOW_MODE_VIDEO = 0, SHOW_MODE_WAVES, SHOW_MODE_RDFT, SHOW_MODE_NB
171     } show_mode;
172     int16_t sample_array[SAMPLE_ARRAY_SIZE];
173     int sample_array_index;
174     int last_i_start;
175     RDFTContext *rdft;
176     int rdft_bits;
177     FFTSample *rdft_data;
178     int xpos;
179
180     SDL_Thread *subtitle_tid;
181     int subtitle_stream;
182     int subtitle_stream_changed;
183     AVStream *subtitle_st;
184     PacketQueue subtitleq;
185     SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
186     int subpq_size, subpq_rindex, subpq_windex;
187     SDL_mutex *subpq_mutex;
188     SDL_cond *subpq_cond;
189
190     double frame_timer;
191     double frame_last_pts;
192     double frame_last_delay;
193     double video_clock;                          ///<pts of last decoded frame / predicted pts of next decoded frame
194     int video_stream;
195     AVStream *video_st;
196     PacketQueue videoq;
197     double video_current_pts;                    ///<current displayed pts (different from video_clock if frame fifos are used)
198     double video_current_pts_drift;              ///<video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
199     int64_t video_current_pos;                   ///<current displayed file pos
200     VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
201     int pictq_size, pictq_rindex, pictq_windex;
202     SDL_mutex *pictq_mutex;
203     SDL_cond *pictq_cond;
204 #if !CONFIG_AVFILTER
205     struct SwsContext *img_convert_ctx;
206 #endif
207
208     char filename[1024];
209     int width, height, xleft, ytop;
210     int step;
211
212 #if CONFIG_AVFILTER
213     AVFilterContext *out_video_filter;          ///<the last filter in the video chain
214 #endif
215
216     float skip_frames;
217     float skip_frames_index;
218     int refresh;
219 } VideoState;
220
221 static int opt_help(const char *opt, const char *arg);
222
223 /* options specified by the user */
224 static AVInputFormat *file_iformat;
225 static const char *input_filename;
226 static const char *window_title;
227 static int fs_screen_width;
228 static int fs_screen_height;
229 static int screen_width = 0;
230 static int screen_height = 0;
231 static int audio_disable;
232 static int video_disable;
233 static int wanted_stream[AVMEDIA_TYPE_NB]={
234     [AVMEDIA_TYPE_AUDIO]=-1,
235     [AVMEDIA_TYPE_VIDEO]=-1,
236     [AVMEDIA_TYPE_SUBTITLE]=-1,
237 };
238 static int seek_by_bytes=-1;
239 static int display_disable;
240 static int show_status = 1;
241 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
242 static int64_t start_time = AV_NOPTS_VALUE;
243 static int64_t duration = AV_NOPTS_VALUE;
244 static int thread_count = 1;
245 static int workaround_bugs = 1;
246 static int fast = 0;
247 static int genpts = 0;
248 static int lowres = 0;
249 static int idct = FF_IDCT_AUTO;
250 static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
251 static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
252 static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
253 static int error_recognition = FF_ER_CAREFUL;
254 static int error_concealment = 3;
255 static int decoder_reorder_pts= -1;
256 static int autoexit;
257 static int exit_on_keydown;
258 static int exit_on_mousedown;
259 static int loop=1;
260 static int framedrop=-1;
261 static enum ShowMode show_mode = SHOW_MODE_NONE;
262
263 static int rdftspeed=20;
264 #if CONFIG_AVFILTER
265 static char *vfilters = NULL;
266 #endif
267
268 /* current context */
269 static int is_full_screen;
270 static int64_t audio_callback_time;
271
272 static AVPacket flush_pkt;
273
274 #define FF_ALLOC_EVENT   (SDL_USEREVENT)
275 #define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
276 #define FF_QUIT_EVENT    (SDL_USEREVENT + 2)
277
278 static SDL_Surface *screen;
279
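/* Append a packet to the queue. Returns 0 on success, -1 on failure. */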
280 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
281 {
282     AVPacketList *pkt1;
283
284     /* duplicate the packet */
285     if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
286         return -1;
287
288     pkt1 = av_malloc(sizeof(AVPacketList));
289     if (!pkt1)
290         return -1;
291     pkt1->pkt = *pkt;
292     pkt1->next = NULL;
293
294
295     SDL_LockMutex(q->mutex);
296
297     if (!q->last_pkt)
299         q->first_pkt = pkt1;
300     else
301         q->last_pkt->next = pkt1;
302     q->last_pkt = pkt1;
303     q->nb_packets++;
304     q->size += pkt1->pkt.size + sizeof(*pkt1);
305     /* XXX: should duplicate packet data in DV case */
306     SDL_CondSignal(q->cond);
307
308     SDL_UnlockMutex(q->mutex);
309     return 0;
310 }
311
312 /* packet queue handling */
313 static void packet_queue_init(PacketQueue *q)
314 {
315     memset(q, 0, sizeof(PacketQueue));
316     q->mutex = SDL_CreateMutex();
317     q->cond = SDL_CreateCond();
318     packet_queue_put(q, &flush_pkt);
319 }
320
321 static void packet_queue_flush(PacketQueue *q)
322 {
323     AVPacketList *pkt, *pkt1;
324
325     SDL_LockMutex(q->mutex);
326     for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
327         pkt1 = pkt->next;
328         av_free_packet(&pkt->pkt);
329         av_freep(&pkt);
330     }
331     q->last_pkt = NULL;
332     q->first_pkt = NULL;
333     q->nb_packets = 0;
334     q->size = 0;
335     SDL_UnlockMutex(q->mutex);
336 }
337
338 static void packet_queue_end(PacketQueue *q)
339 {
340     packet_queue_flush(q);
341     SDL_DestroyMutex(q->mutex);
342     SDL_DestroyCond(q->cond);
343 }
344
345 static void packet_queue_abort(PacketQueue *q)
346 {
347     SDL_LockMutex(q->mutex);
348
349     q->abort_request = 1;
350
351     SDL_CondSignal(q->cond);
352
353     SDL_UnlockMutex(q->mutex);
354 }
355
356 /* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
357 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
358 {
359     AVPacketList *pkt1;
360     int ret;
361
362     SDL_LockMutex(q->mutex);
363
364     for(;;) {
365         if (q->abort_request) {
366             ret = -1;
367             break;
368         }
369
370         pkt1 = q->first_pkt;
371         if (pkt1) {
372             q->first_pkt = pkt1->next;
373             if (!q->first_pkt)
374                 q->last_pkt = NULL;
375             q->nb_packets--;
376             q->size -= pkt1->pkt.size + sizeof(*pkt1);
377             *pkt = pkt1->pkt;
378             av_free(pkt1);
379             ret = 1;
380             break;
381         } else if (!block) {
382             ret = 0;
383             break;
384         } else {
385             SDL_CondWait(q->cond, q->mutex);
386         }
387     }
388     SDL_UnlockMutex(q->mutex);
389     return ret;
390 }
391
392 static inline void fill_rectangle(SDL_Surface *screen,
393                                   int x, int y, int w, int h, int color)
394 {
395     SDL_Rect rect;
396     rect.x = x;
397     rect.y = y;
398     rect.w = w;
399     rect.h = h;
400     SDL_FillRect(screen, &rect, color);
401 }
402
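/* Blend newp over oldp with alpha a; newp may be the sum of 2^s samples, hence the extra shift. */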
403 #define ALPHA_BLEND(a, oldp, newp, s)\
404 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
405
406 #define RGBA_IN(r, g, b, a, s)\
407 {\
408     unsigned int v = ((const uint32_t *)(s))[0];\
409     a = (v >> 24) & 0xff;\
410     r = (v >> 16) & 0xff;\
411     g = (v >> 8) & 0xff;\
412     b = v & 0xff;\
413 }
414
415 #define YUVA_IN(y, u, v, a, s, pal)\
416 {\
417     unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
418     a = (val >> 24) & 0xff;\
419     y = (val >> 16) & 0xff;\
420     u = (val >> 8) & 0xff;\
421     v = val & 0xff;\
422 }
423
424 #define YUVA_OUT(d, y, u, v, a)\
425 {\
426     ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
427 }
428
429
430 #define BPP 1
431
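/* Alpha-blend a palettized subtitle rectangle onto a YUV420P destination picture. */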
432 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
433 {
434     int wrap, wrap3, width2, skip2;
435     int y, u, v, a, u1, v1, a1, w, h;
436     uint8_t *lum, *cb, *cr;
437     const uint8_t *p;
438     const uint32_t *pal;
439     int dstx, dsty, dstw, dsth;
440
441     dstw = av_clip(rect->w, 0, imgw);
442     dsth = av_clip(rect->h, 0, imgh);
443     dstx = av_clip(rect->x, 0, imgw - dstw);
444     dsty = av_clip(rect->y, 0, imgh - dsth);
445     lum = dst->data[0] + dsty * dst->linesize[0];
446     cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
447     cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
448
449     width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
450     skip2 = dstx >> 1;
451     wrap = dst->linesize[0];
452     wrap3 = rect->pict.linesize[0];
453     p = rect->pict.data[0];
454     pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */
455
456     if (dsty & 1) {
457         lum += dstx;
458         cb += skip2;
459         cr += skip2;
460
461         if (dstx & 1) {
462             YUVA_IN(y, u, v, a, p, pal);
463             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
464             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
465             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
466             cb++;
467             cr++;
468             lum++;
469             p += BPP;
470         }
471         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
472             YUVA_IN(y, u, v, a, p, pal);
473             u1 = u;
474             v1 = v;
475             a1 = a;
476             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
477
478             YUVA_IN(y, u, v, a, p + BPP, pal);
479             u1 += u;
480             v1 += v;
481             a1 += a;
482             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
483             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
484             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
485             cb++;
486             cr++;
487             p += 2 * BPP;
488             lum += 2;
489         }
490         if (w) {
491             YUVA_IN(y, u, v, a, p, pal);
492             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
493             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
494             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
495             p++;
496             lum++;
497         }
498         p += wrap3 - dstw * BPP;
499         lum += wrap - dstw - dstx;
500         cb += dst->linesize[1] - width2 - skip2;
501         cr += dst->linesize[2] - width2 - skip2;
502     }
503     for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
504         lum += dstx;
505         cb += skip2;
506         cr += skip2;
507
508         if (dstx & 1) {
509             YUVA_IN(y, u, v, a, p, pal);
510             u1 = u;
511             v1 = v;
512             a1 = a;
513             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
514             p += wrap3;
515             lum += wrap;
516             YUVA_IN(y, u, v, a, p, pal);
517             u1 += u;
518             v1 += v;
519             a1 += a;
520             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
521             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
522             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
523             cb++;
524             cr++;
525             p += -wrap3 + BPP;
526             lum += -wrap + 1;
527         }
528         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
529             YUVA_IN(y, u, v, a, p, pal);
530             u1 = u;
531             v1 = v;
532             a1 = a;
533             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
534
535             YUVA_IN(y, u, v, a, p + BPP, pal);
536             u1 += u;
537             v1 += v;
538             a1 += a;
539             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
540             p += wrap3;
541             lum += wrap;
542
543             YUVA_IN(y, u, v, a, p, pal);
544             u1 += u;
545             v1 += v;
546             a1 += a;
547             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
548
549             YUVA_IN(y, u, v, a, p + BPP, pal);
550             u1 += u;
551             v1 += v;
552             a1 += a;
553             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
554
555             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
556             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
557
558             cb++;
559             cr++;
560             p += -wrap3 + 2 * BPP;
561             lum += -wrap + 2;
562         }
563         if (w) {
564             YUVA_IN(y, u, v, a, p, pal);
565             u1 = u;
566             v1 = v;
567             a1 = a;
568             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
569             p += wrap3;
570             lum += wrap;
571             YUVA_IN(y, u, v, a, p, pal);
572             u1 += u;
573             v1 += v;
574             a1 += a;
575             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
576             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
577             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
578             cb++;
579             cr++;
580             p += -wrap3 + BPP;
581             lum += -wrap + 1;
582         }
583         p += wrap3 + (wrap3 - dstw * BPP);
584         lum += wrap + (wrap - dstw - dstx);
585         cb += dst->linesize[1] - width2 - skip2;
586         cr += dst->linesize[2] - width2 - skip2;
587     }
588     /* handle odd height */
589     if (h) {
590         lum += dstx;
591         cb += skip2;
592         cr += skip2;
593
594         if (dstx & 1) {
595             YUVA_IN(y, u, v, a, p, pal);
596             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
597             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
598             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
599             cb++;
600             cr++;
601             lum++;
602             p += BPP;
603         }
604         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
605             YUVA_IN(y, u, v, a, p, pal);
606             u1 = u;
607             v1 = v;
608             a1 = a;
609             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
610
611             YUVA_IN(y, u, v, a, p + BPP, pal);
612             u1 += u;
613             v1 += v;
614             a1 += a;
615             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
616             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
617             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
618             cb++;
619             cr++;
620             p += 2 * BPP;
621             lum += 2;
622         }
623         if (w) {
624             YUVA_IN(y, u, v, a, p, pal);
625             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
626             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
627             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
628         }
629     }
630 }
631
632 static void free_subpicture(SubPicture *sp)
633 {
634     avsubtitle_free(&sp->sub);
635 }
636
637 static void video_image_display(VideoState *is)
638 {
639     VideoPicture *vp;
640     SubPicture *sp;
641     AVPicture pict;
642     float aspect_ratio;
643     int width, height, x, y;
644     SDL_Rect rect;
645     int i;
646
647     vp = &is->pictq[is->pictq_rindex];
648     if (vp->bmp) {
649 #if CONFIG_AVFILTER
650          if (vp->picref->video->sample_aspect_ratio.num == 0)
651              aspect_ratio = 0;
652          else
653              aspect_ratio = av_q2d(vp->picref->video->sample_aspect_ratio);
654 #else
655
656         /* XXX: use variable in the frame */
657         if (is->video_st->sample_aspect_ratio.num)
658             aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
659         else if (is->video_st->codec->sample_aspect_ratio.num)
660             aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
661         else
662             aspect_ratio = 0;
663 #endif
664         if (aspect_ratio <= 0.0)
665             aspect_ratio = 1.0;
666         aspect_ratio *= (float)vp->width / (float)vp->height;
667
668         if (is->subtitle_st) {
669             if (is->subpq_size > 0) {
670                 sp = &is->subpq[is->subpq_rindex];
671
672                 if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) {
673                     SDL_LockYUVOverlay (vp->bmp);
674
675                     pict.data[0] = vp->bmp->pixels[0];
676                     pict.data[1] = vp->bmp->pixels[2];
677                     pict.data[2] = vp->bmp->pixels[1];
678
679                     pict.linesize[0] = vp->bmp->pitches[0];
680                     pict.linesize[1] = vp->bmp->pitches[2];
681                     pict.linesize[2] = vp->bmp->pitches[1];
682
683                     for (i = 0; i < sp->sub.num_rects; i++)
684                         blend_subrect(&pict, sp->sub.rects[i],
685                                       vp->bmp->w, vp->bmp->h);
686
687                     SDL_UnlockYUVOverlay (vp->bmp);
688                 }
689             }
690         }
691
692
693         /* XXX: we suppose the screen has a 1.0 pixel ratio */
694         height = is->height;
695         width = ((int)rint(height * aspect_ratio)) & ~1;
696         if (width > is->width) {
697             width = is->width;
698             height = ((int)rint(width / aspect_ratio)) & ~1;
699         }
700         x = (is->width - width) / 2;
701         y = (is->height - height) / 2;
702         is->no_background = 0;
703         rect.x = is->xleft + x;
704         rect.y = is->ytop  + y;
705         rect.w = FFMAX(width,  1);
706         rect.h = FFMAX(height, 1);
707         SDL_DisplayYUVOverlay(vp->bmp, &rect);
708     }
709 }
710
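/* Positive modulo: maps a into [0, b) even when a is negative. */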
711 static inline int compute_mod(int a, int b)
712 {
713     return a < 0 ? a%b + b : a%b;
714 }
715
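/* Draw the audio waveform or the RDFT spectrum of the most recently played samples. */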
716 static void video_audio_display(VideoState *s)
717 {
718     int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
719     int ch, channels, h, h2, bgcolor, fgcolor;
720     int64_t time_diff;
721     int rdft_bits, nb_freq;
722
723     for(rdft_bits=1; (1<<rdft_bits)<2*s->height; rdft_bits++)
724         ;
725     nb_freq= 1<<(rdft_bits-1);
726
727     /* compute display index : center on currently output samples */
728     channels = s->audio_st->codec->channels;
729     nb_display_channels = channels;
730     if (!s->paused) {
731         int data_used= s->show_mode == SHOW_MODE_WAVES ? s->width : (2*nb_freq);
732         n = 2 * channels;
733         delay = s->audio_write_buf_size;
734         delay /= n;
735
736         /* to be more precise, we take into account the time spent since
737            the last buffer computation */
738         if (audio_callback_time) {
739             time_diff = av_gettime() - audio_callback_time;
740             delay -= (time_diff * s->audio_st->codec->sample_rate) / 1000000;
741         }
742
743         delay += 2*data_used;
744         if (delay < data_used)
745             delay = data_used;
746
747         i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
748         if (s->show_mode == SHOW_MODE_WAVES) {
749             h= INT_MIN;
750             for(i=0; i<1000; i+=channels){
751                 int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
752                 int a= s->sample_array[idx];
753                 int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
754                 int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
755                 int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
756                 int score= a-d;
757                 if(h<score && (b^c)<0){
758                     h= score;
759                     i_start= idx;
760                 }
761             }
762         }
763
764         s->last_i_start = i_start;
765     } else {
766         i_start = s->last_i_start;
767     }
768
769     bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
770     if (s->show_mode == SHOW_MODE_WAVES) {
771         fill_rectangle(screen,
772                        s->xleft, s->ytop, s->width, s->height,
773                        bgcolor);
774
775         fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
776
777         /* total height for one channel */
778         h = s->height / nb_display_channels;
779         /* graph height / 2 */
780         h2 = (h * 9) / 20;
781         for(ch = 0;ch < nb_display_channels; ch++) {
782             i = i_start + ch;
783             y1 = s->ytop + ch * h + (h / 2); /* position of center line */
784             for(x = 0; x < s->width; x++) {
785                 y = (s->sample_array[i] * h2) >> 15;
786                 if (y < 0) {
787                     y = -y;
788                     ys = y1 - y;
789                 } else {
790                     ys = y1;
791                 }
792                 fill_rectangle(screen,
793                                s->xleft + x, ys, 1, y,
794                                fgcolor);
795                 i += channels;
796                 if (i >= SAMPLE_ARRAY_SIZE)
797                     i -= SAMPLE_ARRAY_SIZE;
798             }
799         }
800
801         fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
802
803         for(ch = 1;ch < nb_display_channels; ch++) {
804             y = s->ytop + ch * h;
805             fill_rectangle(screen,
806                            s->xleft, y, s->width, 1,
807                            fgcolor);
808         }
809         SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
810     }else{
811         nb_display_channels= FFMIN(nb_display_channels, 2);
812         if(rdft_bits != s->rdft_bits){
813             av_rdft_end(s->rdft);
814             av_free(s->rdft_data);
815             s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
816             s->rdft_bits= rdft_bits;
817             s->rdft_data= av_malloc(4*nb_freq*sizeof(*s->rdft_data));
818         }
819         {
820             FFTSample *data[2];
821             for(ch = 0;ch < nb_display_channels; ch++) {
822                 data[ch] = s->rdft_data + 2*nb_freq*ch;
823                 i = i_start + ch;
824                 for(x = 0; x < 2*nb_freq; x++) {
825                     double w= (x-nb_freq)*(1.0/nb_freq);
826                     data[ch][x]= s->sample_array[i]*(1.0-w*w);
827                     i += channels;
828                     if (i >= SAMPLE_ARRAY_SIZE)
829                         i -= SAMPLE_ARRAY_SIZE;
830                 }
831                 av_rdft_calc(s->rdft, data[ch]);
832             }
833             // Least efficient way to do this; we should of course access the data directly, but it's more than fast enough.
834             for(y=0; y<s->height; y++){
835                 double w= 1/sqrt(nb_freq);
836                 int a= sqrt(w*sqrt(data[0][2*y+0]*data[0][2*y+0] + data[0][2*y+1]*data[0][2*y+1]));
837                 int b= (nb_display_channels == 2 ) ? sqrt(w*sqrt(data[1][2*y+0]*data[1][2*y+0]
838                        + data[1][2*y+1]*data[1][2*y+1])) : a;
839                 a= FFMIN(a,255);
840                 b= FFMIN(b,255);
841                 fgcolor = SDL_MapRGB(screen->format, a, b, (a+b)/2);
842
843                 fill_rectangle(screen,
844                             s->xpos, s->height-y, 1, 1,
845                             fgcolor);
846             }
847         }
848         SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
849         s->xpos++;
850         if(s->xpos >= s->width)
851             s->xpos= s->xleft;
852     }
853 }
854
855 static void stream_close(VideoState *is)
856 {
857     VideoPicture *vp;
858     int i;
859     /* XXX: use a special url_shutdown call to abort parse cleanly */
860     is->abort_request = 1;
861     SDL_WaitThread(is->read_tid, NULL);
862     SDL_WaitThread(is->refresh_tid, NULL);
863
864     /* free all pictures */
865     for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
866         vp = &is->pictq[i];
867 #if CONFIG_AVFILTER
868         if (vp->picref) {
869             avfilter_unref_buffer(vp->picref);
870             vp->picref = NULL;
871         }
872 #endif
873         if (vp->bmp) {
874             SDL_FreeYUVOverlay(vp->bmp);
875             vp->bmp = NULL;
876         }
877     }
878     SDL_DestroyMutex(is->pictq_mutex);
879     SDL_DestroyCond(is->pictq_cond);
880     SDL_DestroyMutex(is->subpq_mutex);
881     SDL_DestroyCond(is->subpq_cond);
882 #if !CONFIG_AVFILTER
883     if (is->img_convert_ctx)
884         sws_freeContext(is->img_convert_ctx);
885 #endif
886     av_free(is);
887 }
888
889 static void do_exit(VideoState *is)
890 {
891     if (is) {
892         stream_close(is);
893     }
894     uninit_opts();
895 #if CONFIG_AVFILTER
896     avfilter_uninit();
897 #endif
898     if (show_status)
899         printf("\n");
900     SDL_Quit();
901     av_log(NULL, AV_LOG_QUIET, "%s", "");
902     exit(0);
903 }
904
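/* Create or resize the SDL output surface to match the video (or requested / fullscreen) size. */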
905 static int video_open(VideoState *is){
906     int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
907     int w,h;
908
909     if(is_full_screen) flags |= SDL_FULLSCREEN;
910     else               flags |= SDL_RESIZABLE;
911
912     if (is_full_screen && fs_screen_width) {
913         w = fs_screen_width;
914         h = fs_screen_height;
915     } else if(!is_full_screen && screen_width){
916         w = screen_width;
917         h = screen_height;
918 #if CONFIG_AVFILTER
919     }else if (is->out_video_filter && is->out_video_filter->inputs[0]){
920         w = is->out_video_filter->inputs[0]->w;
921         h = is->out_video_filter->inputs[0]->h;
922 #else
923     }else if (is->video_st && is->video_st->codec->width){
924         w = is->video_st->codec->width;
925         h = is->video_st->codec->height;
926 #endif
927     } else {
928         w = 640;
929         h = 480;
930     }
931     if(screen && is->width == screen->w && screen->w == w
932        && is->height== screen->h && screen->h == h)
933         return 0;
934
935 #ifndef __APPLE__
936     screen = SDL_SetVideoMode(w, h, 0, flags);
937 #else
938     /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
939     screen = SDL_SetVideoMode(w, h, 24, flags);
940 #endif
941     if (!screen) {
942         fprintf(stderr, "SDL: could not set video mode - exiting\n");
943         do_exit(is);
944     }
945     if (!window_title)
946         window_title = input_filename;
947     SDL_WM_SetCaption(window_title, window_title);
948
949     is->width = screen->w;
950     is->height = screen->h;
951
952     return 0;
953 }
954
955 /* display the current picture, if any */
956 static void video_display(VideoState *is)
957 {
958     if(!screen)
959         video_open(is);
960     if (is->audio_st && is->show_mode != SHOW_MODE_VIDEO)
961         video_audio_display(is);
962     else if (is->video_st)
963         video_image_display(is);
964 }
965
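/* Periodically push an FF_REFRESH_EVENT to the main loop to trigger display updates. */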
966 static int refresh_thread(void *opaque)
967 {
968     VideoState *is= opaque;
969     while(!is->abort_request){
970         SDL_Event event;
971         event.type = FF_REFRESH_EVENT;
972         event.user.data1 = opaque;
973         if(!is->refresh){
974             is->refresh=1;
975             SDL_PushEvent(&event);
976         }
977         // FIXME: ideally we should wait for the correct time, but SDL's event passing is so slow it would be silly
978         usleep(is->audio_st && is->show_mode != SHOW_MODE_VIDEO ? rdftspeed*1000 : 5000);
979     }
980     return 0;
981 }
982
983 /* get the current audio clock value */
984 static double get_audio_clock(VideoState *is)
985 {
986     if (is->paused) {
987         return is->audio_current_pts;
988     } else {
989         return is->audio_current_pts_drift + av_gettime() / 1000000.0;
990     }
991 }
992
993 /* get the current video clock value */
994 static double get_video_clock(VideoState *is)
995 {
996     if (is->paused) {
997         return is->video_current_pts;
998     } else {
999         return is->video_current_pts_drift + av_gettime() / 1000000.0;
1000     }
1001 }
1002
1003 /* get the current external clock value */
1004 static double get_external_clock(VideoState *is)
1005 {
1006     int64_t ti;
1007     ti = av_gettime();
1008     return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
1009 }
1010
1011 /* get the current master clock value */
1012 static double get_master_clock(VideoState *is)
1013 {
1014     double val;
1015
1016     if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1017         if (is->video_st)
1018             val = get_video_clock(is);
1019         else
1020             val = get_audio_clock(is);
1021     } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1022         if (is->audio_st)
1023             val = get_audio_clock(is);
1024         else
1025             val = get_video_clock(is);
1026     } else {
1027         val = get_external_clock(is);
1028     }
1029     return val;
1030 }
1031
1032 /* seek in the stream */
1033 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1034 {
1035     if (!is->seek_req) {
1036         is->seek_pos = pos;
1037         is->seek_rel = rel;
1038         is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1039         if (seek_by_bytes)
1040             is->seek_flags |= AVSEEK_FLAG_BYTE;
1041         is->seek_req = 1;
1042     }
1043 }
1044
1045 /* pause or resume the video */
1046 static void stream_toggle_pause(VideoState *is)
1047 {
1048     if (is->paused) {
1049         is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
1050         if(is->read_pause_return != AVERROR(ENOSYS)){
1051             is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
1052         }
1053         is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
1054     }
1055     is->paused = !is->paused;
1056 }
1057
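/* Compute the time at which the frame should be displayed, adjusting the inter-frame delay to follow the master clock. */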
1058 static double compute_target_time(double frame_current_pts, VideoState *is)
1059 {
1060     double delay, sync_threshold, diff;
1061
1062     /* compute nominal delay */
1063     delay = frame_current_pts - is->frame_last_pts;
1064     if (delay <= 0 || delay >= 10.0) {
1065         /* if incorrect delay, use previous one */
1066         delay = is->frame_last_delay;
1067     } else {
1068         is->frame_last_delay = delay;
1069     }
1070     is->frame_last_pts = frame_current_pts;
1071
1072     /* update delay to follow master synchronisation source */
1073     if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1074          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1075         /* if video is slave, we try to correct big delays by
1076            duplicating or deleting a frame */
1077         diff = get_video_clock(is) - get_master_clock(is);
1078
1079         /* skip or repeat frame. We take into account the
1080            delay to compute the threshold. I still don't know
1081            if it is the best guess */
1082         sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1083         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1084             if (diff <= -sync_threshold)
1085                 delay = 0;
1086             else if (diff >= sync_threshold)
1087                 delay = 2 * delay;
1088         }
1089     }
1090     is->frame_timer += delay;
1091
1092     av_dlog(NULL, "video: delay=%0.3f pts=%0.3f A-V=%f\n",
1093             delay, frame_current_pts, -diff);
1094
1095     return is->frame_timer;
1096 }
1097
1098 /* called to display each frame */
1099 static void video_refresh(void *opaque)
1100 {
1101     VideoState *is = opaque;
1102     VideoPicture *vp;
1103
1104     SubPicture *sp, *sp2;
1105
1106     if (is->video_st) {
1107 retry:
1108         if (is->pictq_size == 0) {
1109             // nothing to do, no picture to display in the queue
1110         } else {
1111             double time= av_gettime()/1000000.0;
1112             double next_target;
1113             /* dequeue the picture */
1114             vp = &is->pictq[is->pictq_rindex];
1115
1116             if(time < vp->target_clock)
1117                 return;
1118             /* update current video pts */
1119             is->video_current_pts = vp->pts;
1120             is->video_current_pts_drift = is->video_current_pts - time;
1121             is->video_current_pos = vp->pos;
1122             if(is->pictq_size > 1){
1123                 VideoPicture *nextvp= &is->pictq[(is->pictq_rindex+1)%VIDEO_PICTURE_QUEUE_SIZE];
1124                 assert(nextvp->target_clock >= vp->target_clock);
1125                 next_target= nextvp->target_clock;
1126             }else{
1127                 next_target= vp->target_clock + vp->duration;
1128             }
1129             if((framedrop>0 || (framedrop && is->audio_st)) && time > next_target){
1130                 is->skip_frames *= 1.0 + FRAME_SKIP_FACTOR;
1131                 if(is->pictq_size > 1 || time > next_target + 0.5){
1132                     /* update queue size and signal for next picture */
1133                     if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1134                         is->pictq_rindex = 0;
1135
1136                     SDL_LockMutex(is->pictq_mutex);
1137                     is->pictq_size--;
1138                     SDL_CondSignal(is->pictq_cond);
1139                     SDL_UnlockMutex(is->pictq_mutex);
1140                     goto retry;
1141                 }
1142             }
1143
1144             if(is->subtitle_st) {
1145                 if (is->subtitle_stream_changed) {
1146                     SDL_LockMutex(is->subpq_mutex);
1147
1148                     while (is->subpq_size) {
1149                         free_subpicture(&is->subpq[is->subpq_rindex]);
1150
1151                         /* update queue size and signal for next picture */
1152                         if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1153                             is->subpq_rindex = 0;
1154
1155                         is->subpq_size--;
1156                     }
1157                     is->subtitle_stream_changed = 0;
1158
1159                     SDL_CondSignal(is->subpq_cond);
1160                     SDL_UnlockMutex(is->subpq_mutex);
1161                 } else {
1162                     if (is->subpq_size > 0) {
1163                         sp = &is->subpq[is->subpq_rindex];
1164
1165                         if (is->subpq_size > 1)
1166                             sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1167                         else
1168                             sp2 = NULL;
1169
1170                         if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1171                                 || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1172                         {
1173                             free_subpicture(sp);
1174
1175                             /* update queue size and signal for next picture */
1176                             if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1177                                 is->subpq_rindex = 0;
1178
1179                             SDL_LockMutex(is->subpq_mutex);
1180                             is->subpq_size--;
1181                             SDL_CondSignal(is->subpq_cond);
1182                             SDL_UnlockMutex(is->subpq_mutex);
1183                         }
1184                     }
1185                 }
1186             }
1187
1188             /* display picture */
1189             if (!display_disable)
1190                 video_display(is);
1191
1192             /* update queue size and signal for next picture */
1193             if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1194                 is->pictq_rindex = 0;
1195
1196             SDL_LockMutex(is->pictq_mutex);
1197             is->pictq_size--;
1198             SDL_CondSignal(is->pictq_cond);
1199             SDL_UnlockMutex(is->pictq_mutex);
1200         }
1201     } else if (is->audio_st) {
1202         /* draw the next audio frame */
1203
1204         /* if there is only an audio stream, then display the audio bars (better
1205            than nothing, just to test the implementation) */
1206
1207         /* display picture */
1208         if (!display_disable)
1209             video_display(is);
1210     }
1211     if (show_status) {
1212         static int64_t last_time;
1213         int64_t cur_time;
1214         int aqsize, vqsize, sqsize;
1215         double av_diff;
1216
1217         cur_time = av_gettime();
1218         if (!last_time || (cur_time - last_time) >= 30000) {
1219             aqsize = 0;
1220             vqsize = 0;
1221             sqsize = 0;
1222             if (is->audio_st)
1223                 aqsize = is->audioq.size;
1224             if (is->video_st)
1225                 vqsize = is->videoq.size;
1226             if (is->subtitle_st)
1227                 sqsize = is->subtitleq.size;
1228             av_diff = 0;
1229             if (is->audio_st && is->video_st)
1230                 av_diff = get_audio_clock(is) - get_video_clock(is);
1231             printf("%7.2f A-V:%7.3f s:%3.1f aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
1232                    get_master_clock(is),
1233                    av_diff,
1234                    FFMAX(is->skip_frames-1, 0),
1235                    aqsize / 1024,
1236                    vqsize / 1024,
1237                    sqsize,
1238                    is->video_st ? is->video_st->codec->pts_correction_num_faulty_dts : 0,
1239                    is->video_st ? is->video_st->codec->pts_correction_num_faulty_pts : 0);
1240             fflush(stdout);
1241             last_time = cur_time;
1242         }
1243     }
1244 }
1245
1246 /* allocate a picture (this needs to be done in the main thread to avoid
1247    potential locking problems) */
1248 static void alloc_picture(void *opaque)
1249 {
1250     VideoState *is = opaque;
1251     VideoPicture *vp;
1252
1253     vp = &is->pictq[is->pictq_windex];
1254
1255     if (vp->bmp)
1256         SDL_FreeYUVOverlay(vp->bmp);
1257
1258 #if CONFIG_AVFILTER
1259     if (vp->picref)
1260         avfilter_unref_buffer(vp->picref);
1261     vp->picref = NULL;
1262
1263     vp->width   = is->out_video_filter->inputs[0]->w;
1264     vp->height  = is->out_video_filter->inputs[0]->h;
1265     vp->pix_fmt = is->out_video_filter->inputs[0]->format;
1266 #else
1267     vp->width   = is->video_st->codec->width;
1268     vp->height  = is->video_st->codec->height;
1269     vp->pix_fmt = is->video_st->codec->pix_fmt;
1270 #endif
1271
1272     vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
1273                                    SDL_YV12_OVERLAY,
1274                                    screen);
1275     if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
1276         /* SDL allocates a buffer smaller than requested if the video
1277          * overlay hardware is unable to support the requested size. */
1278         fprintf(stderr, "Error: the video system does not support an image\n"
1279                         "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n"
1280                         "to reduce the image size.\n", vp->width, vp->height );
1281         do_exit(is);
1282     }
1283
1284     SDL_LockMutex(is->pictq_mutex);
1285     vp->allocated = 1;
1286     SDL_CondSignal(is->pictq_cond);
1287     SDL_UnlockMutex(is->pictq_mutex);
1288 }
1289
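/* Wait for a free slot, convert the decoded frame into the SDL overlay and queue it for display. */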
1290 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
1291 {
1292     VideoPicture *vp;
1293     double frame_delay, pts = pts1;
1294
1295     /* compute the exact PTS for the picture if it is omitted in the stream
1296      * pts1 is the dts of the pkt / pts of the frame */
1297     if (pts != 0) {
1298         /* update video clock with pts, if present */
1299         is->video_clock = pts;
1300     } else {
1301         pts = is->video_clock;
1302     }
1303     /* update video clock for next frame */
1304     frame_delay = av_q2d(is->video_st->codec->time_base);
1305     /* for MPEG2, the frame can be repeated, so we update the
1306        clock accordingly */
1307     frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1308     is->video_clock += frame_delay;
1309
1310 #if defined(DEBUG_SYNC) && 0
1311     printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
1312            av_get_picture_type_char(src_frame->pict_type), pts, pts1);
1313 #endif
1314
1315     /* wait until we have space to put a new picture */
1316     SDL_LockMutex(is->pictq_mutex);
1317
1318     if(is->pictq_size>=VIDEO_PICTURE_QUEUE_SIZE && !is->refresh)
1319         is->skip_frames= FFMAX(1.0 - FRAME_SKIP_FACTOR, is->skip_frames * (1.0-FRAME_SKIP_FACTOR));
1320
1321     while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1322            !is->videoq.abort_request) {
1323         SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1324     }
1325     SDL_UnlockMutex(is->pictq_mutex);
1326
1327     if (is->videoq.abort_request)
1328         return -1;
1329
1330     vp = &is->pictq[is->pictq_windex];
1331
1332     vp->duration = frame_delay;
1333
1334     /* alloc or resize hardware picture buffer */
1335     if (!vp->bmp ||
1336 #if CONFIG_AVFILTER
1337         vp->width  != is->out_video_filter->inputs[0]->w ||
1338         vp->height != is->out_video_filter->inputs[0]->h) {
1339 #else
1340         vp->width != is->video_st->codec->width ||
1341         vp->height != is->video_st->codec->height) {
1342 #endif
1343         SDL_Event event;
1344
1345         vp->allocated = 0;
1346
1347         /* the allocation must be done in the main thread to avoid
1348            locking problems */
1349         event.type = FF_ALLOC_EVENT;
1350         event.user.data1 = is;
1351         SDL_PushEvent(&event);
1352
1353         /* wait until the picture is allocated */
1354         SDL_LockMutex(is->pictq_mutex);
1355         while (!vp->allocated && !is->videoq.abort_request) {
1356             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1357         }
1358         SDL_UnlockMutex(is->pictq_mutex);
1359
1360         if (is->videoq.abort_request)
1361             return -1;
1362     }
1363
1364     /* if the frame is not skipped, then display it */
1365     if (vp->bmp) {
1366         AVPicture pict;
1367 #if CONFIG_AVFILTER
1368         if(vp->picref)
1369             avfilter_unref_buffer(vp->picref);
1370         vp->picref = src_frame->opaque;
1371 #endif
1372
1373         /* get a pointer to the bitmap */
1374         SDL_LockYUVOverlay (vp->bmp);
1375
1376         memset(&pict,0,sizeof(AVPicture));
1377         pict.data[0] = vp->bmp->pixels[0];
1378         pict.data[1] = vp->bmp->pixels[2];
1379         pict.data[2] = vp->bmp->pixels[1];
1380
1381         pict.linesize[0] = vp->bmp->pitches[0];
1382         pict.linesize[1] = vp->bmp->pitches[2];
1383         pict.linesize[2] = vp->bmp->pitches[1];
1384
1385 #if CONFIG_AVFILTER
1386         //FIXME use direct rendering
1387         av_picture_copy(&pict, (AVPicture *)src_frame,
1388                         vp->pix_fmt, vp->width, vp->height);
1389 #else
1390         sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
1391         is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
1392             vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
1393             PIX_FMT_YUV420P, sws_flags, NULL, NULL, NULL);
1394         if (is->img_convert_ctx == NULL) {
1395             fprintf(stderr, "Cannot initialize the conversion context\n");
1396             exit(1);
1397         }
1398         sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1399                   0, vp->height, pict.data, pict.linesize);
1400 #endif
1401         /* update the bitmap content */
1402         SDL_UnlockYUVOverlay(vp->bmp);
1403
1404         vp->pts = pts;
1405         vp->pos = pos;
1406
1407         /* now we can update the picture count */
1408         if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1409             is->pictq_windex = 0;
1410         SDL_LockMutex(is->pictq_mutex);
1411         vp->target_clock= compute_target_time(vp->pts, is);
1412
1413         is->pictq_size++;
1414         SDL_UnlockMutex(is->pictq_mutex);
1415     }
1416     return 0;
1417 }
1418
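/* Read and decode one video packet; returns 1 when a frame is ready, 0 otherwise, -1 on abort. */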
1419 static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
1420 {
1421     int got_picture, i;
1422
1423     if (packet_queue_get(&is->videoq, pkt, 1) < 0)
1424         return -1;
1425
1426     if (pkt->data == flush_pkt.data) {
1427         avcodec_flush_buffers(is->video_st->codec);
1428
1429         SDL_LockMutex(is->pictq_mutex);
1430         // Make sure there are no long-delay timers (ideally we should just flush the queue, but that's harder)
1431         for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
1432             is->pictq[i].target_clock= 0;
1433         }
1434         while (is->pictq_size && !is->videoq.abort_request) {
1435             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1436         }
1437         is->video_current_pos = -1;
1438         SDL_UnlockMutex(is->pictq_mutex);
1439
1440         is->frame_last_pts = AV_NOPTS_VALUE;
1441         is->frame_last_delay = 0;
1442         is->frame_timer = (double)av_gettime() / 1000000.0;
1443         is->skip_frames = 1;
1444         is->skip_frames_index = 0;
1445         return 0;
1446     }
1447
1448     avcodec_decode_video2(is->video_st->codec, frame, &got_picture, pkt);
1449
1450     if (got_picture) {
1451         if (decoder_reorder_pts == -1) {
1452             *pts = frame->best_effort_timestamp;
1453         } else if (decoder_reorder_pts) {
1454             *pts = frame->pkt_pts;
1455         } else {
1456             *pts = frame->pkt_dts;
1457         }
1458
1459         if (*pts == AV_NOPTS_VALUE) {
1460             *pts = 0;
1461         }
1462
1463         is->skip_frames_index += 1;
1464         if(is->skip_frames_index >= is->skip_frames){
1465             is->skip_frames_index -= FFMAX(is->skip_frames, 1.0);
1466             return 1;
1467         }
1468
1469     }
1470     return 0;
1471 }
1472
1473 #if CONFIG_AVFILTER
1474 typedef struct {
1475     VideoState *is;
1476     AVFrame *frame;
1477     int use_dr1;
1478 } FilterPriv;
1479
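/* get_buffer callback: hand out buffers allocated by the filter graph so decoded frames need no extra copy (direct rendering). */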
1480 static int input_get_buffer(AVCodecContext *codec, AVFrame *pic)
1481 {
1482     AVFilterContext *ctx = codec->opaque;
1483     AVFilterBufferRef  *ref;
1484     int perms = AV_PERM_WRITE;
1485     int i, w, h, stride[4];
1486     unsigned edge;
1487     int pixel_size;
1488
1489     av_assert0(codec->flags & CODEC_FLAG_EMU_EDGE);
1490
1491     if (codec->codec->capabilities & CODEC_CAP_NEG_LINESIZES)
1492         perms |= AV_PERM_NEG_LINESIZES;
1493
1494     if(pic->buffer_hints & FF_BUFFER_HINTS_VALID) {
1495         if(pic->buffer_hints & FF_BUFFER_HINTS_READABLE) perms |= AV_PERM_READ;
1496         if(pic->buffer_hints & FF_BUFFER_HINTS_PRESERVE) perms |= AV_PERM_PRESERVE;
1497         if(pic->buffer_hints & FF_BUFFER_HINTS_REUSABLE) perms |= AV_PERM_REUSE2;
1498     }
1499     if(pic->reference) perms |= AV_PERM_READ | AV_PERM_PRESERVE;
1500
1501     w = codec->width;
1502     h = codec->height;
1503
1504     if(av_image_check_size(w, h, 0, codec))
1505         return -1;
1506
1507     avcodec_align_dimensions2(codec, &w, &h, stride);
1508     edge = codec->flags & CODEC_FLAG_EMU_EDGE ? 0 : avcodec_get_edge_width();
1509     w += edge << 1;
1510     h += edge << 1;
1511
1512     if(!(ref = avfilter_get_video_buffer(ctx->outputs[0], perms, w, h)))
1513         return -1;
1514
1515     pixel_size = av_pix_fmt_descriptors[ref->format].comp[0].step_minus1+1;
1516     ref->video->w = codec->width;
1517     ref->video->h = codec->height;
1518     for(i = 0; i < 4; i ++) {
1519         unsigned hshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_w : 0;
1520         unsigned vshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_h : 0;
1521
1522         if (ref->data[i]) {
1523             ref->data[i]    += ((edge * pixel_size) >> hshift) + ((edge * ref->linesize[i]) >> vshift);
1524         }
1525         pic->data[i]     = ref->data[i];
1526         pic->linesize[i] = ref->linesize[i];
1527     }
1528     pic->opaque = ref;
1529     pic->age    = INT_MAX;
1530     pic->type   = FF_BUFFER_TYPE_USER;
1531     pic->reordered_opaque = codec->reordered_opaque;
1532     if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
1533     else           pic->pkt_pts = AV_NOPTS_VALUE;
1534     return 0;
1535 }
1536
1537 static void input_release_buffer(AVCodecContext *codec, AVFrame *pic)
1538 {
1539     memset(pic->data, 0, sizeof(pic->data));
1540     avfilter_unref_buffer(pic->opaque);
1541 }
1542
1543 static int input_reget_buffer(AVCodecContext *codec, AVFrame *pic)
1544 {
1545     AVFilterBufferRef *ref = pic->opaque;
1546
1547     if (pic->data[0] == NULL) {
1548         pic->buffer_hints |= FF_BUFFER_HINTS_READABLE;
1549         return codec->get_buffer(codec, pic);
1550     }
1551
1552     if ((codec->width != ref->video->w) || (codec->height != ref->video->h) ||
1553         (codec->pix_fmt != ref->format)) {
1554         av_log(codec, AV_LOG_ERROR, "Picture properties changed.\n");
1555         return -1;
1556     }
1557
1558     pic->reordered_opaque = codec->reordered_opaque;
1559     if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
1560     else           pic->pkt_pts = AV_NOPTS_VALUE;
1561     return 0;
1562 }
1563
1564 static int input_init(AVFilterContext *ctx, const char *args, void *opaque)
1565 {
1566     FilterPriv *priv = ctx->priv;
1567     AVCodecContext *codec;
1568     if(!opaque) return -1;
1569
1570     priv->is = opaque;
1571     codec    = priv->is->video_st->codec;
1572     codec->opaque = ctx;
1573     if((codec->codec->capabilities & CODEC_CAP_DR1)
1574     ) {
1575         av_assert0(codec->flags & CODEC_FLAG_EMU_EDGE);
1576         priv->use_dr1 = 1;
1577         codec->get_buffer     = input_get_buffer;
1578         codec->release_buffer = input_release_buffer;
1579         codec->reget_buffer   = input_reget_buffer;
1580         codec->thread_safe_callbacks = 1;
1581     }
1582
1583     priv->frame = avcodec_alloc_frame();
1584
1585     return 0;
1586 }
1587
1588 static void input_uninit(AVFilterContext *ctx)
1589 {
1590     FilterPriv *priv = ctx->priv;
1591     av_free(priv->frame);
1592 }
1593
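/* request_frame callback: decode the next video frame and push it into the filter graph. */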
1594 static int input_request_frame(AVFilterLink *link)
1595 {
1596     FilterPriv *priv = link->src->priv;
1597     AVFilterBufferRef *picref;
1598     int64_t pts = 0;
1599     AVPacket pkt;
1600     int ret;
1601
1602     while (!(ret = get_video_frame(priv->is, priv->frame, &pts, &pkt)))
1603         av_free_packet(&pkt);
1604     if (ret < 0)
1605         return -1;
1606
1607     if(priv->use_dr1 && priv->frame->opaque) {
1608         picref = avfilter_ref_buffer(priv->frame->opaque, ~0);
1609     } else {
1610         picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, link->w, link->h);
1611         av_image_copy(picref->data, picref->linesize,
1612                       priv->frame->data, priv->frame->linesize,
1613                       picref->format, link->w, link->h);
1614     }
1615     av_free_packet(&pkt);
1616
1617     avfilter_copy_frame_props(picref, priv->frame);
1618     picref->pts = pts;
1619
1620     avfilter_start_frame(link, picref);
1621     avfilter_draw_slice(link, 0, link->h, 1);
1622     avfilter_end_frame(link);
1623
1624     return 0;
1625 }
1626
1627 static int input_query_formats(AVFilterContext *ctx)
1628 {
1629     FilterPriv *priv = ctx->priv;
1630     enum PixelFormat pix_fmts[] = {
1631         priv->is->video_st->codec->pix_fmt, PIX_FMT_NONE
1632     };
1633
1634     avfilter_set_common_pixel_formats(ctx, avfilter_make_format_list(pix_fmts));
1635     return 0;
1636 }
1637
1638 static int input_config_props(AVFilterLink *link)
1639 {
1640     FilterPriv *priv  = link->src->priv;
1641     AVStream *s = priv->is->video_st;
1642
1643     link->w = s->codec->width;
1644     link->h = s->codec->height;
1645     link->sample_aspect_ratio = s->sample_aspect_ratio.num ?
1646         s->sample_aspect_ratio : s->codec->sample_aspect_ratio;
1647     link->time_base = s->time_base;
1648
1649     return 0;
1650 }
1651
1652 static AVFilter input_filter =
1653 {
1654     .name      = "ffplay_input",
1655
1656     .priv_size = sizeof(FilterPriv),
1657
1658     .init      = input_init,
1659     .uninit    = input_uninit,
1660
1661     .query_formats = input_query_formats,
1662
1663     .inputs    = (AVFilterPad[]) {{ .name = NULL }},
1664     .outputs   = (AVFilterPad[]) {{ .name = "default",
1665                                     .type = AVMEDIA_TYPE_VIDEO,
1666                                     .request_frame = input_request_frame,
1667                                     .config_props  = input_config_props, },
1668                                   { .name = NULL }},
1669 };
1670
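     /* build the video filter graph: the ffplay_input source feeds either the
        user-supplied filter chain (-vf) or is linked directly to a buffersink
        output restricted to PIX_FMT_YUV420P */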
1671 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters)
1672 {
1673     char sws_flags_str[128];
1674     int ret;
1675     enum PixelFormat pix_fmts[] = { PIX_FMT_YUV420P, PIX_FMT_NONE };
1676     AVFilterContext *filt_src = NULL, *filt_out = NULL;
1677     snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%d", sws_flags);
1678     graph->scale_sws_opts = av_strdup(sws_flags_str);
1679
1680     if ((ret = avfilter_graph_create_filter(&filt_src, &input_filter, "src",
1681                                             NULL, is, graph)) < 0)
1682         return ret;
1683     if ((ret = avfilter_graph_create_filter(&filt_out, avfilter_get_by_name("buffersink"), "out",
1684                                             NULL, pix_fmts, graph)) < 0)
1685         return ret;
1686
1687     if(vfilters) {
1688         AVFilterInOut *outputs = avfilter_inout_alloc();
1689         AVFilterInOut *inputs  = avfilter_inout_alloc();
1690
1691         outputs->name    = av_strdup("in");
1692         outputs->filter_ctx = filt_src;
1693         outputs->pad_idx = 0;
1694         outputs->next    = NULL;
1695
1696         inputs->name    = av_strdup("out");
1697         inputs->filter_ctx = filt_out;
1698         inputs->pad_idx = 0;
1699         inputs->next    = NULL;
1700
1701         if ((ret = avfilter_graph_parse(graph, vfilters, &inputs, &outputs, NULL)) < 0)
1702             return ret;
1703     } else {
1704         if ((ret = avfilter_link(filt_src, 0, filt_out, 0)) < 0)
1705             return ret;
1706     }
1707
1708     if ((ret = avfilter_graph_config(graph, NULL)) < 0)
1709         return ret;
1710
1711     is->out_video_filter = filt_out;
1712
1713     return ret;
1714 }
1715
1716 #endif  /* CONFIG_AVFILTER */
1717
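     /* video decoding thread: pull frames (through the filter graph when
        CONFIG_AVFILTER is set, directly from get_video_frame() otherwise),
        convert their timestamps to seconds and hand them to queue_picture() */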
1718 static int video_thread(void *arg)
1719 {
1720     VideoState *is = arg;
1721     AVFrame *frame= avcodec_alloc_frame();
1722     int64_t pts_int = AV_NOPTS_VALUE, pos = -1;
1723     double pts;
1724     int ret;
1725
1726 #if CONFIG_AVFILTER
1727     AVFilterGraph *graph = avfilter_graph_alloc();
1728     AVFilterContext *filt_out = NULL;
1729     int last_w = is->video_st->codec->width;
1730     int last_h = is->video_st->codec->height;
1731
1732     if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
1733         goto the_end;
1734     filt_out = is->out_video_filter;
1735 #endif
1736
1737     for(;;) {
1738 #if !CONFIG_AVFILTER
1739         AVPacket pkt;
1740 #else
1741         AVFilterBufferRef *picref;
1742         AVRational tb = filt_out->inputs[0]->time_base;
1743 #endif
1744         while (is->paused && !is->videoq.abort_request)
1745             SDL_Delay(10);
1746 #if CONFIG_AVFILTER
1747         if (   last_w != is->video_st->codec->width
1748             || last_h != is->video_st->codec->height) {
1749             av_log(NULL, AV_LOG_INFO, "Frame changed from size:%dx%d to size:%dx%d\n",
1750                    last_w, last_h, is->video_st->codec->width, is->video_st->codec->height);
1751             avfilter_graph_free(&graph);
1752             graph = avfilter_graph_alloc();
1753             if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
1754                 goto the_end;
1755             filt_out = is->out_video_filter;
1756             last_w = is->video_st->codec->width;
1757             last_h = is->video_st->codec->height;
1758         }
1759         ret = av_vsink_buffer_get_video_buffer_ref(filt_out, &picref, 0);
1760         if (picref) {
1761             avfilter_fill_frame_from_video_buffer_ref(frame, picref);
1762             pts_int = picref->pts;
1763             pos     = picref->pos;
1764             frame->opaque = picref;
1765         }
1766
1767         if (av_cmp_q(tb, is->video_st->time_base)) {
1768             av_unused int64_t pts1 = pts_int;
1769             pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
1770             av_dlog(NULL, "video_thread(): "
1771                     "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
1772                     tb.num, tb.den, pts1,
1773                     is->video_st->time_base.num, is->video_st->time_base.den, pts_int);
1774         }
1775 #else
1776         ret = get_video_frame(is, frame, &pts_int, &pkt);
1777         pos = pkt.pos;
1778         av_free_packet(&pkt);
1779 #endif
1780
1781         if (ret < 0) goto the_end;
1782
1783 #if CONFIG_AVFILTER
1784         if (!picref)
1785             continue;
1786 #endif
1787
1788         pts = pts_int*av_q2d(is->video_st->time_base);
1789
1790         ret = queue_picture(is, frame, pts, pos);
1791
1792         if (ret < 0)
1793             goto the_end;
1794
1795         if (is->step)
1796             stream_toggle_pause(is);
1797     }
1798  the_end:
1799 #if CONFIG_AVFILTER
1800     avfilter_graph_free(&graph);
1801 #endif
1802     av_free(frame);
1803     return 0;
1804 }
1805
1806 static int subtitle_thread(void *arg)
1807 {
1808     VideoState *is = arg;
1809     SubPicture *sp;
1810     AVPacket pkt1, *pkt = &pkt1;
1811     int got_subtitle;
1812     double pts;
1813     int i, j;
1814     int r, g, b, y, u, v, a;
1815
1816     for(;;) {
1817         while (is->paused && !is->subtitleq.abort_request) {
1818             SDL_Delay(10);
1819         }
1820         if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
1821             break;
1822
1823         if(pkt->data == flush_pkt.data){
1824             avcodec_flush_buffers(is->subtitle_st->codec);
1825             continue;
1826         }
1827         SDL_LockMutex(is->subpq_mutex);
1828         while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
1829                !is->subtitleq.abort_request) {
1830             SDL_CondWait(is->subpq_cond, is->subpq_mutex);
1831         }
1832         SDL_UnlockMutex(is->subpq_mutex);
1833
1834         if (is->subtitleq.abort_request)
1835             return 0;
1836
1837         sp = &is->subpq[is->subpq_windex];
1838
1839         /* NOTE: pts is the PTS of the _first_ subtitle picture beginning in
1840            this packet, if any */
1841         pts = 0;
1842         if (pkt->pts != AV_NOPTS_VALUE)
1843             pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;
1844
1845         avcodec_decode_subtitle2(is->subtitle_st->codec, &sp->sub,
1846                                  &got_subtitle, pkt);
1847
1848         if (got_subtitle && sp->sub.format == 0) {
1849             sp->pts = pts;
1850
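                 /* bitmap subtitles carry an RGBA palette; convert each palette
                    entry in place to YUVA using the CCIR coefficients so it can
                    be blended onto the YUV video frames */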
1851             for (i = 0; i < sp->sub.num_rects; i++)
1852             {
1853                 for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
1854                 {
1855                     RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
1856                     y = RGB_TO_Y_CCIR(r, g, b);
1857                     u = RGB_TO_U_CCIR(r, g, b, 0);
1858                     v = RGB_TO_V_CCIR(r, g, b, 0);
1859                     YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
1860                 }
1861             }
1862
1863             /* now we can update the picture count */
1864             if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
1865                 is->subpq_windex = 0;
1866             SDL_LockMutex(is->subpq_mutex);
1867             is->subpq_size++;
1868             SDL_UnlockMutex(is->subpq_mutex);
1869         }
1870         av_free_packet(pkt);
1871     }
1872     return 0;
1873 }
1874
1875 /* copy samples into the sample_array ring buffer used by the waves/RDFT display */
1876 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1877 {
1878     int size, len;
1879
1880     size = samples_size / sizeof(short);
1881     while (size > 0) {
1882         len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1883         if (len > size)
1884             len = size;
1885         memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1886         samples += len;
1887         is->sample_array_index += len;
1888         if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1889             is->sample_array_index = 0;
1890         size -= len;
1891     }
1892 }
1893
1894 /* return the new audio buffer size (samples can be added or deleted
1895    to get better sync if the video or an external clock is the master) */
1896 static int synchronize_audio(VideoState *is, short *samples,
1897                              int samples_size1, double pts)
1898 {
1899     int n, samples_size;
1900     double ref_clock;
1901
1902     n = 2 * is->audio_st->codec->channels;
1903     samples_size = samples_size1;
1904
1905     /* if not master, then we try to remove or add samples to correct the clock */
1906     if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1907          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1908         double diff, avg_diff;
1909         int wanted_size, min_size, max_size, nb_samples;
1910
1911         ref_clock = get_master_clock(is);
1912         diff = get_audio_clock(is) - ref_clock;
1913
1914         if (diff < AV_NOSYNC_THRESHOLD) {
1915             is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1916             if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1917                 /* not enough measurements yet for a reliable estimate */
1918                 is->audio_diff_avg_count++;
1919             } else {
1920                 /* estimate the A-V difference */
1921                 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
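                     /* audio_diff_cum is an exponentially weighted sum with
                        coefficient exp(log(0.01)/AUDIO_DIFF_AVG_NB), so the last
                        AUDIO_DIFF_AVG_NB differences account for about 99% of the
                        weight; multiplying by (1 - coef) normalizes the sum into
                        an average */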
1922
1923                 if (fabs(avg_diff) >= is->audio_diff_threshold) {
1924                     wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
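                         /* diff seconds of drift correspond to diff * sample_rate
                            frames, i.e. diff * sample_rate * n bytes with n bytes
                            per frame; the correction is then clamped below to at
                            most SAMPLE_CORRECTION_PERCENT_MAX percent of the buffer */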
1925                     nb_samples = samples_size / n;
1926
1927                     min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1928                     max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1929                     if (wanted_size < min_size)
1930                         wanted_size = min_size;
1931                     else if (wanted_size > max_size)
1932                         wanted_size = max_size;
1933
1934                     /* add or remove samples to correct the sync */
1935                     if (wanted_size < samples_size) {
1936                         /* remove samples */
1937                         samples_size = wanted_size;
1938                     } else if (wanted_size > samples_size) {
1939                         uint8_t *samples_end, *q;
1940                         int nb;
1941
1942                         /* add samples: nb is the number of bytes to duplicate */
1943                         nb = (wanted_size - samples_size);
1944                         samples_end = (uint8_t *)samples + samples_size - n;
1945                         q = samples_end + n;
1946                         while (nb > 0) {
1947                             memcpy(q, samples_end, n);
1948                             q += n;
1949                             nb -= n;
1950                         }
1951                         samples_size = wanted_size;
1952                     }
1953                 }
1954 #if 0
1955                 printf("diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
1956                        diff, avg_diff, samples_size - samples_size1,
1957                        is->audio_clock, is->video_clock, is->audio_diff_threshold);
1958 #endif
1959             }
1960         } else {
1961             /* difference is too big: may be initial PTS errors, so
1962                reset the A-V filter */
1963             is->audio_diff_avg_count = 0;
1964             is->audio_diff_cum = 0;
1965         }
1966     }
1967
1968     return samples_size;
1969 }
1970
1971 /* decode one audio frame and return its uncompressed size */
1972 static int audio_decode_frame(VideoState *is, double *pts_ptr)
1973 {
1974     AVPacket *pkt_temp = &is->audio_pkt_temp;
1975     AVPacket *pkt = &is->audio_pkt;
1976     AVCodecContext *dec= is->audio_st->codec;
1977     int n, len1, data_size;
1978     double pts;
1979
1980     for(;;) {
1981         /* NOTE: the audio packet can contain several frames */
1982         while (pkt_temp->size > 0) {
1983             data_size = sizeof(is->audio_buf1);
1984             len1 = avcodec_decode_audio3(dec,
1985                                         (int16_t *)is->audio_buf1, &data_size,
1986                                         pkt_temp);
1987             if (len1 < 0) {
1988                 /* if error, we skip the frame */
1989                 pkt_temp->size = 0;
1990                 break;
1991             }
1992
1993             pkt_temp->data += len1;
1994             pkt_temp->size -= len1;
1995             if (data_size <= 0)
1996                 continue;
1997
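                 /* the SDL audio device is opened with AUDIO_S16SYS, so if the
                    decoder emits another sample format, set up (or reuse) an
                    av_audio_convert context that converts the decoded data from
                    audio_buf1 into audio_buf2 as signed 16-bit samples */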
1998             if (dec->sample_fmt != is->audio_src_fmt) {
1999                 if (is->reformat_ctx)
2000                     av_audio_convert_free(is->reformat_ctx);
2001                 is->reformat_ctx= av_audio_convert_alloc(AV_SAMPLE_FMT_S16, 1,
2002                                                          dec->sample_fmt, 1, NULL, 0);
2003                 if (!is->reformat_ctx) {
2004                     fprintf(stderr, "Cannot convert %s sample format to %s sample format\n",
2005                         av_get_sample_fmt_name(dec->sample_fmt),
2006                         av_get_sample_fmt_name(AV_SAMPLE_FMT_S16));
2007                         break;
2008                 }
2009                 is->audio_src_fmt= dec->sample_fmt;
2010             }
2011
2012             if (is->reformat_ctx) {
2013                 const void *ibuf[6]= {is->audio_buf1};
2014                 void *obuf[6]= {is->audio_buf2};
2015                 int istride[6]= {av_get_bytes_per_sample(dec->sample_fmt)};
2016                 int ostride[6]= {2};
2017                 int len= data_size/istride[0];
2018                 if (av_audio_convert(is->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) {
2019                     printf("av_audio_convert() failed\n");
2020                     break;
2021                 }
2022                 is->audio_buf= is->audio_buf2;
2023                 /* FIXME: existing code assumes that data_size equals framesize*channels*2;
2024                           remove this legacy cruft */
2025                 data_size= len*2;
2026             }else{
2027                 is->audio_buf= is->audio_buf1;
2028             }
2029
2030             /* derive the pts from the running audio clock (which is updated
2031                from the packet pts when available) */
2031             pts = is->audio_clock;
2032             *pts_ptr = pts;
2033             n = 2 * dec->channels;
2034             is->audio_clock += (double)data_size /
2035                 (double)(n * dec->sample_rate);
2036 #ifdef DEBUG
2037             {
2038                 static double last_clock;
2039                 printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
2040                        is->audio_clock - last_clock,
2041                        is->audio_clock, pts);
2042                 last_clock = is->audio_clock;
2043             }
2044 #endif
2045             return data_size;
2046         }
2047
2048         /* free the current packet */
2049         if (pkt->data)
2050             av_free_packet(pkt);
2051
2052         if (is->paused || is->audioq.abort_request) {
2053             return -1;
2054         }
2055
2056         /* read next packet */
2057         if (packet_queue_get(&is->audioq, pkt, 1) < 0)
2058             return -1;
2059         if(pkt->data == flush_pkt.data){
2060             avcodec_flush_buffers(dec);
2061             continue;
2062         }
2063
2064         pkt_temp->data = pkt->data;
2065         pkt_temp->size = pkt->size;
2066
2067         /* update the audio clock with the packet pts, if available */
2068         if (pkt->pts != AV_NOPTS_VALUE) {
2069             is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
2070         }
2071     }
2072 }
2073
2074 /* prepare a new audio buffer */
2075 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2076 {
2077     VideoState *is = opaque;
2078     int audio_size, len1;
2079     int bytes_per_sec;
2080     double pts;
2081
2082     audio_callback_time = av_gettime();
2083
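         /* keep refilling SDL's output buffer: whenever the local audio buffer
            is exhausted, decode (and sync-adjust) another frame; on decode
            failure a short block of silence is emitted instead */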
2084     while (len > 0) {
2085         if (is->audio_buf_index >= is->audio_buf_size) {
2086            audio_size = audio_decode_frame(is, &pts);
2087            if (audio_size < 0) {
2088                 /* if error, just output silence */
2089                is->audio_buf = is->audio_buf1;
2090                is->audio_buf_size = 1024;
2091                memset(is->audio_buf, 0, is->audio_buf_size);
2092            } else {
2093                if (is->show_mode != SHOW_MODE_VIDEO)
2094                    update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2095                audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
2096                                               pts);
2097                is->audio_buf_size = audio_size;
2098            }
2099            is->audio_buf_index = 0;
2100         }
2101         len1 = is->audio_buf_size - is->audio_buf_index;
2102         if (len1 > len)
2103             len1 = len;
2104         memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2105         len -= len1;
2106         stream += len1;
2107         is->audio_buf_index += len1;
2108     }
2109     bytes_per_sec = is->audio_st->codec->sample_rate *
2110             2 * is->audio_st->codec->channels;
2111     is->audio_write_buf_size = is->audio_buf_size - is->audio_buf_index;
2112     /* Let's assume the audio driver that is used by SDL has two periods. */
2113     is->audio_current_pts = is->audio_clock - (double)(2 * is->audio_hw_buf_size + is->audio_write_buf_size) / bytes_per_sec;
2114     is->audio_current_pts_drift = is->audio_current_pts - audio_callback_time / 1000000.0;
2115 }
2116
2117 /* open a given stream. Return 0 if OK */
2118 static int stream_component_open(VideoState *is, int stream_index)
2119 {
2120     AVFormatContext *ic = is->ic;
2121     AVCodecContext *avctx;
2122     AVCodec *codec;
2123     SDL_AudioSpec wanted_spec, spec;
2124     AVDictionary *opts;
2125     AVDictionaryEntry *t = NULL;
2126
2127     if (stream_index < 0 || stream_index >= ic->nb_streams)
2128         return -1;
2129     avctx = ic->streams[stream_index]->codec;
2130
2131     opts = filter_codec_opts(codec_opts, avctx->codec_id, ic, ic->streams[stream_index]);
2132
2133     /* prepare audio output */
2134     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2135         if (avctx->channels > 0) {
2136             avctx->request_channels = FFMIN(2, avctx->channels);
2137         } else {
2138             avctx->request_channels = 2;
2139         }
2140     }
2141
2142     codec = avcodec_find_decoder(avctx->codec_id);
2143     if (!codec)
2144         return -1;
2145
2146     avctx->workaround_bugs = workaround_bugs;
2147     avctx->lowres = lowres;
2148     if(lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
2149     avctx->idct_algo= idct;
2150     if(fast) avctx->flags2 |= CODEC_FLAG2_FAST;
2151     avctx->skip_frame= skip_frame;
2152     avctx->skip_idct= skip_idct;
2153     avctx->skip_loop_filter= skip_loop_filter;
2154     avctx->error_recognition= error_recognition;
2155     avctx->error_concealment= error_concealment;
2156     avctx->thread_count= thread_count;
2157
2158     if(codec->capabilities & CODEC_CAP_DR1)
2159         avctx->flags |= CODEC_FLAG_EMU_EDGE;
2160
2161     if (avcodec_open2(avctx, codec, &opts) < 0)
2163         return -1;
2164     if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2165         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2166         return AVERROR_OPTION_NOT_FOUND;
2167     }
2168
2169     /* prepare audio output */
2170     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2171         if(avctx->sample_rate <= 0 || avctx->channels <= 0){
2172             fprintf(stderr, "Invalid sample rate or channel count\n");
2173             return -1;
2174         }
2175         wanted_spec.freq = avctx->sample_rate;
2176         wanted_spec.format = AUDIO_S16SYS;
2177         wanted_spec.channels = avctx->channels;
2178         wanted_spec.silence = 0;
2179         wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
2180         wanted_spec.callback = sdl_audio_callback;
2181         wanted_spec.userdata = is;
2182         if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2183             fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
2184             return -1;
2185         }
2186         is->audio_hw_buf_size = spec.size;
2187         is->audio_src_fmt= AV_SAMPLE_FMT_S16;
2188     }
2189
2190     ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
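     /* per-type initialization: audio is driven by the SDL callback started
        with SDL_PauseAudio(0), while video and subtitle streams each get their
        own packet queue and decoding thread */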
2191     switch(avctx->codec_type) {
2192     case AVMEDIA_TYPE_AUDIO:
2193         is->audio_stream = stream_index;
2194         is->audio_st = ic->streams[stream_index];
2195         is->audio_buf_size = 0;
2196         is->audio_buf_index = 0;
2197
2198         /* init averaging filter */
2199         is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2200         is->audio_diff_avg_count = 0;
2201         /* since we do not have a precise enough measure of the audio FIFO fullness,
2202            we only correct audio sync if the error is larger than this threshold */
2203         is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / avctx->sample_rate;
2204
2205         memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
2206         packet_queue_init(&is->audioq);
2207         SDL_PauseAudio(0);
2208         break;
2209     case AVMEDIA_TYPE_VIDEO:
2210         is->video_stream = stream_index;
2211         is->video_st = ic->streams[stream_index];
2212
2213         packet_queue_init(&is->videoq);
2214         is->video_tid = SDL_CreateThread(video_thread, is);
2215         break;
2216     case AVMEDIA_TYPE_SUBTITLE:
2217         is->subtitle_stream = stream_index;
2218         is->subtitle_st = ic->streams[stream_index];
2219         packet_queue_init(&is->subtitleq);
2220
2221         is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
2222         break;
2223     default:
2224         break;
2225     }
2226     return 0;
2227 }
2228
2229 static void stream_component_close(VideoState *is, int stream_index)
2230 {
2231     AVFormatContext *ic = is->ic;
2232     AVCodecContext *avctx;
2233
2234     if (stream_index < 0 || stream_index >= ic->nb_streams)
2235         return;
2236     avctx = ic->streams[stream_index]->codec;
2237
2238     switch(avctx->codec_type) {
2239     case AVMEDIA_TYPE_AUDIO:
2240         packet_queue_abort(&is->audioq);
2241
2242         SDL_CloseAudio();
2243
2244         packet_queue_end(&is->audioq);
2245         if (is->reformat_ctx)
2246             av_audio_convert_free(is->reformat_ctx);
2247         is->reformat_ctx = NULL;
2248         break;
2249     case AVMEDIA_TYPE_VIDEO:
2250         packet_queue_abort(&is->videoq);
2251
2252         /* note: we also signal this mutex to make sure we deblock the
2253            video thread in all cases */
2254         SDL_LockMutex(is->pictq_mutex);
2255         SDL_CondSignal(is->pictq_cond);
2256         SDL_UnlockMutex(is->pictq_mutex);
2257
2258         SDL_WaitThread(is->video_tid, NULL);
2259
2260         packet_queue_end(&is->videoq);
2261         break;
2262     case AVMEDIA_TYPE_SUBTITLE:
2263         packet_queue_abort(&is->subtitleq);
2264
2265         /* note: we also signal this mutex to make sure we deblock the
2266            subtitle thread in all cases */
2267         SDL_LockMutex(is->subpq_mutex);
2268         is->subtitle_stream_changed = 1;
2269
2270         SDL_CondSignal(is->subpq_cond);
2271         SDL_UnlockMutex(is->subpq_mutex);
2272
2273         SDL_WaitThread(is->subtitle_tid, NULL);
2274
2275         packet_queue_end(&is->subtitleq);
2276         break;
2277     default:
2278         break;
2279     }
2280
2281     ic->streams[stream_index]->discard = AVDISCARD_ALL;
2282     avcodec_close(avctx);
2283     switch(avctx->codec_type) {
2284     case AVMEDIA_TYPE_AUDIO:
2285         is->audio_st = NULL;
2286         is->audio_stream = -1;
2287         break;
2288     case AVMEDIA_TYPE_VIDEO:
2289         is->video_st = NULL;
2290         is->video_stream = -1;
2291         break;
2292     case AVMEDIA_TYPE_SUBTITLE:
2293         is->subtitle_st = NULL;
2294         is->subtitle_stream = -1;
2295         break;
2296     default:
2297         break;
2298     }
2299 }
2300
2301 /* since we have only one decoding thread, we can use a global
2302    variable instead of a thread local variable */
2303 static VideoState *global_video_state;
2304
2305 static int decode_interrupt_cb(void)
2306 {
2307     return (global_video_state && global_video_state->abort_request);
2308 }
2309
2310 /* this thread gets the stream from the disk or the network */
2311 static int read_thread(void *arg)
2312 {
2313     VideoState *is = arg;
2314     AVFormatContext *ic = NULL;
2315     int err, i, ret;
2316     int st_index[AVMEDIA_TYPE_NB];
2317     AVPacket pkt1, *pkt = &pkt1;
2318     int eof=0;
2319     int pkt_in_play_range = 0;
2320     AVDictionaryEntry *t;
2321     AVDictionary **opts;
2322     int orig_nb_streams;
2323
2324     memset(st_index, -1, sizeof(st_index));
2325     is->video_stream = -1;
2326     is->audio_stream = -1;
2327     is->subtitle_stream = -1;
2328
2329     global_video_state = is;
2330     avio_set_interrupt_cb(decode_interrupt_cb);
2331
2332     err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
2333     if (err < 0) {
2334         print_error(is->filename, err);
2335         ret = -1;
2336         goto fail;
2337     }
2338     if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2339         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2340         ret = AVERROR_OPTION_NOT_FOUND;
2341         goto fail;
2342     }
2343     is->ic = ic;
2344
2345     if(genpts)
2346         ic->flags |= AVFMT_FLAG_GENPTS;
2347
2348     opts = setup_find_stream_info_opts(ic, codec_opts);
2349     orig_nb_streams = ic->nb_streams;
2350
2351     err = avformat_find_stream_info(ic, opts);
2352     if (err < 0) {
2353         fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
2354         ret = -1;
2355         goto fail;
2356     }
2357     for (i = 0; i < orig_nb_streams; i++)
2358         av_dict_free(&opts[i]);
2359     av_freep(&opts);
2360
2361     if(ic->pb)
2362         ic->pb->eof_reached= 0; //FIXME hack, ffplay should probably not use url_feof() to test for the end
2363
2364     if(seek_by_bytes<0)
2365         seek_by_bytes= !!(ic->iformat->flags & AVFMT_TS_DISCONT);
2366
2367     /* if seeking was requested, execute it */
2368     if (start_time != AV_NOPTS_VALUE) {
2369         int64_t timestamp;
2370
2371         timestamp = start_time;
2372         /* add the stream start time */
2373         if (ic->start_time != AV_NOPTS_VALUE)
2374             timestamp += ic->start_time;
2375         ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2376         if (ret < 0) {
2377             fprintf(stderr, "%s: could not seek to position %0.3f\n",
2378                     is->filename, (double)timestamp / AV_TIME_BASE);
2379         }
2380     }
2381
2382     for (i = 0; i < ic->nb_streams; i++)
2383         ic->streams[i]->discard = AVDISCARD_ALL;
2384     if (!video_disable)
2385         st_index[AVMEDIA_TYPE_VIDEO] =
2386             av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2387                                 wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2388     if (!audio_disable)
2389         st_index[AVMEDIA_TYPE_AUDIO] =
2390             av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2391                                 wanted_stream[AVMEDIA_TYPE_AUDIO],
2392                                 st_index[AVMEDIA_TYPE_VIDEO],
2393                                 NULL, 0);
2394     if (!video_disable)
2395         st_index[AVMEDIA_TYPE_SUBTITLE] =
2396             av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2397                                 wanted_stream[AVMEDIA_TYPE_SUBTITLE],
2398                                 (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2399                                  st_index[AVMEDIA_TYPE_AUDIO] :
2400                                  st_index[AVMEDIA_TYPE_VIDEO]),
2401                                 NULL, 0);
2402     if (show_status) {
2403         av_dump_format(ic, 0, is->filename, 0);
2404     }
2405
2406     is->show_mode = show_mode;
2407
2408     /* open the streams */
2409     if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2410         stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2411     }
2412
2413     ret=-1;
2414     if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2415         ret= stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2416     }
2417     is->refresh_tid = SDL_CreateThread(refresh_thread, is);
2418     if (is->show_mode == SHOW_MODE_NONE)
2419         is->show_mode = ret >= 0 ? SHOW_MODE_VIDEO : SHOW_MODE_RDFT;
2420
2421     if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2422         stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2423     }
2424
2425     if (is->video_stream < 0 && is->audio_stream < 0) {
2426         fprintf(stderr, "%s: could not open codecs\n", is->filename);
2427         ret = -1;
2428         goto fail;
2429     }
2430
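         /* main demuxing loop: honour pause and seek requests, stop reading
            while the packet queues are full enough, loop or exit at end of
            file, and dispatch each demuxed packet to the queue of its stream */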
2431     for(;;) {
2432         if (is->abort_request)
2433             break;
2434         if (is->paused != is->last_paused) {
2435             is->last_paused = is->paused;
2436             if (is->paused)
2437                 is->read_pause_return= av_read_pause(ic);
2438             else
2439                 av_read_play(ic);
2440         }
2441 #if CONFIG_RTSP_DEMUXER
2442         if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
2443             /* wait 10 ms to avoid trying to get another packet */
2444             /* XXX: horrible */
2445             SDL_Delay(10);
2446             continue;
2447         }
2448 #endif
2449         if (is->seek_req) {
2450             int64_t seek_target= is->seek_pos;
2451             int64_t seek_min= is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2452             int64_t seek_max= is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2453 //FIXME the +-2 is due to rounding not being done in the correct direction when
2454 //      generating the seek_pos/seek_rel variables
2455
2456             ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2457             if (ret < 0) {
2458                 fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
2459             }else{
2460                 if (is->audio_stream >= 0) {
2461                     packet_queue_flush(&is->audioq);
2462                     packet_queue_put(&is->audioq, &flush_pkt);
2463                 }
2464                 if (is->subtitle_stream >= 0) {
2465                     packet_queue_flush(&is->subtitleq);
2466                     packet_queue_put(&is->subtitleq, &flush_pkt);
2467                 }
2468                 if (is->video_stream >= 0) {
2469                     packet_queue_flush(&is->videoq);
2470                     packet_queue_put(&is->videoq, &flush_pkt);
2471                 }
2472             }
2473             is->seek_req = 0;
2474             eof= 0;
2475         }
2476
2477         /* if the queues are full, no need to read more */
2478         if (   is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2479             || (   (is->audioq   .size  > MIN_AUDIOQ_SIZE || is->audio_stream<0)
2480                 && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream<0)
2481                 && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream<0))) {
2482             /* wait 10 ms */
2483             SDL_Delay(10);
2484             continue;
2485         }
2486         if(eof) {
2487             if(is->video_stream >= 0){
2488                 av_init_packet(pkt);
2489                 pkt->data=NULL;
2490                 pkt->size=0;
2491                 pkt->stream_index= is->video_stream;
2492                 packet_queue_put(&is->videoq, pkt);
2493             }
2494             SDL_Delay(10);
2495             if(is->audioq.size + is->videoq.size + is->subtitleq.size ==0){
2496                 if(loop!=1 && (!loop || --loop)){
2497                     stream_seek(is, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
2498                 }else if(autoexit){
2499                     ret=AVERROR_EOF;
2500                     goto fail;
2501                 }
2502             }
2503             eof=0;
2504             continue;
2505         }
2506         ret = av_read_frame(ic, pkt);
2507         if (ret < 0) {
2508             if (ret == AVERROR_EOF || url_feof(ic->pb))
2509                 eof=1;
2510             if (ic->pb && ic->pb->error)
2511                 break;
2512             SDL_Delay(100); /* wait for user event */
2513             continue;
2514         }
2515         /* check if the packet is in the play range specified by the user; if so queue it, otherwise discard it */
2516         pkt_in_play_range = duration == AV_NOPTS_VALUE ||
2517                 (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
2518                 av_q2d(ic->streams[pkt->stream_index]->time_base) -
2519                 (double)(start_time != AV_NOPTS_VALUE ? start_time : 0)/1000000
2520                 <= ((double)duration/1000000);
2521         if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
2522             packet_queue_put(&is->audioq, pkt);
2523         } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
2524             packet_queue_put(&is->videoq, pkt);
2525         } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
2526             packet_queue_put(&is->subtitleq, pkt);
2527         } else {
2528             av_free_packet(pkt);
2529         }
2530     }
2531     /* wait until the end */
2532     while (!is->abort_request) {
2533         SDL_Delay(100);
2534     }
2535
2536     ret = 0;
2537  fail:
2538     /* disable interrupting */
2539     global_video_state = NULL;
2540
2541     /* close each stream */
2542     if (is->audio_stream >= 0)
2543         stream_component_close(is, is->audio_stream);
2544     if (is->video_stream >= 0)
2545         stream_component_close(is, is->video_stream);
2546     if (is->subtitle_stream >= 0)
2547         stream_component_close(is, is->subtitle_stream);
2548     if (is->ic) {
2549         av_close_input_file(is->ic);
2550         is->ic = NULL; /* safety */
2551     }
2552     avio_set_interrupt_cb(NULL);
2553
2554     if (ret != 0) {
2555         SDL_Event event;
2556
2557         event.type = FF_QUIT_EVENT;
2558         event.user.data1 = is;
2559         SDL_PushEvent(&event);
2560     }
2561     return 0;
2562 }
2563
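     /* allocate and initialize a VideoState and spawn the read_thread that
        does the actual demuxing and per-stream decoder setup */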
2564 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2565 {
2566     VideoState *is;
2567
2568     is = av_mallocz(sizeof(VideoState));
2569     if (!is)
2570         return NULL;
2571     av_strlcpy(is->filename, filename, sizeof(is->filename));
2572     is->iformat = iformat;
2573     is->ytop = 0;
2574     is->xleft = 0;
2575
2576     /* start video display */
2577     is->pictq_mutex = SDL_CreateMutex();
2578     is->pictq_cond = SDL_CreateCond();
2579
2580     is->subpq_mutex = SDL_CreateMutex();
2581     is->subpq_cond = SDL_CreateCond();
2582
2583     is->av_sync_type = av_sync_type;
2584     is->read_tid = SDL_CreateThread(read_thread, is);
2585     if (!is->read_tid) {
2586         av_free(is);
2587         return NULL;
2588     }
2589     return is;
2590 }
2591
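     /* switch to the next stream of the given type, wrapping around at the end
        of the stream list; for subtitles the cycle includes "no subtitle
        stream" (index -1) */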
2592 static void stream_cycle_channel(VideoState *is, int codec_type)
2593 {
2594     AVFormatContext *ic = is->ic;
2595     int start_index, stream_index;
2596     AVStream *st;
2597
2598     if (codec_type == AVMEDIA_TYPE_VIDEO)
2599         start_index = is->video_stream;
2600     else if (codec_type == AVMEDIA_TYPE_AUDIO)
2601         start_index = is->audio_stream;
2602     else
2603         start_index = is->subtitle_stream;
2604     if (start_index < (codec_type == AVMEDIA_TYPE_SUBTITLE ? -1 : 0))
2605         return;
2606     stream_index = start_index;
2607     for(;;) {
2608         if (++stream_index >= is->ic->nb_streams)
2609         {
2610             if (codec_type == AVMEDIA_TYPE_SUBTITLE)
2611             {
2612                 stream_index = -1;
2613                 goto the_end;
2614             } else
2615                 stream_index = 0;
2616         }
2617         if (stream_index == start_index)
2618             return;
2619         st = ic->streams[stream_index];
2620         if (st->codec->codec_type == codec_type) {
2621             /* check that parameters are OK */
2622             switch(codec_type) {
2623             case AVMEDIA_TYPE_AUDIO:
2624                 if (st->codec->sample_rate != 0 &&
2625                     st->codec->channels != 0)
2626                     goto the_end;
2627                 break;
2628             case AVMEDIA_TYPE_VIDEO:
2629             case AVMEDIA_TYPE_SUBTITLE:
2630                 goto the_end;
2631             default:
2632                 break;
2633             }
2634         }
2635     }
2636  the_end:
2637     stream_component_close(is, start_index);
2638     stream_component_open(is, stream_index);
2639 }
2640
2641
2642 static void toggle_full_screen(VideoState *is)
2643 {
2644     is_full_screen = !is_full_screen;
2645     video_open(is);
2646 }
2647
2648 static void toggle_pause(VideoState *is)
2649 {
2650     stream_toggle_pause(is);
2651     is->step = 0;
2652 }
2653
2654 static void step_to_next_frame(VideoState *is)
2655 {
2656     /* if the stream is paused, unpause it, then step */
2657     if (is->paused)
2658         stream_toggle_pause(is);
2659     is->step = 1;
2660 }
2661
2662 static void toggle_audio_display(VideoState *is)
2663 {
2664     int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2665     is->show_mode = (is->show_mode + 1) % SHOW_MODE_NB;
2666     fill_rectangle(screen,
2667                 is->xleft, is->ytop, is->width, is->height,
2668                 bgcolor);
2669     SDL_UpdateRect(screen, is->xleft, is->ytop, is->width, is->height);
2670 }
2671
2672 /* handle an event sent by the GUI */
2673 static void event_loop(VideoState *cur_stream)
2674 {
2675     SDL_Event event;
2676     double incr, pos, frac;
2677
2678     for(;;) {
2679         double x;
2680         SDL_WaitEvent(&event);
2681         switch(event.type) {
2682         case SDL_KEYDOWN:
2683             if (exit_on_keydown) {
2684                 do_exit(cur_stream);
2685                 break;
2686             }
2687             switch(event.key.keysym.sym) {
2688             case SDLK_ESCAPE:
2689             case SDLK_q:
2690                 do_exit(cur_stream);
2691                 break;
2692             case SDLK_f:
2693                 toggle_full_screen(cur_stream);
2694                 break;
2695             case SDLK_p:
2696             case SDLK_SPACE:
2697                 if (cur_stream)
2698                     toggle_pause(cur_stream);
2699                 break;
2700             case SDLK_s: //S: Step to next frame
2701                 if (cur_stream)
2702                     step_to_next_frame(cur_stream);
2703                 break;
2704             case SDLK_a:
2705                 if (cur_stream)
2706                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
2707                 break;
2708             case SDLK_v:
2709                 if (cur_stream)
2710                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
2711                 break;
2712             case SDLK_t:
2713                 if (cur_stream)
2714                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
2715                 break;
2716             case SDLK_w:
2717                 if (cur_stream)
2718                     toggle_audio_display(cur_stream);
2719                 break;
2720             case SDLK_LEFT:
2721                 incr = -10.0;
2722                 goto do_seek;
2723             case SDLK_RIGHT:
2724                 incr = 10.0;
2725                 goto do_seek;
2726             case SDLK_UP:
2727                 incr = 60.0;
2728                 goto do_seek;
2729             case SDLK_DOWN:
2730                 incr = -60.0;
2731             do_seek:
2732                 if (cur_stream) {
2733                     if (seek_by_bytes) {
2734                         if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos>=0){
2735                             pos= cur_stream->video_current_pos;
2736                         }else if(cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos>=0){
2737                             pos= cur_stream->audio_pkt.pos;
2738                         }else
2739                             pos = avio_tell(cur_stream->ic->pb);
2740                         if (cur_stream->ic->bit_rate)
2741                             incr *= cur_stream->ic->bit_rate / 8.0;
2742                         else
2743                             incr *= 180000.0;
2744                         pos += incr;
2745                         stream_seek(cur_stream, pos, incr, 1);
2746                     } else {
2747                         pos = get_master_clock(cur_stream);
2748                         pos += incr;
2749                         stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
2750                     }
2751                 }
2752                 break;
2753             default:
2754                 break;
2755             }
2756             break;
2757         case SDL_MOUSEBUTTONDOWN:
2758             if (exit_on_mousedown) {
2759                 do_exit(cur_stream);
2760                 break;
2761             }
2762         case SDL_MOUSEMOTION:
2763             if(event.type ==SDL_MOUSEBUTTONDOWN){
2764                 x= event.button.x;
2765             }else{
2766                 if(event.motion.state != SDL_PRESSED)
2767                     break;
2768                 x= event.motion.x;
2769             }
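                 /* map the click position to a seek target: a fraction of the
                    file size when seeking by bytes (or when the duration is
                    unknown), a fraction of the total duration otherwise */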
2770             if (cur_stream) {
2771                 if(seek_by_bytes || cur_stream->ic->duration<=0){
2772                     uint64_t size=  avio_size(cur_stream->ic->pb);
2773                     stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
2774                 }else{
2775                     int64_t ts;
2776                     int ns, hh, mm, ss;
2777                     int tns, thh, tmm, tss;
2778                     tns = cur_stream->ic->duration/1000000LL;
2779                     thh = tns/3600;
2780                     tmm = (tns%3600)/60;
2781                     tss = (tns%60);
2782                     frac = x/cur_stream->width;
2783                     ns = frac*tns;
2784                     hh = ns/3600;
2785                     mm = (ns%3600)/60;
2786                     ss = (ns%60);
2787                     fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
2788                             hh, mm, ss, thh, tmm, tss);
2789                     ts = frac*cur_stream->ic->duration;
2790                     if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
2791                         ts += cur_stream->ic->start_time;
2792                     stream_seek(cur_stream, ts, 0, 0);
2793                 }
2794             }
2795             break;
2796         case SDL_VIDEORESIZE:
2797             if (cur_stream) {
2798                 screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
2799                                           SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
2800                 screen_width = cur_stream->width = event.resize.w;
2801                 screen_height= cur_stream->height= event.resize.h;
2802             }
2803             break;
2804         case SDL_QUIT:
2805         case FF_QUIT_EVENT:
2806             do_exit(cur_stream);
2807             break;
2808         case FF_ALLOC_EVENT:
2809             video_open(event.user.data1);
2810             alloc_picture(event.user.data1);
2811             break;
2812         case FF_REFRESH_EVENT:
2813             video_refresh(event.user.data1);
2814             cur_stream->refresh=0;
2815             break;
2816         default:
2817             break;
2818         }
2819     }
2820 }
2821
2822 static int opt_frame_size(const char *opt, const char *arg)
2823 {
2824     av_log(NULL, AV_LOG_WARNING, "Option -s is deprecated, use -video_size.\n");
2825     return opt_default("video_size", arg);
2826 }
2827
2828 static int opt_width(const char *opt, const char *arg)
2829 {
2830     screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2831     return 0;
2832 }
2833
2834 static int opt_height(const char *opt, const char *arg)
2835 {
2836     screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2837     return 0;
2838 }
2839
2840 static int opt_format(const char *opt, const char *arg)
2841 {
2842     file_iformat = av_find_input_format(arg);
2843     if (!file_iformat) {
2844         fprintf(stderr, "Unknown input format: %s\n", arg);
2845         return AVERROR(EINVAL);
2846     }
2847     return 0;
2848 }
2849
2850 static int opt_frame_pix_fmt(const char *opt, const char *arg)
2851 {
2852     av_log(NULL, AV_LOG_WARNING, "Option -pix_fmt is deprecated, use -pixel_format.\n");
2853     return opt_default("pixel_format", arg);
2854 }
2855
2856 static int opt_sync(const char *opt, const char *arg)
2857 {
2858     if (!strcmp(arg, "audio"))
2859         av_sync_type = AV_SYNC_AUDIO_MASTER;
2860     else if (!strcmp(arg, "video"))
2861         av_sync_type = AV_SYNC_VIDEO_MASTER;
2862     else if (!strcmp(arg, "ext"))
2863         av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2864     else {
2865         fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
2866         exit(1);
2867     }
2868     return 0;
2869 }
2870
2871 static int opt_seek(const char *opt, const char *arg)
2872 {
2873     start_time = parse_time_or_die(opt, arg, 1);
2874     return 0;
2875 }
2876
2877 static int opt_duration(const char *opt, const char *arg)
2878 {
2879     duration = parse_time_or_die(opt, arg, 1);
2880     return 0;
2881 }
2882
2883 static int opt_thread_count(const char *opt, const char *arg)
2884 {
2885     thread_count= parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2886 #if !HAVE_THREADS
2887     fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
2888 #endif
2889     return 0;
2890 }
2891
2892 static int opt_show_mode(const char *opt, const char *arg)
2893 {
2894     show_mode = !strcmp(arg, "video") ? SHOW_MODE_VIDEO :
2895                 !strcmp(arg, "waves") ? SHOW_MODE_WAVES :
2896                 !strcmp(arg, "rdft" ) ? SHOW_MODE_RDFT  :
2897                 parse_number_or_die(opt, arg, OPT_INT, 0, SHOW_MODE_NB-1);
2898     return 0;
2899 }
2900
2901 static int opt_input_file(const char *opt, const char *filename)
2902 {
2903     if (input_filename) {
2904         fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
2905                 filename, input_filename);
2906         exit(1);
2907     }
2908     if (!strcmp(filename, "-"))
2909         filename = "pipe:";
2910     input_filename = filename;
2911     return 0;
2912 }
2913
2914 static const OptionDef options[] = {
2915 #include "cmdutils_common_opts.h"
2916     { "x", HAS_ARG, {(void*)opt_width}, "force displayed width", "width" },
2917     { "y", HAS_ARG, {(void*)opt_height}, "force displayed height", "height" },
2918     { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
2919     { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
2920     { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
2921     { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
2922     { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_AUDIO]}, "select desired audio stream", "stream_number" },
2923     { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_VIDEO]}, "select desired video stream", "stream_number" },
2924     { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_SUBTITLE]}, "select desired subtitle stream", "stream_number" },
2925     { "ss", HAS_ARG, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
2926     { "t", HAS_ARG, {(void*)&opt_duration}, "play \"duration\" seconds of audio/video", "duration" },
2927     { "bytes", OPT_INT | HAS_ARG, {(void*)&seek_by_bytes}, "seek by bytes 0=off 1=on -1=auto", "val" },
2928     { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
2929     { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
2930     { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
2931     { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
2932     { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
2933     { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
2934     { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
2935     { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts 0=off 1=on -1=auto", ""},
2936     { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
2937     { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
2938     { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
2939     { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
2940     { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo",  "algo" },
2941     { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_recognition}, "set error detection threshold (0-4)",  "threshold" },
2942     { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options",  "bit_mask" },
2943     { "sync", HAS_ARG | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
2944     { "threads", HAS_ARG | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
2945     { "autoexit", OPT_BOOL | OPT_EXPERT, {(void*)&autoexit}, "exit at the end", "" },
2946     { "exitonkeydown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_keydown}, "exit on key down", "" },
2947     { "exitonmousedown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_mousedown}, "exit on mouse down", "" },
2948     { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&loop}, "set number of times the playback shall be looped", "loop count" },
2949     { "framedrop", OPT_BOOL | OPT_EXPERT, {(void*)&framedrop}, "drop frames when cpu is too slow", "" },
2950     { "window_title", OPT_STRING | HAS_ARG, {(void*)&window_title}, "set window title", "window title" },
2951 #if CONFIG_AVFILTER
2952     { "vf", OPT_STRING | HAS_ARG, {(void*)&vfilters}, "video filters", "filter list" },
2953 #endif
2954     { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, {(void*)&rdftspeed}, "rdft speed", "msecs" },
2955     { "showmode", HAS_ARG, {(void*)opt_show_mode}, "select show mode (0 = video, 1 = waves, 2 = RDFT)", "mode" },
2956     { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
2957     { "i", HAS_ARG, {(void *)opt_input_file}, "read specified file", "input_file"},
2958     { NULL, },
2959 };
2960
2961 static void show_usage(void)
2962 {
2963     printf("Simple media player\n");
2964     printf("usage: %s [options] input_file\n", program_name);
2965     printf("\n");
2966 }
2967
2968 static int opt_help(const char *opt, const char *arg)
2969 {
2970     av_log_set_callback(log_callback_help);
2971     show_usage();
2972     show_help_options(options, "Main options:\n",
2973                       OPT_EXPERT, 0);
2974     show_help_options(options, "\nAdvanced options:\n",
2975                       OPT_EXPERT, OPT_EXPERT);
2976     printf("\n");
2977     av_opt_show2(avcodec_opts[0], NULL,
2978                  AV_OPT_FLAG_DECODING_PARAM, 0);
2979     printf("\n");
2980     av_opt_show2(avformat_opts, NULL,
2981                  AV_OPT_FLAG_DECODING_PARAM, 0);
2982 #if !CONFIG_AVFILTER
2983     printf("\n");
2984     av_opt_show2(sws_opts, NULL,
2985                  AV_OPT_FLAG_ENCODING_PARAM, 0);
2986 #endif
2987     printf("\nWhile playing:\n"
2988            "q, ESC              quit\n"
2989            "f                   toggle full screen\n"
2990            "p, SPC              pause\n"
2991            "a                   cycle audio channel\n"
2992            "v                   cycle video channel\n"
2993            "t                   cycle subtitle channel\n"
2994            "w                   cycle through audio display modes\n"
2995            "s                   activate frame-step mode\n"
2996            "left/right          seek backward/forward 10 seconds\n"
2997            "down/up             seek backward/forward 1 minute\n"
2998            "mouse click         seek to percentage in file corresponding to fraction of width\n"
2999            );
3000     return 0;
3001 }
3002
3003 /* program entry point */
3004 int main(int argc, char **argv)
3005 {
3006     int flags;
3007     VideoState *is;
3008
3009     av_log_set_flags(AV_LOG_SKIP_REPEATED);
3010
3011     /* register all codecs, demuxers and protocols */
3012     avcodec_register_all();
3013 #if CONFIG_AVDEVICE
3014     avdevice_register_all();
3015 #endif
3016 #if CONFIG_AVFILTER
3017     avfilter_register_all();
3018 #endif
3019     av_register_all();
3020
3021     init_opts();
3022
3023     show_banner();
3024
3025     parse_options(argc, argv, options, opt_input_file);
3026
3027     if (!input_filename) {
3028         show_usage();
3029         fprintf(stderr, "An input file must be specified\n");
3030         fprintf(stderr, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
3031         exit(1);
3032     }
3033
3034     if (display_disable) {
3035         video_disable = 1;
3036     }
3037     flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3038     if (audio_disable)
3039         flags &= ~SDL_INIT_AUDIO;
3040 #if !defined(__MINGW32__) && !defined(__APPLE__)
3041     flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
3042 #endif
3043     if (SDL_Init (flags)) {
3044         fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
3045         fprintf(stderr, "(Did you set the DISPLAY variable?)\n");
3046         exit(1);
3047     }
3048
3049     if (!display_disable) {
3050 #if HAVE_SDL_VIDEO_SIZE
3051         const SDL_VideoInfo *vi = SDL_GetVideoInfo();
3052         fs_screen_width = vi->current_w;
3053         fs_screen_height = vi->current_h;
3054 #endif
3055     }
3056
3057     SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
3058     SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3059     SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3060
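     /* flush_pkt is a sentinel packet: after a seek it is pushed into every
        packet queue, and the decoding threads flush their codec buffers when
        they see its data pointer */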
3061     av_init_packet(&flush_pkt);
3062     flush_pkt.data= "FLUSH";
3063
3064     is = stream_open(input_filename, file_iformat);
3065     if (!is) {
3066         fprintf(stderr, "Failed to initialize VideoState!\n");
3067         do_exit(NULL);
3068     }
3069
3070     event_loop(is);
3071
3072     /* never returns */
3073
3074     return 0;
3075 }