Make all option parsing functions match the function pointer type through which they...
[ffmpeg.git] / ffplay.c
1 /*
2  * ffplay : Simple Media Player based on the FFmpeg libraries
3  * Copyright (c) 2003 Fabrice Bellard
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21
22 #include "config.h"
23 #include <inttypes.h>
24 #include <math.h>
25 #include <limits.h>
26 #include "libavutil/avstring.h"
27 #include "libavutil/colorspace.h"
28 #include "libavutil/pixdesc.h"
29 #include "libavutil/imgutils.h"
30 #include "libavutil/dict.h"
31 #include "libavutil/parseutils.h"
32 #include "libavutil/samplefmt.h"
33 #include "libavutil/avassert.h"
34 #include "libavformat/avformat.h"
35 #include "libavdevice/avdevice.h"
36 #include "libswscale/swscale.h"
37 #include "libavcodec/audioconvert.h"
38 #include "libavutil/opt.h"
39 #include "libavcodec/avfft.h"
40
41 #if CONFIG_AVFILTER
42 # include "libavfilter/avcodec.h"
43 # include "libavfilter/avfilter.h"
44 # include "libavfilter/avfiltergraph.h"
45 # include "libavfilter/vsink_buffer.h"
46 #endif
47
48 #include <SDL.h>
49 #include <SDL_thread.h>
50
51 #include "cmdutils.h"
52
53 #include <unistd.h>
54 #include <assert.h>
55
56 const char program_name[] = "ffplay";
57 const int program_birth_year = 2003;
58
59 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
60 #define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
61 #define MIN_FRAMES 5
62
63 /* SDL audio buffer size, in samples. Should be small to have precise
64    A/V sync as SDL does not have hardware buffer fullness info. */
65 #define SDL_AUDIO_BUFFER_SIZE 1024
66
67 /* no AV sync correction is done if below the AV sync threshold */
68 #define AV_SYNC_THRESHOLD 0.01
69 /* no AV correction is done if too big error */
70 #define AV_NOSYNC_THRESHOLD 10.0
71
72 #define FRAME_SKIP_FACTOR 0.05
73
74 /* maximum audio speed change to get correct sync */
75 #define SAMPLE_CORRECTION_PERCENT_MAX 10
76
77 /* we use about AUDIO_DIFF_AVG_NB A-V differences to compute the average */
78 #define AUDIO_DIFF_AVG_NB   20
79
80 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
81 #define SAMPLE_ARRAY_SIZE (2*65536)
82
83 static int sws_flags = SWS_BICUBIC;
84
85 typedef struct PacketQueue {
86     AVPacketList *first_pkt, *last_pkt;
87     int nb_packets;
88     int size;
89     int abort_request;
90     SDL_mutex *mutex;
91     SDL_cond *cond;
92 } PacketQueue;
93
94 #define VIDEO_PICTURE_QUEUE_SIZE 2
95 #define SUBPICTURE_QUEUE_SIZE 4
96
97 typedef struct VideoPicture {
98     double pts;                                  ///<presentation time stamp for this picture
99     double target_clock;                         ///<av_gettime() time at which this should be displayed ideally
100     int64_t pos;                                 ///<byte position in file
101     SDL_Overlay *bmp;
102     int width, height; /* source height & width */
103     int allocated;
104     enum PixelFormat pix_fmt;
105
106 #if CONFIG_AVFILTER
107     AVFilterBufferRef *picref;
108 #endif
109 } VideoPicture;
110
111 typedef struct SubPicture {
112     double pts; /* presentation time stamp for this picture */
113     AVSubtitle sub;
114 } SubPicture;
115
116 enum {
117     AV_SYNC_AUDIO_MASTER, /* default choice */
118     AV_SYNC_VIDEO_MASTER,
119     AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
120 };
121
122 typedef struct VideoState {
123     SDL_Thread *read_tid;
124     SDL_Thread *video_tid;
125     SDL_Thread *refresh_tid;
126     AVInputFormat *iformat;
127     int no_background;
128     int abort_request;
129     int paused;
130     int last_paused;
131     int seek_req;
132     int seek_flags;
133     int64_t seek_pos;
134     int64_t seek_rel;
135     int read_pause_return;
136     AVFormatContext *ic;
137
138     int audio_stream;
139
140     int av_sync_type;
141     double external_clock; /* external clock base */
142     int64_t external_clock_time;
143
144     double audio_clock;
145     double audio_diff_cum; /* used for AV difference average computation */
146     double audio_diff_avg_coef;
147     double audio_diff_threshold;
148     int audio_diff_avg_count;
149     AVStream *audio_st;
150     PacketQueue audioq;
151     int audio_hw_buf_size;
152     /* samples output by the codec. We reserve more space for A/V sync
153        compensation. */
154     DECLARE_ALIGNED(16,uint8_t,audio_buf1)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
155     DECLARE_ALIGNED(16,uint8_t,audio_buf2)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
156     uint8_t *audio_buf;
157     unsigned int audio_buf_size; /* in bytes */
158     int audio_buf_index; /* in bytes */
159     AVPacket audio_pkt_temp;
160     AVPacket audio_pkt;
161     enum AVSampleFormat audio_src_fmt;
162     AVAudioConvert *reformat_ctx;
163
164     enum ShowMode {
165         SHOW_MODE_NONE = -1, SHOW_MODE_VIDEO = 0, SHOW_MODE_WAVES, SHOW_MODE_RDFT, SHOW_MODE_NB
166     } show_mode;
167     int16_t sample_array[SAMPLE_ARRAY_SIZE];
168     int sample_array_index;
169     int last_i_start;
170     RDFTContext *rdft;
171     int rdft_bits;
172     FFTSample *rdft_data;
173     int xpos;
174
175     SDL_Thread *subtitle_tid;
176     int subtitle_stream;
177     int subtitle_stream_changed;
178     AVStream *subtitle_st;
179     PacketQueue subtitleq;
180     SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
181     int subpq_size, subpq_rindex, subpq_windex;
182     SDL_mutex *subpq_mutex;
183     SDL_cond *subpq_cond;
184
185     double frame_timer;
186     double frame_last_pts;
187     double frame_last_delay;
188     double video_clock;                          ///<pts of last decoded frame / predicted pts of next decoded frame
189     int video_stream;
190     AVStream *video_st;
191     PacketQueue videoq;
192     double video_current_pts;                    ///<current displayed pts (different from video_clock if frame fifos are used)
193     double video_current_pts_drift;              ///<video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
194     int64_t video_current_pos;                   ///<current displayed file pos
195     VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
196     int pictq_size, pictq_rindex, pictq_windex;
197     SDL_mutex *pictq_mutex;
198     SDL_cond *pictq_cond;
199 #if !CONFIG_AVFILTER
200     struct SwsContext *img_convert_ctx;
201 #endif
202
203     char filename[1024];
204     int width, height, xleft, ytop;
205
206 #if CONFIG_AVFILTER
207     AVFilterContext *out_video_filter;          ///<the last filter in the video chain
208 #endif
209
210     float skip_frames;
211     float skip_frames_index;
212     int refresh;
213 } VideoState;
214
215 static int opt_help(const char *opt, const char *arg);
216
217 /* options specified by the user */
218 static AVInputFormat *file_iformat;
219 static const char *input_filename;
220 static const char *window_title;
221 static int fs_screen_width;
222 static int fs_screen_height;
223 static int screen_width = 0;
224 static int screen_height = 0;
225 static int frame_width = 0;
226 static int frame_height = 0;
227 static enum PixelFormat frame_pix_fmt = PIX_FMT_NONE;
228 static int audio_disable;
229 static int video_disable;
230 static int wanted_stream[AVMEDIA_TYPE_NB]={
231     [AVMEDIA_TYPE_AUDIO]=-1,
232     [AVMEDIA_TYPE_VIDEO]=-1,
233     [AVMEDIA_TYPE_SUBTITLE]=-1,
234 };
235 static int seek_by_bytes=-1;
236 static int display_disable;
237 static int show_status = 1;
238 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
239 static int64_t start_time = AV_NOPTS_VALUE;
240 static int64_t duration = AV_NOPTS_VALUE;
241 static int step = 0;
242 static int thread_count = 1;
243 static int workaround_bugs = 1;
244 static int fast = 0;
245 static int genpts = 0;
246 static int lowres = 0;
247 static int idct = FF_IDCT_AUTO;
248 static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
249 static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
250 static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
251 static int error_recognition = FF_ER_CAREFUL;
252 static int error_concealment = 3;
253 static int decoder_reorder_pts= -1;
254 static int autoexit;
255 static int exit_on_keydown;
256 static int exit_on_mousedown;
257 static int loop=1;
258 static int framedrop=-1;
259 static enum ShowMode show_mode = SHOW_MODE_NONE;
260
261 static int rdftspeed=20;
262 #if CONFIG_AVFILTER
263 static char *vfilters = NULL;
264 #endif
265
266 /* current context */
267 static int is_full_screen;
268 static VideoState *cur_stream;
269 static int64_t audio_callback_time;
270
271 static AVPacket flush_pkt;
272
273 #define FF_ALLOC_EVENT   (SDL_USEREVENT)
274 #define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
275 #define FF_QUIT_EVENT    (SDL_USEREVENT + 2)
276
277 static SDL_Surface *screen;
278
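/* append a packet to the queue; the packet data is duplicated unless it is the flush packet */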
279 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
280 {
281     AVPacketList *pkt1;
282
283     /* duplicate the packet */
284     if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
285         return -1;
286
287     pkt1 = av_malloc(sizeof(AVPacketList));
288     if (!pkt1)
289         return -1;
290     pkt1->pkt = *pkt;
291     pkt1->next = NULL;
292
293
294     SDL_LockMutex(q->mutex);
295
296     if (!q->last_pkt)
297
298         q->first_pkt = pkt1;
299     else
300         q->last_pkt->next = pkt1;
301     q->last_pkt = pkt1;
302     q->nb_packets++;
303     q->size += pkt1->pkt.size + sizeof(*pkt1);
304     /* XXX: should duplicate packet data in DV case */
305     SDL_CondSignal(q->cond);
306
307     SDL_UnlockMutex(q->mutex);
308     return 0;
309 }
310
311 /* packet queue handling */
312 static void packet_queue_init(PacketQueue *q)
313 {
314     memset(q, 0, sizeof(PacketQueue));
315     q->mutex = SDL_CreateMutex();
316     q->cond = SDL_CreateCond();
317     packet_queue_put(q, &flush_pkt);
318 }
319
320 static void packet_queue_flush(PacketQueue *q)
321 {
322     AVPacketList *pkt, *pkt1;
323
324     SDL_LockMutex(q->mutex);
325     for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
326         pkt1 = pkt->next;
327         av_free_packet(&pkt->pkt);
328         av_freep(&pkt);
329     }
330     q->last_pkt = NULL;
331     q->first_pkt = NULL;
332     q->nb_packets = 0;
333     q->size = 0;
334     SDL_UnlockMutex(q->mutex);
335 }
336
337 static void packet_queue_end(PacketQueue *q)
338 {
339     packet_queue_flush(q);
340     SDL_DestroyMutex(q->mutex);
341     SDL_DestroyCond(q->cond);
342 }
343
344 static void packet_queue_abort(PacketQueue *q)
345 {
346     SDL_LockMutex(q->mutex);
347
348     q->abort_request = 1;
349
350     SDL_CondSignal(q->cond);
351
352     SDL_UnlockMutex(q->mutex);
353 }
354
355 /* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
356 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
357 {
358     AVPacketList *pkt1;
359     int ret;
360
361     SDL_LockMutex(q->mutex);
362
363     for(;;) {
364         if (q->abort_request) {
365             ret = -1;
366             break;
367         }
368
369         pkt1 = q->first_pkt;
370         if (pkt1) {
371             q->first_pkt = pkt1->next;
372             if (!q->first_pkt)
373                 q->last_pkt = NULL;
374             q->nb_packets--;
375             q->size -= pkt1->pkt.size + sizeof(*pkt1);
376             *pkt = pkt1->pkt;
377             av_free(pkt1);
378             ret = 1;
379             break;
380         } else if (!block) {
381             ret = 0;
382             break;
383         } else {
384             SDL_CondWait(q->cond, q->mutex);
385         }
386     }
387     SDL_UnlockMutex(q->mutex);
388     return ret;
389 }
390
391 static inline void fill_rectangle(SDL_Surface *screen,
392                                   int x, int y, int w, int h, int color)
393 {
394     SDL_Rect rect;
395     rect.x = x;
396     rect.y = y;
397     rect.w = w;
398     rect.h = h;
399     SDL_FillRect(screen, &rect, color);
400 }
401
402 #define ALPHA_BLEND(a, oldp, newp, s)\
403 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
404
405 #define RGBA_IN(r, g, b, a, s)\
406 {\
407     unsigned int v = ((const uint32_t *)(s))[0];\
408     a = (v >> 24) & 0xff;\
409     r = (v >> 16) & 0xff;\
410     g = (v >> 8) & 0xff;\
411     b = v & 0xff;\
412 }
413
414 #define YUVA_IN(y, u, v, a, s, pal)\
415 {\
416     unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
417     a = (val >> 24) & 0xff;\
418     y = (val >> 16) & 0xff;\
419     u = (val >> 8) & 0xff;\
420     v = val & 0xff;\
421 }
422
423 #define YUVA_OUT(d, y, u, v, a)\
424 {\
425     ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
426 }
427
428
429 #define BPP 1
430
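/* alpha-blend a paletted subtitle rectangle onto a YUV420P destination picture */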
431 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
432 {
433     int wrap, wrap3, width2, skip2;
434     int y, u, v, a, u1, v1, a1, w, h;
435     uint8_t *lum, *cb, *cr;
436     const uint8_t *p;
437     const uint32_t *pal;
438     int dstx, dsty, dstw, dsth;
439
440     dstw = av_clip(rect->w, 0, imgw);
441     dsth = av_clip(rect->h, 0, imgh);
442     dstx = av_clip(rect->x, 0, imgw - dstw);
443     dsty = av_clip(rect->y, 0, imgh - dsth);
444     lum = dst->data[0] + dsty * dst->linesize[0];
445     cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
446     cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
447
448     width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
449     skip2 = dstx >> 1;
450     wrap = dst->linesize[0];
451     wrap3 = rect->pict.linesize[0];
452     p = rect->pict.data[0];
453     pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */
454
455     if (dsty & 1) {
456         lum += dstx;
457         cb += skip2;
458         cr += skip2;
459
460         if (dstx & 1) {
461             YUVA_IN(y, u, v, a, p, pal);
462             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
463             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
464             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
465             cb++;
466             cr++;
467             lum++;
468             p += BPP;
469         }
470         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
471             YUVA_IN(y, u, v, a, p, pal);
472             u1 = u;
473             v1 = v;
474             a1 = a;
475             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
476
477             YUVA_IN(y, u, v, a, p + BPP, pal);
478             u1 += u;
479             v1 += v;
480             a1 += a;
481             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
482             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
483             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
484             cb++;
485             cr++;
486             p += 2 * BPP;
487             lum += 2;
488         }
489         if (w) {
490             YUVA_IN(y, u, v, a, p, pal);
491             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
492             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
493             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
494             p++;
495             lum++;
496         }
497         p += wrap3 - dstw * BPP;
498         lum += wrap - dstw - dstx;
499         cb += dst->linesize[1] - width2 - skip2;
500         cr += dst->linesize[2] - width2 - skip2;
501     }
502     for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
503         lum += dstx;
504         cb += skip2;
505         cr += skip2;
506
507         if (dstx & 1) {
508             YUVA_IN(y, u, v, a, p, pal);
509             u1 = u;
510             v1 = v;
511             a1 = a;
512             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
513             p += wrap3;
514             lum += wrap;
515             YUVA_IN(y, u, v, a, p, pal);
516             u1 += u;
517             v1 += v;
518             a1 += a;
519             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
520             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
521             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
522             cb++;
523             cr++;
524             p += -wrap3 + BPP;
525             lum += -wrap + 1;
526         }
527         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
528             YUVA_IN(y, u, v, a, p, pal);
529             u1 = u;
530             v1 = v;
531             a1 = a;
532             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
533
534             YUVA_IN(y, u, v, a, p + BPP, pal);
535             u1 += u;
536             v1 += v;
537             a1 += a;
538             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
539             p += wrap3;
540             lum += wrap;
541
542             YUVA_IN(y, u, v, a, p, pal);
543             u1 += u;
544             v1 += v;
545             a1 += a;
546             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
547
548             YUVA_IN(y, u, v, a, p + BPP, pal);
549             u1 += u;
550             v1 += v;
551             a1 += a;
552             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
553
554             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
555             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
556
557             cb++;
558             cr++;
559             p += -wrap3 + 2 * BPP;
560             lum += -wrap + 2;
561         }
562         if (w) {
563             YUVA_IN(y, u, v, a, p, pal);
564             u1 = u;
565             v1 = v;
566             a1 = a;
567             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
568             p += wrap3;
569             lum += wrap;
570             YUVA_IN(y, u, v, a, p, pal);
571             u1 += u;
572             v1 += v;
573             a1 += a;
574             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
575             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
576             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
577             cb++;
578             cr++;
579             p += -wrap3 + BPP;
580             lum += -wrap + 1;
581         }
582         p += wrap3 + (wrap3 - dstw * BPP);
583         lum += wrap + (wrap - dstw - dstx);
584         cb += dst->linesize[1] - width2 - skip2;
585         cr += dst->linesize[2] - width2 - skip2;
586     }
587     /* handle odd height */
588     if (h) {
589         lum += dstx;
590         cb += skip2;
591         cr += skip2;
592
593         if (dstx & 1) {
594             YUVA_IN(y, u, v, a, p, pal);
595             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
596             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
597             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
598             cb++;
599             cr++;
600             lum++;
601             p += BPP;
602         }
603         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
604             YUVA_IN(y, u, v, a, p, pal);
605             u1 = u;
606             v1 = v;
607             a1 = a;
608             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
609
610             YUVA_IN(y, u, v, a, p + BPP, pal);
611             u1 += u;
612             v1 += v;
613             a1 += a;
614             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
615             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
616             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
617             cb++;
618             cr++;
619             p += 2 * BPP;
620             lum += 2;
621         }
622         if (w) {
623             YUVA_IN(y, u, v, a, p, pal);
624             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
625             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
626             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
627         }
628     }
629 }
630
631 static void free_subpicture(SubPicture *sp)
632 {
633     avsubtitle_free(&sp->sub);
634 }
635
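/* display the picture at the read index of the queue, blending any pending subtitle into the overlay first */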
636 static void video_image_display(VideoState *is)
637 {
638     VideoPicture *vp;
639     SubPicture *sp;
640     AVPicture pict;
641     float aspect_ratio;
642     int width, height, x, y;
643     SDL_Rect rect;
644     int i;
645
646     vp = &is->pictq[is->pictq_rindex];
647     if (vp->bmp) {
648 #if CONFIG_AVFILTER
649          if (vp->picref->video->sample_aspect_ratio.num == 0)
650              aspect_ratio = 0;
651          else
652              aspect_ratio = av_q2d(vp->picref->video->sample_aspect_ratio);
653 #else
654
655         /* XXX: use variable in the frame */
656         if (is->video_st->sample_aspect_ratio.num)
657             aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
658         else if (is->video_st->codec->sample_aspect_ratio.num)
659             aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
660         else
661             aspect_ratio = 0;
662 #endif
663         if (aspect_ratio <= 0.0)
664             aspect_ratio = 1.0;
665         aspect_ratio *= (float)vp->width / (float)vp->height;
666
667         if (is->subtitle_st) {
668             if (is->subpq_size > 0) {
669                 sp = &is->subpq[is->subpq_rindex];
670
671                 if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) {
672                     SDL_LockYUVOverlay (vp->bmp);
673
674                     pict.data[0] = vp->bmp->pixels[0];
675                     pict.data[1] = vp->bmp->pixels[2];
676                     pict.data[2] = vp->bmp->pixels[1];
677
678                     pict.linesize[0] = vp->bmp->pitches[0];
679                     pict.linesize[1] = vp->bmp->pitches[2];
680                     pict.linesize[2] = vp->bmp->pitches[1];
681
682                     for (i = 0; i < sp->sub.num_rects; i++)
683                         blend_subrect(&pict, sp->sub.rects[i],
684                                       vp->bmp->w, vp->bmp->h);
685
686                     SDL_UnlockYUVOverlay (vp->bmp);
687                 }
688             }
689         }
690
691
692         /* XXX: we assume the screen has a 1.0 pixel aspect ratio */
693         height = is->height;
694         width = ((int)rint(height * aspect_ratio)) & ~1;
695         if (width > is->width) {
696             width = is->width;
697             height = ((int)rint(width / aspect_ratio)) & ~1;
698         }
699         x = (is->width - width) / 2;
700         y = (is->height - height) / 2;
701         is->no_background = 0;
702         rect.x = is->xleft + x;
703         rect.y = is->ytop  + y;
704         rect.w = FFMAX(width,  1);
705         rect.h = FFMAX(height, 1);
706         SDL_DisplayYUVOverlay(vp->bmp, &rect);
707     }
708 }
709
710 /* get the current audio output buffer size, in bytes. With SDL, we
711    cannot have precise information */
712 static int audio_write_get_buf_size(VideoState *is)
713 {
714     return is->audio_buf_size - is->audio_buf_index;
715 }
716
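/* modulo operation that avoids negative results */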
717 static inline int compute_mod(int a, int b)
718 {
719     return a < 0 ? a%b + b : a%b;
720 }
721
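/* draw the audio visualization: waveform in SHOW_MODE_WAVES, RDFT spectrogram otherwise */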
722 static void video_audio_display(VideoState *s)
723 {
724     int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
725     int ch, channels, h, h2, bgcolor, fgcolor;
726     int64_t time_diff;
727     int rdft_bits, nb_freq;
728
729     for(rdft_bits=1; (1<<rdft_bits)<2*s->height; rdft_bits++)
730         ;
731     nb_freq= 1<<(rdft_bits-1);
732
733     /* compute display index: center on currently output samples */
734     channels = s->audio_st->codec->channels;
735     nb_display_channels = channels;
736     if (!s->paused) {
737         int data_used= s->show_mode == SHOW_MODE_WAVES ? s->width : (2*nb_freq);
738         n = 2 * channels;
739         delay = audio_write_get_buf_size(s);
740         delay /= n;
741
742         /* to be more precise, we take into account the time spent since
743            the last buffer computation */
744         if (audio_callback_time) {
745             time_diff = av_gettime() - audio_callback_time;
746             delay -= (time_diff * s->audio_st->codec->sample_rate) / 1000000;
747         }
748
749         delay += 2*data_used;
750         if (delay < data_used)
751             delay = data_used;
752
753         i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
754         if (s->show_mode == SHOW_MODE_WAVES) {
755             h= INT_MIN;
756             for(i=0; i<1000; i+=channels){
757                 int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
758                 int a= s->sample_array[idx];
759                 int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
760                 int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
761                 int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
762                 int score= a-d;
763                 if(h<score && (b^c)<0){
764                     h= score;
765                     i_start= idx;
766                 }
767             }
768         }
769
770         s->last_i_start = i_start;
771     } else {
772         i_start = s->last_i_start;
773     }
774
775     bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
776     if (s->show_mode == SHOW_MODE_WAVES) {
777         fill_rectangle(screen,
778                        s->xleft, s->ytop, s->width, s->height,
779                        bgcolor);
780
781         fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
782
783         /* total height for one channel */
784         h = s->height / nb_display_channels;
785         /* graph height / 2 */
786         h2 = (h * 9) / 20;
787         for(ch = 0;ch < nb_display_channels; ch++) {
788             i = i_start + ch;
789             y1 = s->ytop + ch * h + (h / 2); /* position of center line */
790             for(x = 0; x < s->width; x++) {
791                 y = (s->sample_array[i] * h2) >> 15;
792                 if (y < 0) {
793                     y = -y;
794                     ys = y1 - y;
795                 } else {
796                     ys = y1;
797                 }
798                 fill_rectangle(screen,
799                                s->xleft + x, ys, 1, y,
800                                fgcolor);
801                 i += channels;
802                 if (i >= SAMPLE_ARRAY_SIZE)
803                     i -= SAMPLE_ARRAY_SIZE;
804             }
805         }
806
807         fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
808
809         for(ch = 1;ch < nb_display_channels; ch++) {
810             y = s->ytop + ch * h;
811             fill_rectangle(screen,
812                            s->xleft, y, s->width, 1,
813                            fgcolor);
814         }
815         SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
816     }else{
817         nb_display_channels= FFMIN(nb_display_channels, 2);
818         if(rdft_bits != s->rdft_bits){
819             av_rdft_end(s->rdft);
820             av_free(s->rdft_data);
821             s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
822             s->rdft_bits= rdft_bits;
823             s->rdft_data= av_malloc(4*nb_freq*sizeof(*s->rdft_data));
824         }
825         {
826             FFTSample *data[2];
827             for(ch = 0;ch < nb_display_channels; ch++) {
828                 data[ch] = s->rdft_data + 2*nb_freq*ch;
829                 i = i_start + ch;
830                 for(x = 0; x < 2*nb_freq; x++) {
831                     double w= (x-nb_freq)*(1.0/nb_freq);
832                     data[ch][x]= s->sample_array[i]*(1.0-w*w);
833                     i += channels;
834                     if (i >= SAMPLE_ARRAY_SIZE)
835                         i -= SAMPLE_ARRAY_SIZE;
836                 }
837                 av_rdft_calc(s->rdft, data[ch]);
838             }
839             // Least efficient way to do this; we should directly access the data, but it's more than fast enough.
840             for(y=0; y<s->height; y++){
841                 double w= 1/sqrt(nb_freq);
842                 int a= sqrt(w*sqrt(data[0][2*y+0]*data[0][2*y+0] + data[0][2*y+1]*data[0][2*y+1]));
843                 int b= (nb_display_channels == 2 ) ? sqrt(w*sqrt(data[1][2*y+0]*data[1][2*y+0]
844                        + data[1][2*y+1]*data[1][2*y+1])) : a;
845                 a= FFMIN(a,255);
846                 b= FFMIN(b,255);
847                 fgcolor = SDL_MapRGB(screen->format, a, b, (a+b)/2);
848
849                 fill_rectangle(screen,
850                             s->xpos, s->height-y, 1, 1,
851                             fgcolor);
852             }
853         }
854         SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
855         s->xpos++;
856         if(s->xpos >= s->width)
857             s->xpos= s->xleft;
858     }
859 }
860
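/* request the read and refresh threads to stop, wait for them and free the VideoState */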
861 static void stream_close(VideoState *is)
862 {
863     VideoPicture *vp;
864     int i;
865     /* XXX: use a special url_shutdown call to abort parse cleanly */
866     is->abort_request = 1;
867     SDL_WaitThread(is->read_tid, NULL);
868     SDL_WaitThread(is->refresh_tid, NULL);
869
870     /* free all pictures */
871     for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
872         vp = &is->pictq[i];
873 #if CONFIG_AVFILTER
874         if (vp->picref) {
875             avfilter_unref_buffer(vp->picref);
876             vp->picref = NULL;
877         }
878 #endif
879         if (vp->bmp) {
880             SDL_FreeYUVOverlay(vp->bmp);
881             vp->bmp = NULL;
882         }
883     }
884     SDL_DestroyMutex(is->pictq_mutex);
885     SDL_DestroyCond(is->pictq_cond);
886     SDL_DestroyMutex(is->subpq_mutex);
887     SDL_DestroyCond(is->subpq_cond);
888 #if !CONFIG_AVFILTER
889     if (is->img_convert_ctx)
890         sws_freeContext(is->img_convert_ctx);
891 #endif
892     av_free(is);
893 }
894
895 static void do_exit(void)
896 {
897     if (cur_stream) {
898         stream_close(cur_stream);
899         cur_stream = NULL;
900     }
901     uninit_opts();
902 #if CONFIG_AVFILTER
903     avfilter_uninit();
904 #endif
905     if (show_status)
906         printf("\n");
907     SDL_Quit();
908     av_log(NULL, AV_LOG_QUIET, "%s", "");
909     exit(0);
910 }
911
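/* create or resize the SDL output surface according to the stream or user-requested dimensions */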
912 static int video_open(VideoState *is){
913     int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
914     int w,h;
915
916     if(is_full_screen) flags |= SDL_FULLSCREEN;
917     else               flags |= SDL_RESIZABLE;
918
919     if (is_full_screen && fs_screen_width) {
920         w = fs_screen_width;
921         h = fs_screen_height;
922     } else if(!is_full_screen && screen_width){
923         w = screen_width;
924         h = screen_height;
925 #if CONFIG_AVFILTER
926     }else if (is->out_video_filter && is->out_video_filter->inputs[0]){
927         w = is->out_video_filter->inputs[0]->w;
928         h = is->out_video_filter->inputs[0]->h;
929 #else
930     }else if (is->video_st && is->video_st->codec->width){
931         w = is->video_st->codec->width;
932         h = is->video_st->codec->height;
933 #endif
934     } else {
935         w = 640;
936         h = 480;
937     }
938     if(screen && is->width == screen->w && screen->w == w
939        && is->height== screen->h && screen->h == h)
940         return 0;
941
942 #ifndef __APPLE__
943     screen = SDL_SetVideoMode(w, h, 0, flags);
944 #else
945     /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
946     screen = SDL_SetVideoMode(w, h, 24, flags);
947 #endif
948     if (!screen) {
949         fprintf(stderr, "SDL: could not set video mode - exiting\n");
950         do_exit();
951     }
952     if (!window_title)
953         window_title = input_filename;
954     SDL_WM_SetCaption(window_title, window_title);
955
956     is->width = screen->w;
957     is->height = screen->h;
958
959     return 0;
960 }
961
962 /* display the current picture, if any */
963 static void video_display(VideoState *is)
964 {
965     if(!screen)
966         video_open(cur_stream);
967     if (is->audio_st && is->show_mode != SHOW_MODE_VIDEO)
968         video_audio_display(is);
969     else if (is->video_st)
970         video_image_display(is);
971 }
972
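/* thread that periodically queues FF_REFRESH_EVENT events to trigger display refreshes */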
973 static int refresh_thread(void *opaque)
974 {
975     VideoState *is= opaque;
976     while(!is->abort_request){
977         SDL_Event event;
978         event.type = FF_REFRESH_EVENT;
979         event.user.data1 = opaque;
980         if(!is->refresh){
981             is->refresh=1;
982             SDL_PushEvent(&event);
983         }
984         //FIXME ideally we should wait for the correct time, but SDL's event passing is so slow it would be silly
985         usleep(is->audio_st && is->show_mode != SHOW_MODE_VIDEO ? rdftspeed*1000 : 5000);
986     }
987     return 0;
988 }
989
990 /* get the current audio clock value */
991 static double get_audio_clock(VideoState *is)
992 {
993     double pts;
994     int hw_buf_size, bytes_per_sec;
995     pts = is->audio_clock;
996     hw_buf_size = audio_write_get_buf_size(is);
997     bytes_per_sec = 0;
998     if (is->audio_st) {
999         bytes_per_sec = is->audio_st->codec->sample_rate *
1000             2 * is->audio_st->codec->channels;
1001     }
1002     if (bytes_per_sec)
1003         pts -= (double)hw_buf_size / bytes_per_sec;
1004     return pts;
1005 }
1006
1007 /* get the current video clock value */
1008 static double get_video_clock(VideoState *is)
1009 {
1010     if (is->paused) {
1011         return is->video_current_pts;
1012     } else {
1013         return is->video_current_pts_drift + av_gettime() / 1000000.0;
1014     }
1015 }
1016
1017 /* get the current external clock value */
1018 static double get_external_clock(VideoState *is)
1019 {
1020     int64_t ti;
1021     ti = av_gettime();
1022     return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
1023 }
1024
1025 /* get the current master clock value */
1026 static double get_master_clock(VideoState *is)
1027 {
1028     double val;
1029
1030     if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1031         if (is->video_st)
1032             val = get_video_clock(is);
1033         else
1034             val = get_audio_clock(is);
1035     } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1036         if (is->audio_st)
1037             val = get_audio_clock(is);
1038         else
1039             val = get_video_clock(is);
1040     } else {
1041         val = get_external_clock(is);
1042     }
1043     return val;
1044 }
1045
1046 /* seek in the stream */
1047 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1048 {
1049     if (!is->seek_req) {
1050         is->seek_pos = pos;
1051         is->seek_rel = rel;
1052         is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1053         if (seek_by_bytes)
1054             is->seek_flags |= AVSEEK_FLAG_BYTE;
1055         is->seek_req = 1;
1056     }
1057 }
1058
1059 /* pause or resume the video */
1060 static void stream_toggle_pause(VideoState *is)
1061 {
1062     if (is->paused) {
1063         is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
1064         if(is->read_pause_return != AVERROR(ENOSYS)){
1065             is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
1066         }
1067         is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
1068     }
1069     is->paused = !is->paused;
1070 }
1071
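/* compute the wall-clock time at which the given frame should be displayed, adjusting the delay to follow the master clock */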
1072 static double compute_target_time(double frame_current_pts, VideoState *is)
1073 {
1074     double delay, sync_threshold, diff;
1075
1076     /* compute nominal delay */
1077     delay = frame_current_pts - is->frame_last_pts;
1078     if (delay <= 0 || delay >= 10.0) {
1079         /* if incorrect delay, use previous one */
1080         delay = is->frame_last_delay;
1081     } else {
1082         is->frame_last_delay = delay;
1083     }
1084     is->frame_last_pts = frame_current_pts;
1085
1086     /* update delay to follow master synchronisation source */
1087     if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1088          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1089         /* if video is slave, we try to correct big delays by
1090            duplicating or deleting a frame */
1091         diff = get_video_clock(is) - get_master_clock(is);
1092
1093         /* skip or repeat frame. We take into account the
1094            delay to compute the threshold. I still don't know
1095            if it is the best guess */
1096         sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1097         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1098             if (diff <= -sync_threshold)
1099                 delay = 0;
1100             else if (diff >= sync_threshold)
1101                 delay = 2 * delay;
1102         }
1103     }
1104     is->frame_timer += delay;
1105
1106     av_dlog(NULL, "video: delay=%0.3f pts=%0.3f A-V=%f\n",
1107             delay, frame_current_pts, -diff);
1108
1109     return is->frame_timer;
1110 }
1111
1112 /* called to display each frame */
1113 static void video_refresh(void *opaque)
1114 {
1115     VideoState *is = opaque;
1116     VideoPicture *vp;
1117
1118     SubPicture *sp, *sp2;
1119
1120     if (is->video_st) {
1121 retry:
1122         if (is->pictq_size == 0) {
1123             // nothing to do, no picture to display in the queue
1124         } else {
1125             double time= av_gettime()/1000000.0;
1126             double next_target;
1127             /* dequeue the picture */
1128             vp = &is->pictq[is->pictq_rindex];
1129
1130             if(time < vp->target_clock)
1131                 return;
1132             /* update current video pts */
1133             is->video_current_pts = vp->pts;
1134             is->video_current_pts_drift = is->video_current_pts - time;
1135             is->video_current_pos = vp->pos;
1136             if(is->pictq_size > 1){
1137                 VideoPicture *nextvp= &is->pictq[(is->pictq_rindex+1)%VIDEO_PICTURE_QUEUE_SIZE];
1138                 assert(nextvp->target_clock >= vp->target_clock);
1139                 next_target= nextvp->target_clock;
1140             }else{
1141                 next_target= vp->target_clock + is->video_clock - vp->pts; //FIXME pass durations cleanly
1142             }
1143             if((framedrop>0 || (framedrop && is->audio_st)) && time > next_target){
1144                 is->skip_frames *= 1.0 + FRAME_SKIP_FACTOR;
1145                 if(is->pictq_size > 1 || time > next_target + 0.5){
1146                     /* update queue size and signal for next picture */
1147                     if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1148                         is->pictq_rindex = 0;
1149
1150                     SDL_LockMutex(is->pictq_mutex);
1151                     is->pictq_size--;
1152                     SDL_CondSignal(is->pictq_cond);
1153                     SDL_UnlockMutex(is->pictq_mutex);
1154                     goto retry;
1155                 }
1156             }
1157
1158             if(is->subtitle_st) {
1159                 if (is->subtitle_stream_changed) {
1160                     SDL_LockMutex(is->subpq_mutex);
1161
1162                     while (is->subpq_size) {
1163                         free_subpicture(&is->subpq[is->subpq_rindex]);
1164
1165                         /* update queue size and signal for next picture */
1166                         if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1167                             is->subpq_rindex = 0;
1168
1169                         is->subpq_size--;
1170                     }
1171                     is->subtitle_stream_changed = 0;
1172
1173                     SDL_CondSignal(is->subpq_cond);
1174                     SDL_UnlockMutex(is->subpq_mutex);
1175                 } else {
1176                     if (is->subpq_size > 0) {
1177                         sp = &is->subpq[is->subpq_rindex];
1178
1179                         if (is->subpq_size > 1)
1180                             sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1181                         else
1182                             sp2 = NULL;
1183
1184                         if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1185                                 || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1186                         {
1187                             free_subpicture(sp);
1188
1189                             /* update queue size and signal for next picture */
1190                             if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1191                                 is->subpq_rindex = 0;
1192
1193                             SDL_LockMutex(is->subpq_mutex);
1194                             is->subpq_size--;
1195                             SDL_CondSignal(is->subpq_cond);
1196                             SDL_UnlockMutex(is->subpq_mutex);
1197                         }
1198                     }
1199                 }
1200             }
1201
1202             /* display picture */
1203             if (!display_disable)
1204                 video_display(is);
1205
1206             /* update queue size and signal for next picture */
1207             if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1208                 is->pictq_rindex = 0;
1209
1210             SDL_LockMutex(is->pictq_mutex);
1211             is->pictq_size--;
1212             SDL_CondSignal(is->pictq_cond);
1213             SDL_UnlockMutex(is->pictq_mutex);
1214         }
1215     } else if (is->audio_st) {
1216         /* draw the next audio frame */
1217
1218         /* if there is only an audio stream, display the audio bars (better
1219            than nothing, just to test the implementation) */
1220
1221         /* display picture */
1222         if (!display_disable)
1223             video_display(is);
1224     }
1225     if (show_status) {
1226         static int64_t last_time;
1227         int64_t cur_time;
1228         int aqsize, vqsize, sqsize;
1229         double av_diff;
1230
1231         cur_time = av_gettime();
1232         if (!last_time || (cur_time - last_time) >= 30000) {
1233             aqsize = 0;
1234             vqsize = 0;
1235             sqsize = 0;
1236             if (is->audio_st)
1237                 aqsize = is->audioq.size;
1238             if (is->video_st)
1239                 vqsize = is->videoq.size;
1240             if (is->subtitle_st)
1241                 sqsize = is->subtitleq.size;
1242             av_diff = 0;
1243             if (is->audio_st && is->video_st)
1244                 av_diff = get_audio_clock(is) - get_video_clock(is);
1245             printf("%7.2f A-V:%7.3f s:%3.1f aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
1246                    get_master_clock(is),
1247                    av_diff,
1248                    FFMAX(is->skip_frames-1, 0),
1249                    aqsize / 1024,
1250                    vqsize / 1024,
1251                    sqsize,
1252                    is->video_st ? is->video_st->codec->pts_correction_num_faulty_dts : 0,
1253                    is->video_st ? is->video_st->codec->pts_correction_num_faulty_pts : 0);
1254             fflush(stdout);
1255             last_time = cur_time;
1256         }
1257     }
1258 }
1259
1260 /* allocate a picture (this must be done in the main thread to avoid
1261    potential locking problems) */
1262 static void alloc_picture(void *opaque)
1263 {
1264     VideoState *is = opaque;
1265     VideoPicture *vp;
1266
1267     vp = &is->pictq[is->pictq_windex];
1268
1269     if (vp->bmp)
1270         SDL_FreeYUVOverlay(vp->bmp);
1271
1272 #if CONFIG_AVFILTER
1273     if (vp->picref)
1274         avfilter_unref_buffer(vp->picref);
1275     vp->picref = NULL;
1276
1277     vp->width   = is->out_video_filter->inputs[0]->w;
1278     vp->height  = is->out_video_filter->inputs[0]->h;
1279     vp->pix_fmt = is->out_video_filter->inputs[0]->format;
1280 #else
1281     vp->width   = is->video_st->codec->width;
1282     vp->height  = is->video_st->codec->height;
1283     vp->pix_fmt = is->video_st->codec->pix_fmt;
1284 #endif
1285
1286     vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
1287                                    SDL_YV12_OVERLAY,
1288                                    screen);
1289     if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
1290         /* SDL allocates a buffer smaller than requested if the video
1291          * overlay hardware is unable to support the requested size. */
1292         fprintf(stderr, "Error: the video system does not support an image\n"
1293                         "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n"
1294                         "to reduce the image size.\n", vp->width, vp->height );
1295         do_exit();
1296     }
1297
1298     SDL_LockMutex(is->pictq_mutex);
1299     vp->allocated = 1;
1300     SDL_CondSignal(is->pictq_cond);
1301     SDL_UnlockMutex(is->pictq_mutex);
1302 }
1303
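/* convert a decoded frame to the SDL overlay format and append it to the picture queue */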
1304 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
1305 {
1306     VideoPicture *vp;
1307     double frame_delay, pts = pts1;
1308
1309     /* compute the exact PTS for the picture if it is omitted in the stream
1310      * pts1 is the dts of the pkt / pts of the frame */
1311     if (pts != 0) {
1312         /* update video clock with pts, if present */
1313         is->video_clock = pts;
1314     } else {
1315         pts = is->video_clock;
1316     }
1317     /* update video clock for next frame */
1318     frame_delay = av_q2d(is->video_st->codec->time_base);
1319     /* for MPEG2, the frame can be repeated, so we update the
1320        clock accordingly */
1321     frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1322     is->video_clock += frame_delay;
1323
1324 #if defined(DEBUG_SYNC) && 0
1325     printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
1326            av_get_picture_type_char(src_frame->pict_type), pts, pts1);
1327 #endif
1328
1329     /* wait until we have space to put a new picture */
1330     SDL_LockMutex(is->pictq_mutex);
1331
1332     if(is->pictq_size>=VIDEO_PICTURE_QUEUE_SIZE && !is->refresh)
1333         is->skip_frames= FFMAX(1.0 - FRAME_SKIP_FACTOR, is->skip_frames * (1.0-FRAME_SKIP_FACTOR));
1334
1335     while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1336            !is->videoq.abort_request) {
1337         SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1338     }
1339     SDL_UnlockMutex(is->pictq_mutex);
1340
1341     if (is->videoq.abort_request)
1342         return -1;
1343
1344     vp = &is->pictq[is->pictq_windex];
1345
1346     /* alloc or resize hardware picture buffer */
1347     if (!vp->bmp ||
1348 #if CONFIG_AVFILTER
1349         vp->width  != is->out_video_filter->inputs[0]->w ||
1350         vp->height != is->out_video_filter->inputs[0]->h) {
1351 #else
1352         vp->width != is->video_st->codec->width ||
1353         vp->height != is->video_st->codec->height) {
1354 #endif
1355         SDL_Event event;
1356
1357         vp->allocated = 0;
1358
1359         /* the allocation must be done in the main thread to avoid
1360            locking problems */
1361         event.type = FF_ALLOC_EVENT;
1362         event.user.data1 = is;
1363         SDL_PushEvent(&event);
1364
1365         /* wait until the picture is allocated */
1366         SDL_LockMutex(is->pictq_mutex);
1367         while (!vp->allocated && !is->videoq.abort_request) {
1368             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1369         }
1370         SDL_UnlockMutex(is->pictq_mutex);
1371
1372         if (is->videoq.abort_request)
1373             return -1;
1374     }
1375
1376     /* if the frame is not skipped, then display it */
1377     if (vp->bmp) {
1378         AVPicture pict;
1379 #if CONFIG_AVFILTER
1380         if(vp->picref)
1381             avfilter_unref_buffer(vp->picref);
1382         vp->picref = src_frame->opaque;
1383 #endif
1384
1385         /* get a pointer on the bitmap */
1386         SDL_LockYUVOverlay (vp->bmp);
1387
1388         memset(&pict,0,sizeof(AVPicture));
1389         pict.data[0] = vp->bmp->pixels[0];
1390         pict.data[1] = vp->bmp->pixels[2];
1391         pict.data[2] = vp->bmp->pixels[1];
1392
1393         pict.linesize[0] = vp->bmp->pitches[0];
1394         pict.linesize[1] = vp->bmp->pitches[2];
1395         pict.linesize[2] = vp->bmp->pitches[1];
1396
1397 #if CONFIG_AVFILTER
1398         //FIXME use direct rendering
1399         av_picture_copy(&pict, (AVPicture *)src_frame,
1400                         vp->pix_fmt, vp->width, vp->height);
1401 #else
1402         sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
1403         is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
1404             vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
1405             PIX_FMT_YUV420P, sws_flags, NULL, NULL, NULL);
1406         if (is->img_convert_ctx == NULL) {
1407             fprintf(stderr, "Cannot initialize the conversion context\n");
1408             exit(1);
1409         }
1410         sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1411                   0, vp->height, pict.data, pict.linesize);
1412 #endif
1413         /* update the bitmap content */
1414         SDL_UnlockYUVOverlay(vp->bmp);
1415
1416         vp->pts = pts;
1417         vp->pos = pos;
1418
1419         /* now we can update the picture count */
1420         if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1421             is->pictq_windex = 0;
1422         SDL_LockMutex(is->pictq_mutex);
1423         vp->target_clock= compute_target_time(vp->pts, is);
1424
1425         is->pictq_size++;
1426         SDL_UnlockMutex(is->pictq_mutex);
1427     }
1428     return 0;
1429 }
1430
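/* get one packet from the video queue and decode it; return 1 if a frame should be displayed, 0 otherwise, < 0 on abort */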
1431 static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
1432 {
1433     int len1 av_unused, got_picture, i;
1434
1435     if (packet_queue_get(&is->videoq, pkt, 1) < 0)
1436         return -1;
1437
1438     if (pkt->data == flush_pkt.data) {
1439         avcodec_flush_buffers(is->video_st->codec);
1440
1441         SDL_LockMutex(is->pictq_mutex);
1442         // Make sure there are no long delay timers (ideally we should just flush the queue, but that's harder)
1443         for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
1444             is->pictq[i].target_clock= 0;
1445         }
1446         while (is->pictq_size && !is->videoq.abort_request) {
1447             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1448         }
1449         is->video_current_pos = -1;
1450         SDL_UnlockMutex(is->pictq_mutex);
1451
1452         is->frame_last_pts = AV_NOPTS_VALUE;
1453         is->frame_last_delay = 0;
1454         is->frame_timer = (double)av_gettime() / 1000000.0;
1455         is->skip_frames = 1;
1456         is->skip_frames_index = 0;
1457         return 0;
1458     }
1459
1460     len1 = avcodec_decode_video2(is->video_st->codec,
1461                                  frame, &got_picture,
1462                                  pkt);
1463
1464     if (got_picture) {
1465         if (decoder_reorder_pts == -1) {
1466             *pts = frame->best_effort_timestamp;
1467         } else if (decoder_reorder_pts) {
1468             *pts = frame->pkt_pts;
1469         } else {
1470             *pts = frame->pkt_dts;
1471         }
1472
1473         if (*pts == AV_NOPTS_VALUE) {
1474             *pts = 0;
1475         }
1476
1477         is->skip_frames_index += 1;
1478         if(is->skip_frames_index >= is->skip_frames){
1479             is->skip_frames_index -= FFMAX(is->skip_frames, 1.0);
1480             return 1;
1481         }
1482
1483     }
1484     return 0;
1485 }
1486
1487 #if CONFIG_AVFILTER
1488 typedef struct {
1489     VideoState *is;
1490     AVFrame *frame;
1491     int use_dr1;
1492 } FilterPriv;
1493
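/* get_buffer() callback: hand the decoder a buffer allocated by the filter graph so frames can be passed on without a copy */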
1494 static int input_get_buffer(AVCodecContext *codec, AVFrame *pic)
1495 {
1496     AVFilterContext *ctx = codec->opaque;
1497     AVFilterBufferRef  *ref;
1498     int perms = AV_PERM_WRITE;
1499     int i, w, h, stride[4];
1500     unsigned edge;
1501     int pixel_size;
1502
1503     av_assert0(codec->flags & CODEC_FLAG_EMU_EDGE);
1504
1505     if (codec->codec->capabilities & CODEC_CAP_NEG_LINESIZES)
1506         perms |= AV_PERM_NEG_LINESIZES;
1507
1508     if(pic->buffer_hints & FF_BUFFER_HINTS_VALID) {
1509         if(pic->buffer_hints & FF_BUFFER_HINTS_READABLE) perms |= AV_PERM_READ;
1510         if(pic->buffer_hints & FF_BUFFER_HINTS_PRESERVE) perms |= AV_PERM_PRESERVE;
1511         if(pic->buffer_hints & FF_BUFFER_HINTS_REUSABLE) perms |= AV_PERM_REUSE2;
1512     }
1513     if(pic->reference) perms |= AV_PERM_READ | AV_PERM_PRESERVE;
1514
1515     w = codec->width;
1516     h = codec->height;
1517
1518     if(av_image_check_size(w, h, 0, codec))
1519         return -1;
1520
1521     avcodec_align_dimensions2(codec, &w, &h, stride);
1522     edge = codec->flags & CODEC_FLAG_EMU_EDGE ? 0 : avcodec_get_edge_width();
1523     w += edge << 1;
1524     h += edge << 1;
1525
1526     if(!(ref = avfilter_get_video_buffer(ctx->outputs[0], perms, w, h)))
1527         return -1;
1528
1529     pixel_size = av_pix_fmt_descriptors[ref->format].comp[0].step_minus1+1;
1530     ref->video->w = codec->width;
1531     ref->video->h = codec->height;
1532     for(i = 0; i < 4; i ++) {
1533         unsigned hshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_w : 0;
1534         unsigned vshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_h : 0;
1535
1536         if (ref->data[i]) {
1537             ref->data[i]    += ((edge * pixel_size) >> hshift) + ((edge * ref->linesize[i]) >> vshift);
1538         }
1539         pic->data[i]     = ref->data[i];
1540         pic->linesize[i] = ref->linesize[i];
1541     }
1542     pic->opaque = ref;
1543     pic->age    = INT_MAX;
1544     pic->type   = FF_BUFFER_TYPE_USER;
1545     pic->reordered_opaque = codec->reordered_opaque;
1546     if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
1547     else           pic->pkt_pts = AV_NOPTS_VALUE;
1548     return 0;
1549 }
1550
1551 static void input_release_buffer(AVCodecContext *codec, AVFrame *pic)
1552 {
1553     memset(pic->data, 0, sizeof(pic->data));
1554     avfilter_unref_buffer(pic->opaque);
1555 }
1556
1557 static int input_reget_buffer(AVCodecContext *codec, AVFrame *pic)
1558 {
1559     AVFilterBufferRef *ref = pic->opaque;
1560
1561     if (pic->data[0] == NULL) {
1562         pic->buffer_hints |= FF_BUFFER_HINTS_READABLE;
1563         return codec->get_buffer(codec, pic);
1564     }
1565
1566     if ((codec->width != ref->video->w) || (codec->height != ref->video->h) ||
1567         (codec->pix_fmt != ref->format)) {
1568         av_log(codec, AV_LOG_ERROR, "Picture properties changed.\n");
1569         return -1;
1570     }
1571
1572     pic->reordered_opaque = codec->reordered_opaque;
1573     if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
1574     else           pic->pkt_pts = AV_NOPTS_VALUE;
1575     return 0;
1576 }
1577
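/* init callback of the source filter: enable direct rendering when the decoder supports CODEC_CAP_DR1 */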
1578 static int input_init(AVFilterContext *ctx, const char *args, void *opaque)
1579 {
1580     FilterPriv *priv = ctx->priv;
1581     AVCodecContext *codec;
1582     if(!opaque) return -1;
1583
1584     priv->is = opaque;
1585     codec    = priv->is->video_st->codec;
1586     codec->opaque = ctx;
1587     if((codec->codec->capabilities & CODEC_CAP_DR1)
1588     ) {
1589         av_assert0(codec->flags & CODEC_FLAG_EMU_EDGE);
1590         priv->use_dr1 = 1;
1591         codec->get_buffer     = input_get_buffer;
1592         codec->release_buffer = input_release_buffer;
1593         codec->reget_buffer   = input_reget_buffer;
1594         codec->thread_safe_callbacks = 1;
1595     }
1596
1597     priv->frame = avcodec_alloc_frame();
1598
1599     return 0;
1600 }
1601
1602 static void input_uninit(AVFilterContext *ctx)
1603 {
1604     FilterPriv *priv = ctx->priv;
1605     av_free(priv->frame);
1606 }
1607
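/* request_frame callback of the source filter: decode video until a displayable frame is obtained and push it into the graph */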
1608 static int input_request_frame(AVFilterLink *link)
1609 {
1610     FilterPriv *priv = link->src->priv;
1611     AVFilterBufferRef *picref;
1612     int64_t pts = 0;
1613     AVPacket pkt;
1614     int ret;
1615
1616     while (!(ret = get_video_frame(priv->is, priv->frame, &pts, &pkt)))
1617         av_free_packet(&pkt);
1618     if (ret < 0)
1619         return -1;
1620
1621     if(priv->use_dr1 && priv->frame->opaque) {
1622         picref = avfilter_ref_buffer(priv->frame->opaque, ~0);
1623     } else {
1624         picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, link->w, link->h);
1625         av_image_copy(picref->data, picref->linesize,
1626                       priv->frame->data, priv->frame->linesize,
1627                       picref->format, link->w, link->h);
1628     }
1629     av_free_packet(&pkt);
1630
1631     avfilter_copy_frame_props(picref, priv->frame);
1632     picref->pts = pts;
1633
1634     avfilter_start_frame(link, picref);
1635     avfilter_draw_slice(link, 0, link->h, 1);
1636     avfilter_end_frame(link);
1637
1638     return 0;
1639 }
1640
1641 static int input_query_formats(AVFilterContext *ctx)
1642 {
1643     FilterPriv *priv = ctx->priv;
1644     enum PixelFormat pix_fmts[] = {
1645         priv->is->video_st->codec->pix_fmt, PIX_FMT_NONE
1646     };
1647
1648     avfilter_set_common_pixel_formats(ctx, avfilter_make_format_list(pix_fmts));
1649     return 0;
1650 }
1651
1652 static int input_config_props(AVFilterLink *link)
1653 {
1654     FilterPriv *priv  = link->src->priv;
1655     AVCodecContext *c = priv->is->video_st->codec;
1656
1657     link->w = c->width;
1658     link->h = c->height;
1659     link->time_base = priv->is->video_st->time_base;
1660
1661     return 0;
1662 }
1663
1664 static AVFilter input_filter =
1665 {
1666     .name      = "ffplay_input",
1667
1668     .priv_size = sizeof(FilterPriv),
1669
1670     .init      = input_init,
1671     .uninit    = input_uninit,
1672
1673     .query_formats = input_query_formats,
1674
1675     .inputs    = (AVFilterPad[]) {{ .name = NULL }},
1676     .outputs   = (AVFilterPad[]) {{ .name = "default",
1677                                     .type = AVMEDIA_TYPE_VIDEO,
1678                                     .request_frame = input_request_frame,
1679                                     .config_props  = input_config_props, },
1680                                   { .name = NULL }},
1681 };
1682
1683 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters)
1684 {
1685     char sws_flags_str[128];
1686     int ret;
1687     enum PixelFormat pix_fmts[] = { PIX_FMT_YUV420P, PIX_FMT_NONE };
1688     AVFilterContext *filt_src = NULL, *filt_out = NULL;
1689     snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%d", sws_flags);
1690     graph->scale_sws_opts = av_strdup(sws_flags_str);
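    /* the graph built below is: ffplay_input (src) -> [optional user filter
       chain parsed from vfilters] -> buffersink (out), restricted to YUV420P */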
1691
1692     if ((ret = avfilter_graph_create_filter(&filt_src, &input_filter, "src",
1693                                             NULL, is, graph)) < 0)
1694         goto the_end;
1695     if ((ret = avfilter_graph_create_filter(&filt_out, avfilter_get_by_name("buffersink"), "out",
1696                                             NULL, pix_fmts, graph)) < 0)
1697         goto the_end;
1698
1699     if(vfilters) {
1700         AVFilterInOut *outputs = avfilter_inout_alloc();
1701         AVFilterInOut *inputs  = avfilter_inout_alloc();
1702
1703         outputs->name    = av_strdup("in");
1704         outputs->filter_ctx = filt_src;
1705         outputs->pad_idx = 0;
1706         outputs->next    = NULL;
1707
1708         inputs->name    = av_strdup("out");
1709         inputs->filter_ctx = filt_out;
1710         inputs->pad_idx = 0;
1711         inputs->next    = NULL;
1712
1713         if ((ret = avfilter_graph_parse(graph, vfilters, &inputs, &outputs, NULL)) < 0)
1714             goto the_end;
1715         av_freep(&vfilters);
1716     } else {
1717         if ((ret = avfilter_link(filt_src, 0, filt_out, 0)) < 0)
1718             goto the_end;
1719     }
1720
1721     if ((ret = avfilter_graph_config(graph, NULL)) < 0)
1722         goto the_end;
1723
1724     is->out_video_filter = filt_out;
1725 the_end:
1726     return ret;
1727 }
1728
1729 #endif  /* CONFIG_AVFILTER */
1730
1731 static int video_thread(void *arg)
1732 {
1733     VideoState *is = arg;
1734     AVFrame *frame= avcodec_alloc_frame();
1735     int64_t pts_int = AV_NOPTS_VALUE, pos = -1;
1736     double pts;
1737     int ret;
1738
1739 #if CONFIG_AVFILTER
1740     AVFilterGraph *graph = avfilter_graph_alloc();
1741     AVFilterContext *filt_out = NULL;
1742
1743     if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
1744         goto the_end;
1745     filt_out = is->out_video_filter;
1746 #endif
1747
1748     for(;;) {
1749 #if !CONFIG_AVFILTER
1750         AVPacket pkt;
1751 #else
1752         AVFilterBufferRef *picref;
1753         AVRational tb = filt_out->inputs[0]->time_base;
1754 #endif
1755         while (is->paused && !is->videoq.abort_request)
1756             SDL_Delay(10);
1757 #if CONFIG_AVFILTER
1758         ret = av_vsink_buffer_get_video_buffer_ref(filt_out, &picref, 0);
1759         if (picref) {
1760             avfilter_fill_frame_from_video_buffer_ref(frame, picref);
1761             pts_int = picref->pts;
1762             pos     = picref->pos;
1763             frame->opaque = picref;
1764         }
1765
1766         if (av_cmp_q(tb, is->video_st->time_base)) {
1767             av_unused int64_t pts1 = pts_int;
1768             pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
1769             av_dlog(NULL, "video_thread(): "
1770                     "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
1771                     tb.num, tb.den, pts1,
1772                     is->video_st->time_base.num, is->video_st->time_base.den, pts_int);
1773         }
1774 #else
1775         ret = get_video_frame(is, frame, &pts_int, &pkt);
1776         pos = pkt.pos;
1777         av_free_packet(&pkt);
1778 #endif
1779
1780         if (ret < 0) goto the_end;
1781
1782 #if CONFIG_AVFILTER
1783         if (!picref)
1784             continue;
1785 #endif
1786
1787         pts = pts_int*av_q2d(is->video_st->time_base);
1788
1789         ret = queue_picture(is, frame, pts, pos);
1790
1791         if (ret < 0)
1792             goto the_end;
1793
1794         if (step)
1795             if (cur_stream)
1796                 stream_toggle_pause(cur_stream);
1797     }
1798  the_end:
1799 #if CONFIG_AVFILTER
1800     avfilter_graph_free(&graph);
1801 #endif
1802     av_free(frame);
1803     return 0;
1804 }
1805
1806 static int subtitle_thread(void *arg)
1807 {
1808     VideoState *is = arg;
1809     SubPicture *sp;
1810     AVPacket pkt1, *pkt = &pkt1;
1811     int len1 av_unused, got_subtitle;
1812     double pts;
1813     int i, j;
1814     int r, g, b, y, u, v, a;
1815
1816     for(;;) {
1817         while (is->paused && !is->subtitleq.abort_request) {
1818             SDL_Delay(10);
1819         }
1820         if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
1821             break;
1822
1823         if(pkt->data == flush_pkt.data){
1824             avcodec_flush_buffers(is->subtitle_st->codec);
1825             continue;
1826         }
1827         SDL_LockMutex(is->subpq_mutex);
1828         while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
1829                !is->subtitleq.abort_request) {
1830             SDL_CondWait(is->subpq_cond, is->subpq_mutex);
1831         }
1832         SDL_UnlockMutex(is->subpq_mutex);
1833
1834         if (is->subtitleq.abort_request)
1835             goto the_end;
1836
1837         sp = &is->subpq[is->subpq_windex];
1838
1839         /* NOTE: pts is the PTS of the _first_ subtitle contained in
1840            this packet, if any */
1841         pts = 0;
1842         if (pkt->pts != AV_NOPTS_VALUE)
1843             pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;
1844
1845         len1 = avcodec_decode_subtitle2(is->subtitle_st->codec,
1846                                     &sp->sub, &got_subtitle,
1847                                     pkt);
1848         if (got_subtitle && sp->sub.format == 0) {
1849             sp->pts = pts;
1850
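            /* convert the palette of each bitmap subtitle rectangle
               (pict.data[1]) from RGBA to YUVA, so it can later be blended
               onto the YUV video picture */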
1851             for (i = 0; i < sp->sub.num_rects; i++)
1852             {
1853                 for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
1854                 {
1855                     RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
1856                     y = RGB_TO_Y_CCIR(r, g, b);
1857                     u = RGB_TO_U_CCIR(r, g, b, 0);
1858                     v = RGB_TO_V_CCIR(r, g, b, 0);
1859                     YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
1860                 }
1861             }
1862
1863             /* now we can update the picture count */
1864             if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
1865                 is->subpq_windex = 0;
1866             SDL_LockMutex(is->subpq_mutex);
1867             is->subpq_size++;
1868             SDL_UnlockMutex(is->subpq_mutex);
1869         }
1870         av_free_packet(pkt);
1871     }
1872  the_end:
1873     return 0;
1874 }
1875
1876 /* copy samples into the circular buffer used by the audio (waves/RDFT) display */
1877 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1878 {
1879     int size, len;
1880
1881     size = samples_size / sizeof(short);
1882     while (size > 0) {
1883         len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1884         if (len > size)
1885             len = size;
1886         memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1887         samples += len;
1888         is->sample_array_index += len;
1889         if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1890             is->sample_array_index = 0;
1891         size -= len;
1892     }
1893 }
1894
1895 /* return the new audio buffer size (samples can be added or removed
1896    to get better sync when video or the external clock is the master clock) */
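/* Sketch of the correction below: the A-V difference is folded into an
   exponentially weighted sum, cum = diff + coef * cum with
   coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB), giving the estimate
   avg_diff = cum * (1 - coef). Once AUDIO_DIFF_AVG_NB measures have been
   taken and |avg_diff| exceeds audio_diff_threshold, the buffer is grown or
   shrunk by about diff * sample_rate samples, clamped to within
   SAMPLE_CORRECTION_PERCENT_MAX percent of its original size. */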
1897 static int synchronize_audio(VideoState *is, short *samples,
1898                              int samples_size1, double pts)
1899 {
1900     int n, samples_size;
1901     double ref_clock;
1902
1903     n = 2 * is->audio_st->codec->channels;
1904     samples_size = samples_size1;
1905
1906     /* if not master, then we try to remove or add samples to correct the clock */
1907     if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1908          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1909         double diff, avg_diff;
1910         int wanted_size, min_size, max_size, nb_samples;
1911
1912         ref_clock = get_master_clock(is);
1913         diff = get_audio_clock(is) - ref_clock;
1914
1915         if (diff < AV_NOSYNC_THRESHOLD) {
1916             is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1917             if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1918                 /* not enough measures to have a correct estimate */
1919                 is->audio_diff_avg_count++;
1920             } else {
1921                 /* estimate the A-V difference */
1922                 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1923
1924                 if (fabs(avg_diff) >= is->audio_diff_threshold) {
1925                     wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
1926                     nb_samples = samples_size / n;
1927
1928                     min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1929                     max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1930                     if (wanted_size < min_size)
1931                         wanted_size = min_size;
1932                     else if (wanted_size > max_size)
1933                         wanted_size = max_size;
1934
1935                     /* add or remove samples to correct the sync */
1936                     if (wanted_size < samples_size) {
1937                         /* remove samples */
1938                         samples_size = wanted_size;
1939                     } else if (wanted_size > samples_size) {
1940                         uint8_t *samples_end, *q;
1941                         int nb;
1942
1943                         /* add samples by repeating the last sample */
1944                         nb = (wanted_size - samples_size);
1945                         samples_end = (uint8_t *)samples + samples_size - n;
1946                         q = samples_end + n;
1947                         while (nb > 0) {
1948                             memcpy(q, samples_end, n);
1949                             q += n;
1950                             nb -= n;
1951                         }
1952                         samples_size = wanted_size;
1953                     }
1954                 }
1955 #if 0
1956                 printf("diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
1957                        diff, avg_diff, samples_size - samples_size1,
1958                        is->audio_clock, is->video_clock, is->audio_diff_threshold);
1959 #endif
1960             }
1961         } else {
1962             /* the difference is too big: it is probably caused by initial
1963                PTS errors, so reset the A-V filter */
1964             is->audio_diff_avg_count = 0;
1965             is->audio_diff_cum = 0;
1966         }
1967     }
1968
1969     return samples_size;
1970 }
1971
1972 /* decode one audio frame and return its uncompressed size */
1973 static int audio_decode_frame(VideoState *is, double *pts_ptr)
1974 {
1975     AVPacket *pkt_temp = &is->audio_pkt_temp;
1976     AVPacket *pkt = &is->audio_pkt;
1977     AVCodecContext *dec= is->audio_st->codec;
1978     int n, len1, data_size;
1979     double pts;
1980
1981     for(;;) {
1982         /* NOTE: the audio packet can contain several frames */
1983         while (pkt_temp->size > 0) {
1984             data_size = sizeof(is->audio_buf1);
1985             len1 = avcodec_decode_audio3(dec,
1986                                         (int16_t *)is->audio_buf1, &data_size,
1987                                         pkt_temp);
1988             if (len1 < 0) {
1989                 /* if error, we skip the frame */
1990                 pkt_temp->size = 0;
1991                 break;
1992             }
1993
1994             pkt_temp->data += len1;
1995             pkt_temp->size -= len1;
1996             if (data_size <= 0)
1997                 continue;
1998
1999             if (dec->sample_fmt != is->audio_src_fmt) {
2000                 if (is->reformat_ctx)
2001                     av_audio_convert_free(is->reformat_ctx);
2002                 is->reformat_ctx= av_audio_convert_alloc(AV_SAMPLE_FMT_S16, 1,
2003                                                          dec->sample_fmt, 1, NULL, 0);
2004                 if (!is->reformat_ctx) {
2005                     fprintf(stderr, "Cannot convert %s sample format to %s sample format\n",
2006                         av_get_sample_fmt_name(dec->sample_fmt),
2007                         av_get_sample_fmt_name(AV_SAMPLE_FMT_S16));
2008                         break;
2009                 }
2010                 is->audio_src_fmt= dec->sample_fmt;
2011             }
2012
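            /* if the decoder did not output signed 16-bit samples, convert
               them into audio_buf2 (2 bytes per output sample, interleaved) */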
2013             if (is->reformat_ctx) {
2014                 const void *ibuf[6]= {is->audio_buf1};
2015                 void *obuf[6]= {is->audio_buf2};
2016                 int istride[6]= {av_get_bytes_per_sample(dec->sample_fmt)};
2017                 int ostride[6]= {2};
2018                 int len= data_size/istride[0];
2019                 if (av_audio_convert(is->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) {
2020                     printf("av_audio_convert() failed\n");
2021                     break;
2022                 }
2023                 is->audio_buf= is->audio_buf2;
2024                 /* FIXME: existing code assumes that data_size equals framesize*channels*2;
2025                           remove this legacy cruft */
2026                 data_size= len*2;
2027             }else{
2028                 is->audio_buf= is->audio_buf1;
2029             }
2030
2031             /* the frame pts is taken from the running audio clock, which is then advanced */
2032             pts = is->audio_clock;
2033             *pts_ptr = pts;
2034             n = 2 * dec->channels;
2035             is->audio_clock += (double)data_size /
2036                 (double)(n * dec->sample_rate);
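            /* n is the number of bytes per sample frame (2 bytes per sample
               times the channel count), so the clock advances by the exact
               duration of the data just decoded */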
2037 #ifdef DEBUG
2038             {
2039                 static double last_clock;
2040                 printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
2041                        is->audio_clock - last_clock,
2042                        is->audio_clock, pts);
2043                 last_clock = is->audio_clock;
2044             }
2045 #endif
2046             return data_size;
2047         }
2048
2049         /* free the current packet */
2050         if (pkt->data)
2051             av_free_packet(pkt);
2052
2053         if (is->paused || is->audioq.abort_request) {
2054             return -1;
2055         }
2056
2057         /* read next packet */
2058         if (packet_queue_get(&is->audioq, pkt, 1) < 0)
2059             return -1;
2060         if(pkt->data == flush_pkt.data){
2061             avcodec_flush_buffers(dec);
2062             continue;
2063         }
2064
2065         pkt_temp->data = pkt->data;
2066         pkt_temp->size = pkt->size;
2067
2068         /* update the audio clock with the packet pts, if available */
2069         if (pkt->pts != AV_NOPTS_VALUE) {
2070             is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
2071         }
2072     }
2073 }
2074
2075 /* prepare a new audio buffer */
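/* SDL invokes this callback from its audio thread whenever the hardware
   buffer needs 'len' more bytes; it is refilled from audio_buf, decoding new
   frames when audio_buf is exhausted and substituting silence on errors */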
2076 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2077 {
2078     VideoState *is = opaque;
2079     int audio_size, len1;
2080     double pts;
2081
2082     audio_callback_time = av_gettime();
2083
2084     while (len > 0) {
2085         if (is->audio_buf_index >= is->audio_buf_size) {
2086            audio_size = audio_decode_frame(is, &pts);
2087            if (audio_size < 0) {
2088                 /* if error, just output silence */
2089                is->audio_buf = is->audio_buf1;
2090                is->audio_buf_size = 1024;
2091                memset(is->audio_buf, 0, is->audio_buf_size);
2092            } else {
2093                if (is->show_mode != SHOW_MODE_VIDEO)
2094                    update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2095                audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
2096                                               pts);
2097                is->audio_buf_size = audio_size;
2098            }
2099            is->audio_buf_index = 0;
2100         }
2101         len1 = is->audio_buf_size - is->audio_buf_index;
2102         if (len1 > len)
2103             len1 = len;
2104         memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2105         len -= len1;
2106         stream += len1;
2107         is->audio_buf_index += len1;
2108     }
2109 }
2110
2111 /* open a given stream. Return 0 if OK */
2112 static int stream_component_open(VideoState *is, int stream_index)
2113 {
2114     AVFormatContext *ic = is->ic;
2115     AVCodecContext *avctx;
2116     AVCodec *codec;
2117     SDL_AudioSpec wanted_spec, spec;
2118
2119     if (stream_index < 0 || stream_index >= ic->nb_streams)
2120         return -1;
2121     avctx = ic->streams[stream_index]->codec;
2122
2123     /* prepare audio output */
2124     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2125         if (avctx->channels > 0) {
2126             avctx->request_channels = FFMIN(2, avctx->channels);
2127         } else {
2128             avctx->request_channels = 2;
2129         }
2130     }
2131
2132     codec = avcodec_find_decoder(avctx->codec_id);
2133     if (!codec)
2134         return -1;
2135
2136     avctx->workaround_bugs = workaround_bugs;
2137     avctx->lowres = lowres;
2138     if(lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
2139     avctx->idct_algo= idct;
2140     if(fast) avctx->flags2 |= CODEC_FLAG2_FAST;
2141     avctx->skip_frame= skip_frame;
2142     avctx->skip_idct= skip_idct;
2143     avctx->skip_loop_filter= skip_loop_filter;
2144     avctx->error_recognition= error_recognition;
2145     avctx->error_concealment= error_concealment;
2146     avctx->thread_count= thread_count;
2147
2148     set_context_opts(avctx, avcodec_opts[avctx->codec_type], 0, codec);
2149
2150     if(codec->capabilities & CODEC_CAP_DR1)
2151         avctx->flags |= CODEC_FLAG_EMU_EDGE;
2152
2153     if (avcodec_open(avctx, codec) < 0)
2154         return -1;
2155
2156     /* prepare audio output */
2157     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2158         if(avctx->sample_rate <= 0 || avctx->channels <= 0){
2159             fprintf(stderr, "Invalid sample rate or channel count\n");
2160             return -1;
2161         }
2162         wanted_spec.freq = avctx->sample_rate;
2163         wanted_spec.format = AUDIO_S16SYS;
2164         wanted_spec.channels = avctx->channels;
2165         wanted_spec.silence = 0;
2166         wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
2167         wanted_spec.callback = sdl_audio_callback;
2168         wanted_spec.userdata = is;
2169         if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2170             fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
2171             return -1;
2172         }
2173         is->audio_hw_buf_size = spec.size;
2174         is->audio_src_fmt= AV_SAMPLE_FMT_S16;
2175     }
2176
2177     ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2178     switch(avctx->codec_type) {
2179     case AVMEDIA_TYPE_AUDIO:
2180         is->audio_stream = stream_index;
2181         is->audio_st = ic->streams[stream_index];
2182         is->audio_buf_size = 0;
2183         is->audio_buf_index = 0;
2184
2185         /* init averaging filter */
2186         is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2187         is->audio_diff_avg_count = 0;
2188         /* since we do not have a precise enough measure of the audio fifo fullness,
2189            we only correct audio sync when the error is larger than this threshold */
2190         is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / avctx->sample_rate;
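        /* i.e. twice the duration of one SDL audio buffer, in seconds */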
2191
2192         memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
2193         packet_queue_init(&is->audioq);
2194         SDL_PauseAudio(0);
2195         break;
2196     case AVMEDIA_TYPE_VIDEO:
2197         is->video_stream = stream_index;
2198         is->video_st = ic->streams[stream_index];
2199
2200         packet_queue_init(&is->videoq);
2201         is->video_tid = SDL_CreateThread(video_thread, is);
2202         break;
2203     case AVMEDIA_TYPE_SUBTITLE:
2204         is->subtitle_stream = stream_index;
2205         is->subtitle_st = ic->streams[stream_index];
2206         packet_queue_init(&is->subtitleq);
2207
2208         is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
2209         break;
2210     default:
2211         break;
2212     }
2213     return 0;
2214 }
2215
2216 static void stream_component_close(VideoState *is, int stream_index)
2217 {
2218     AVFormatContext *ic = is->ic;
2219     AVCodecContext *avctx;
2220
2221     if (stream_index < 0 || stream_index >= ic->nb_streams)
2222         return;
2223     avctx = ic->streams[stream_index]->codec;
2224
2225     switch(avctx->codec_type) {
2226     case AVMEDIA_TYPE_AUDIO:
2227         packet_queue_abort(&is->audioq);
2228
2229         SDL_CloseAudio();
2230
2231         packet_queue_end(&is->audioq);
2232         if (is->reformat_ctx)
2233             av_audio_convert_free(is->reformat_ctx);
2234         is->reformat_ctx = NULL;
2235         break;
2236     case AVMEDIA_TYPE_VIDEO:
2237         packet_queue_abort(&is->videoq);
2238
2239         /* note: we also signal this mutex to make sure we unblock the
2240            video thread in all cases */
2241         SDL_LockMutex(is->pictq_mutex);
2242         SDL_CondSignal(is->pictq_cond);
2243         SDL_UnlockMutex(is->pictq_mutex);
2244
2245         SDL_WaitThread(is->video_tid, NULL);
2246
2247         packet_queue_end(&is->videoq);
2248         break;
2249     case AVMEDIA_TYPE_SUBTITLE:
2250         packet_queue_abort(&is->subtitleq);
2251
2252         /* note: we also signal this mutex to make sure we unblock the
2253            subtitle thread in all cases */
2254         SDL_LockMutex(is->subpq_mutex);
2255         is->subtitle_stream_changed = 1;
2256
2257         SDL_CondSignal(is->subpq_cond);
2258         SDL_UnlockMutex(is->subpq_mutex);
2259
2260         SDL_WaitThread(is->subtitle_tid, NULL);
2261
2262         packet_queue_end(&is->subtitleq);
2263         break;
2264     default:
2265         break;
2266     }
2267
2268     ic->streams[stream_index]->discard = AVDISCARD_ALL;
2269     avcodec_close(avctx);
2270     switch(avctx->codec_type) {
2271     case AVMEDIA_TYPE_AUDIO:
2272         is->audio_st = NULL;
2273         is->audio_stream = -1;
2274         break;
2275     case AVMEDIA_TYPE_VIDEO:
2276         is->video_st = NULL;
2277         is->video_stream = -1;
2278         break;
2279     case AVMEDIA_TYPE_SUBTITLE:
2280         is->subtitle_st = NULL;
2281         is->subtitle_stream = -1;
2282         break;
2283     default:
2284         break;
2285     }
2286 }
2287
2288 /* since we have only one decoding thread, we can use a global
2289    variable instead of a thread local variable */
2290 static VideoState *global_video_state;
2291
2292 static int decode_interrupt_cb(void)
2293 {
2294     return (global_video_state && global_video_state->abort_request);
2295 }
2296
2297 /* this thread gets the stream from the disk or the network */
2298 static int read_thread(void *arg)
2299 {
2300     VideoState *is = arg;
2301     AVFormatContext *ic = NULL;
2302     int err, i, ret;
2303     int st_index[AVMEDIA_TYPE_NB];
2304     AVPacket pkt1, *pkt = &pkt1;
2305     int eof=0;
2306     int pkt_in_play_range = 0;
2307     AVDictionaryEntry *t;
2308
2309     memset(st_index, -1, sizeof(st_index));
2310     is->video_stream = -1;
2311     is->audio_stream = -1;
2312     is->subtitle_stream = -1;
2313
2314     global_video_state = is;
2315     avio_set_interrupt_cb(decode_interrupt_cb);
2316
2317     err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
2318     if (err < 0) {
2319         print_error(is->filename, err);
2320         ret = -1;
2321         goto fail;
2322     }
2323     if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2324         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2325         ret = AVERROR_OPTION_NOT_FOUND;
2326         goto fail;
2327     }
2328     is->ic = ic;
2329
2330     if(genpts)
2331         ic->flags |= AVFMT_FLAG_GENPTS;
2332
2333     err = av_find_stream_info(ic);
2334     if (err < 0) {
2335         fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
2336         ret = -1;
2337         goto fail;
2338     }
2339     if(ic->pb)
2340         ic->pb->eof_reached= 0; //FIXME hack, ffplay maybe should not use url_feof() to test for the end
2341
2342     if(seek_by_bytes<0)
2343         seek_by_bytes= !!(ic->iformat->flags & AVFMT_TS_DISCONT);
2344
2345     /* if seeking requested, we execute it */
2346     if (start_time != AV_NOPTS_VALUE) {
2347         int64_t timestamp;
2348
2349         timestamp = start_time;
2350         /* add the stream start time */
2351         if (ic->start_time != AV_NOPTS_VALUE)
2352             timestamp += ic->start_time;
2353         ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2354         if (ret < 0) {
2355             fprintf(stderr, "%s: could not seek to position %0.3f\n",
2356                     is->filename, (double)timestamp / AV_TIME_BASE);
2357         }
2358     }
2359
2360     for (i = 0; i < ic->nb_streams; i++)
2361         ic->streams[i]->discard = AVDISCARD_ALL;
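    /* pick the "best" stream of each type; the audio choice is related to the
       selected video stream, and the subtitle choice to the selected audio
       (or video) stream, via the related-stream argument of
       av_find_best_stream() */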
2362     if (!video_disable)
2363         st_index[AVMEDIA_TYPE_VIDEO] =
2364             av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2365                                 wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2366     if (!audio_disable)
2367         st_index[AVMEDIA_TYPE_AUDIO] =
2368             av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2369                                 wanted_stream[AVMEDIA_TYPE_AUDIO],
2370                                 st_index[AVMEDIA_TYPE_VIDEO],
2371                                 NULL, 0);
2372     if (!video_disable)
2373         st_index[AVMEDIA_TYPE_SUBTITLE] =
2374             av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2375                                 wanted_stream[AVMEDIA_TYPE_SUBTITLE],
2376                                 (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2377                                  st_index[AVMEDIA_TYPE_AUDIO] :
2378                                  st_index[AVMEDIA_TYPE_VIDEO]),
2379                                 NULL, 0);
2380     if (show_status) {
2381         av_dump_format(ic, 0, is->filename, 0);
2382     }
2383
2384     is->show_mode = show_mode;
2385
2386     /* open the streams */
2387     if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2388         stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2389     }
2390
2391     ret=-1;
2392     if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2393         ret= stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2394     }
2395     is->refresh_tid = SDL_CreateThread(refresh_thread, is);
2396     if (is->show_mode == SHOW_MODE_NONE)
2397         is->show_mode = ret >= 0 ? SHOW_MODE_VIDEO : SHOW_MODE_RDFT;
2398
2399     if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2400         stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2401     }
2402
2403     if (is->video_stream < 0 && is->audio_stream < 0) {
2404         fprintf(stderr, "%s: could not open codecs\n", is->filename);
2405         ret = -1;
2406         goto fail;
2407     }
2408
2409     for(;;) {
2410         if (is->abort_request)
2411             break;
2412         if (is->paused != is->last_paused) {
2413             is->last_paused = is->paused;
2414             if (is->paused)
2415                 is->read_pause_return= av_read_pause(ic);
2416             else
2417                 av_read_play(ic);
2418         }
2419 #if CONFIG_RTSP_DEMUXER
2420         if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
2421             /* wait 10 ms to avoid trying to get another packet */
2422             /* XXX: horrible */
2423             SDL_Delay(10);
2424             continue;
2425         }
2426 #endif
2427         if (is->seek_req) {
2428             int64_t seek_target= is->seek_pos;
2429             int64_t seek_min= is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2430             int64_t seek_max= is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2431 //FIXME the +-2 is due to rounding being not done in the correct direction in generation
2432 //      of the seek_pos/seek_rel variables
2433
2434             ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2435             if (ret < 0) {
2436                 fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
2437             }else{
2438                 if (is->audio_stream >= 0) {
2439                     packet_queue_flush(&is->audioq);
2440                     packet_queue_put(&is->audioq, &flush_pkt);
2441                 }
2442                 if (is->subtitle_stream >= 0) {
2443                     packet_queue_flush(&is->subtitleq);
2444                     packet_queue_put(&is->subtitleq, &flush_pkt);
2445                 }
2446                 if (is->video_stream >= 0) {
2447                     packet_queue_flush(&is->videoq);
2448                     packet_queue_put(&is->videoq, &flush_pkt);
2449                 }
2450             }
2451             is->seek_req = 0;
2452             eof= 0;
2453         }
2454
2455         /* if the queues are full, no need to read more */
2456         if (   is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2457             || (   (is->audioq   .size  > MIN_AUDIOQ_SIZE || is->audio_stream<0)
2458                 && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream<0)
2459                 && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream<0))) {
2460             /* wait 10 ms */
2461             SDL_Delay(10);
2462             continue;
2463         }
2464         if(eof) {
2465             if(is->video_stream >= 0){
2466                 av_init_packet(pkt);
2467                 pkt->data=NULL;
2468                 pkt->size=0;
2469                 pkt->stream_index= is->video_stream;
2470                 packet_queue_put(&is->videoq, pkt);
2471             }
2472             SDL_Delay(10);
2473             if(is->audioq.size + is->videoq.size + is->subtitleq.size ==0){
2474                 if(loop!=1 && (!loop || --loop)){
2475                     stream_seek(cur_stream, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
2476                 }else if(autoexit){
2477                     ret=AVERROR_EOF;
2478                     goto fail;
2479                 }
2480             }
2481             eof=0;
2482             continue;
2483         }
2484         ret = av_read_frame(ic, pkt);
2485         if (ret < 0) {
2486             if (ret == AVERROR_EOF || url_feof(ic->pb))
2487                 eof=1;
2488             if (ic->pb && ic->pb->error)
2489                 break;
2490             SDL_Delay(100); /* wait for user event */
2491             continue;
2492         }
2493         /* check if packet is in play range specified by user, then queue, otherwise discard */
2494         pkt_in_play_range = duration == AV_NOPTS_VALUE ||
2495                 (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
2496                 av_q2d(ic->streams[pkt->stream_index]->time_base) -
2497                 (double)(start_time != AV_NOPTS_VALUE ? start_time : 0)/1000000
2498                 <= ((double)duration/1000000);
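        /* i.e. the packet pts converted to seconds, measured from the stream
           start and offset by the requested start_time, must not exceed the
           requested duration (start_time and duration are in AV_TIME_BASE,
           i.e. microsecond, units) */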
2499         if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
2500             packet_queue_put(&is->audioq, pkt);
2501         } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
2502             packet_queue_put(&is->videoq, pkt);
2503         } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
2504             packet_queue_put(&is->subtitleq, pkt);
2505         } else {
2506             av_free_packet(pkt);
2507         }
2508     }
2509     /* wait until the end */
2510     while (!is->abort_request) {
2511         SDL_Delay(100);
2512     }
2513
2514     ret = 0;
2515  fail:
2516     /* disable interrupting */
2517     global_video_state = NULL;
2518
2519     /* close each stream */
2520     if (is->audio_stream >= 0)
2521         stream_component_close(is, is->audio_stream);
2522     if (is->video_stream >= 0)
2523         stream_component_close(is, is->video_stream);
2524     if (is->subtitle_stream >= 0)
2525         stream_component_close(is, is->subtitle_stream);
2526     if (is->ic) {
2527         av_close_input_file(is->ic);
2528         is->ic = NULL; /* safety */
2529     }
2530     avio_set_interrupt_cb(NULL);
2531
2532     if (ret != 0) {
2533         SDL_Event event;
2534
2535         event.type = FF_QUIT_EVENT;
2536         event.user.data1 = is;
2537         SDL_PushEvent(&event);
2538     }
2539     return 0;
2540 }
2541
2542 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2543 {
2544     VideoState *is;
2545
2546     is = av_mallocz(sizeof(VideoState));
2547     if (!is)
2548         return NULL;
2549     av_strlcpy(is->filename, filename, sizeof(is->filename));
2550     is->iformat = iformat;
2551     is->ytop = 0;
2552     is->xleft = 0;
2553
2554     /* start video display */
2555     is->pictq_mutex = SDL_CreateMutex();
2556     is->pictq_cond = SDL_CreateCond();
2557
2558     is->subpq_mutex = SDL_CreateMutex();
2559     is->subpq_cond = SDL_CreateCond();
2560
2561     is->av_sync_type = av_sync_type;
2562     is->read_tid = SDL_CreateThread(read_thread, is);
2563     if (!is->read_tid) {
2564         av_free(is);
2565         return NULL;
2566     }
2567     return is;
2568 }
2569
2570 static void stream_cycle_channel(VideoState *is, int codec_type)
2571 {
2572     AVFormatContext *ic = is->ic;
2573     int start_index, stream_index;
2574     AVStream *st;
2575
2576     if (codec_type == AVMEDIA_TYPE_VIDEO)
2577         start_index = is->video_stream;
2578     else if (codec_type == AVMEDIA_TYPE_AUDIO)
2579         start_index = is->audio_stream;
2580     else
2581         start_index = is->subtitle_stream;
2582     if (start_index < (codec_type == AVMEDIA_TYPE_SUBTITLE ? -1 : 0))
2583         return;
2584     stream_index = start_index;
2585     for(;;) {
2586         if (++stream_index >= is->ic->nb_streams)
2587         {
2588             if (codec_type == AVMEDIA_TYPE_SUBTITLE)
2589             {
2590                 stream_index = -1;
2591                 goto the_end;
2592             } else
2593                 stream_index = 0;
2594         }
2595         if (stream_index == start_index)
2596             return;
2597         st = ic->streams[stream_index];
2598         if (st->codec->codec_type == codec_type) {
2599             /* check that parameters are OK */
2600             switch(codec_type) {
2601             case AVMEDIA_TYPE_AUDIO:
2602                 if (st->codec->sample_rate != 0 &&
2603                     st->codec->channels != 0)
2604                     goto the_end;
2605                 break;
2606             case AVMEDIA_TYPE_VIDEO:
2607             case AVMEDIA_TYPE_SUBTITLE:
2608                 goto the_end;
2609             default:
2610                 break;
2611             }
2612         }
2613     }
2614  the_end:
2615     stream_component_close(is, start_index);
2616     stream_component_open(is, stream_index);
2617 }
2618
2619
2620 static void toggle_full_screen(void)
2621 {
2622     is_full_screen = !is_full_screen;
2623     video_open(cur_stream);
2624 }
2625
2626 static void toggle_pause(void)
2627 {
2628     if (cur_stream)
2629         stream_toggle_pause(cur_stream);
2630     step = 0;
2631 }
2632
2633 static void step_to_next_frame(void)
2634 {
2635     if (cur_stream) {
2636         /* if the stream is paused unpause it, then step */
2637         if (cur_stream->paused)
2638             stream_toggle_pause(cur_stream);
2639     }
2640     step = 1;
2641 }
2642
2643 static void toggle_audio_display(void)
2644 {
2645     if (cur_stream) {
2646         int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2647         cur_stream->show_mode = (cur_stream->show_mode + 1) % SHOW_MODE_NB;
2648         fill_rectangle(screen,
2649                     cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height,
2650                     bgcolor);
2651         SDL_UpdateRect(screen, cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height);
2652     }
2653 }
2654
2655 /* handle an event sent by the GUI */
2656 static void event_loop(void)
2657 {
2658     SDL_Event event;
2659     double incr, pos, frac;
2660
2661     for(;;) {
2662         double x;
2663         SDL_WaitEvent(&event);
2664         switch(event.type) {
2665         case SDL_KEYDOWN:
2666             if (exit_on_keydown) {
2667                 do_exit();
2668                 break;
2669             }
2670             switch(event.key.keysym.sym) {
2671             case SDLK_ESCAPE:
2672             case SDLK_q:
2673                 do_exit();
2674                 break;
2675             case SDLK_f:
2676                 toggle_full_screen();
2677                 break;
2678             case SDLK_p:
2679             case SDLK_SPACE:
2680                 toggle_pause();
2681                 break;
2682             case SDLK_s: //S: Step to next frame
2683                 step_to_next_frame();
2684                 break;
2685             case SDLK_a:
2686                 if (cur_stream)
2687                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
2688                 break;
2689             case SDLK_v:
2690                 if (cur_stream)
2691                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
2692                 break;
2693             case SDLK_t:
2694                 if (cur_stream)
2695                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
2696                 break;
2697             case SDLK_w:
2698                 toggle_audio_display();
2699                 break;
2700             case SDLK_LEFT:
2701                 incr = -10.0;
2702                 goto do_seek;
2703             case SDLK_RIGHT:
2704                 incr = 10.0;
2705                 goto do_seek;
2706             case SDLK_UP:
2707                 incr = 60.0;
2708                 goto do_seek;
2709             case SDLK_DOWN:
2710                 incr = -60.0;
2711             do_seek:
2712                 if (cur_stream) {
2713                     if (seek_by_bytes) {
2714                         if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos>=0){
2715                             pos= cur_stream->video_current_pos;
2716                         }else if(cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos>=0){
2717                             pos= cur_stream->audio_pkt.pos;
2718                         }else
2719                             pos = avio_tell(cur_stream->ic->pb);
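                        /* convert the seek increment from seconds to bytes
                           using the stream bit rate, falling back on
                           180000 bytes/s when the bit rate is unknown */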
2720                         if (cur_stream->ic->bit_rate)
2721                             incr *= cur_stream->ic->bit_rate / 8.0;
2722                         else
2723                             incr *= 180000.0;
2724                         pos += incr;
2725                         stream_seek(cur_stream, pos, incr, 1);
2726                     } else {
2727                         pos = get_master_clock(cur_stream);
2728                         pos += incr;
2729                         stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
2730                     }
2731                 }
2732                 break;
2733             default:
2734                 break;
2735             }
2736             break;
2737         case SDL_MOUSEBUTTONDOWN:
2738             if (exit_on_mousedown) {
2739                 do_exit();
2740                 break;
2741             }
2742         case SDL_MOUSEMOTION:
2743             if(event.type ==SDL_MOUSEBUTTONDOWN){
2744                 x= event.button.x;
2745             }else{
2746                 if(event.motion.state != SDL_PRESSED)
2747                     break;
2748                 x= event.motion.x;
2749             }
2750             if (cur_stream) {
2751                 if(seek_by_bytes || cur_stream->ic->duration<=0){
2752                     uint64_t size=  avio_size(cur_stream->ic->pb);
2753                     stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
2754                 }else{
2755                     int64_t ts;
2756                     int ns, hh, mm, ss;
2757                     int tns, thh, tmm, tss;
2758                     tns = cur_stream->ic->duration/1000000LL;
2759                     thh = tns/3600;
2760                     tmm = (tns%3600)/60;
2761                     tss = (tns%60);
2762                     frac = x/cur_stream->width;
2763                     ns = frac*tns;
2764                     hh = ns/3600;
2765                     mm = (ns%3600)/60;
2766                     ss = (ns%60);
2767                     fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
2768                             hh, mm, ss, thh, tmm, tss);
2769                     ts = frac*cur_stream->ic->duration;
2770                     if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
2771                         ts += cur_stream->ic->start_time;
2772                     stream_seek(cur_stream, ts, 0, 0);
2773                 }
2774             }
2775             break;
2776         case SDL_VIDEORESIZE:
2777             if (cur_stream) {
2778                 screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
2779                                           SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
2780                 screen_width = cur_stream->width = event.resize.w;
2781                 screen_height= cur_stream->height= event.resize.h;
2782             }
2783             break;
2784         case SDL_QUIT:
2785         case FF_QUIT_EVENT:
2786             do_exit();
2787             break;
2788         case FF_ALLOC_EVENT:
2789             video_open(event.user.data1);
2790             alloc_picture(event.user.data1);
2791             break;
2792         case FF_REFRESH_EVENT:
2793             video_refresh(event.user.data1);
2794             cur_stream->refresh=0;
2795             break;
2796         default:
2797             break;
2798         }
2799     }
2800 }
2801
2802 static int opt_frame_size(const char *opt, const char *arg)
2803 {
2804     if (av_parse_video_size(&frame_width, &frame_height, arg) < 0) {
2805         fprintf(stderr, "Incorrect frame size\n");
2806         return AVERROR(EINVAL);
2807     }
2808     if ((frame_width % 2) != 0 || (frame_height % 2) != 0) {
2809         fprintf(stderr, "Frame size must be a multiple of 2\n");
2810         return AVERROR(EINVAL);
2811     }
2812     return 0;
2813 }
2814
2815 static int opt_width(const char *opt, const char *arg)
2816 {
2817     screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2818     return 0;
2819 }
2820
2821 static int opt_height(const char *opt, const char *arg)
2822 {
2823     screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2824     return 0;
2825 }
2826
2827 static int opt_format(const char *opt, const char *arg)
2828 {
2829     file_iformat = av_find_input_format(arg);
2830     if (!file_iformat) {
2831         fprintf(stderr, "Unknown input format: %s\n", arg);
2832         return AVERROR(EINVAL);
2833     }
2834     return 0;
2835 }
2836
2837 static int opt_frame_pix_fmt(const char *opt, const char *arg)
2838 {
2839     frame_pix_fmt = av_get_pix_fmt(arg);
2840     return 0;
2841 }
2842
2843 static int opt_sync(const char *opt, const char *arg)
2844 {
2845     if (!strcmp(arg, "audio"))
2846         av_sync_type = AV_SYNC_AUDIO_MASTER;
2847     else if (!strcmp(arg, "video"))
2848         av_sync_type = AV_SYNC_VIDEO_MASTER;
2849     else if (!strcmp(arg, "ext"))
2850         av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2851     else {
2852         fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
2853         exit(1);
2854     }
2855     return 0;
2856 }
2857
2858 static int opt_seek(const char *opt, const char *arg)
2859 {
2860     start_time = parse_time_or_die(opt, arg, 1);
2861     return 0;
2862 }
2863
2864 static int opt_duration(const char *opt, const char *arg)
2865 {
2866     duration = parse_time_or_die(opt, arg, 1);
2867     return 0;
2868 }
2869
2870 static int opt_thread_count(const char *opt, const char *arg)
2871 {
2872     thread_count= parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2873 #if !HAVE_THREADS
2874     fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
2875 #endif
2876     return 0;
2877 }
2878
2879 static int opt_show_mode(const char *opt, const char *arg)
2880 {
2881     show_mode = !strcmp(arg, "video") ? SHOW_MODE_VIDEO :
2882                 !strcmp(arg, "waves") ? SHOW_MODE_WAVES :
2883                 !strcmp(arg, "rdft" ) ? SHOW_MODE_RDFT  :
2884                 parse_number_or_die(opt, arg, OPT_INT, 0, SHOW_MODE_NB-1);
2885     return 0;
2886 }
2887
2888 static int opt_input_file(const char *opt, const char *filename)
2889 {
2890     if (input_filename) {
2891         fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
2892                 filename, input_filename);
2893         exit(1);
2894     }
2895     if (!strcmp(filename, "-"))
2896         filename = "pipe:";
2897     input_filename = filename;
2898     return 0;
2899 }
2900
2901 static const OptionDef options[] = {
2902 #include "cmdutils_common_opts.h"
2903     { "x", HAS_ARG, {(void*)opt_width}, "force displayed width", "width" },
2904     { "y", HAS_ARG, {(void*)opt_height}, "force displayed height", "height" },
2905     { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
2906     { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
2907     { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
2908     { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
2909     { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_AUDIO]}, "select desired audio stream", "stream_number" },
2910     { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_VIDEO]}, "select desired video stream", "stream_number" },
2911     { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_SUBTITLE]}, "select desired subtitle stream", "stream_number" },
2912     { "ss", HAS_ARG, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
2913     { "t", HAS_ARG, {(void*)&opt_duration}, "play \"duration\" seconds of audio/video", "duration" },
2914     { "bytes", OPT_INT | HAS_ARG, {(void*)&seek_by_bytes}, "seek by bytes 0=off 1=on -1=auto", "val" },
2915     { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
2916     { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
2917     { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
2918     { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
2919     { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
2920     { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
2921     { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
2922     { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts 0=off 1=on -1=auto", ""},
2923     { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
2924     { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
2925     { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
2926     { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
2927     { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo",  "algo" },
2928     { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_recognition}, "set error detection threshold (0-4)",  "threshold" },
2929     { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options",  "bit_mask" },
2930     { "sync", HAS_ARG | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
2931     { "threads", HAS_ARG | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
2932     { "autoexit", OPT_BOOL | OPT_EXPERT, {(void*)&autoexit}, "exit at the end", "" },
2933     { "exitonkeydown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_keydown}, "exit on key down", "" },
2934     { "exitonmousedown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_mousedown}, "exit on mouse down", "" },
2935     { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&loop}, "set number of times the playback shall be looped", "loop count" },
2936     { "framedrop", OPT_BOOL | OPT_EXPERT, {(void*)&framedrop}, "drop frames when cpu is too slow", "" },
2937     { "window_title", OPT_STRING | HAS_ARG, {(void*)&window_title}, "set window title", "window title" },
2938 #if CONFIG_AVFILTER
2939     { "vf", OPT_STRING | HAS_ARG, {(void*)&vfilters}, "video filters", "filter list" },
2940 #endif
2941     { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, {(void*)&rdftspeed}, "rdft speed", "msecs" },
2942     { "showmode", HAS_ARG, {(void*)opt_show_mode}, "select show mode (0 = video, 1 = waves, 2 = RDFT)", "mode" },
2943     { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
2944     { "i", HAS_ARG, {(void *)opt_input_file}, "read specified file", "input_file"},
2945     { NULL, },
2946 };
2947
2948 static void show_usage(void)
2949 {
2950     printf("Simple media player\n");
2951     printf("usage: ffplay [options] input_file\n");
2952     printf("\n");
2953 }
2954
2955 static int opt_help(const char *opt, const char *arg)
2956 {
2957     av_log_set_callback(log_callback_help);
2958     show_usage();
2959     show_help_options(options, "Main options:\n",
2960                       OPT_EXPERT, 0);
2961     show_help_options(options, "\nAdvanced options:\n",
2962                       OPT_EXPERT, OPT_EXPERT);
2963     printf("\n");
2964     av_opt_show2(avcodec_opts[0], NULL,
2965                  AV_OPT_FLAG_DECODING_PARAM, 0);
2966     printf("\n");
2967     av_opt_show2(avformat_opts, NULL,
2968                  AV_OPT_FLAG_DECODING_PARAM, 0);
2969 #if !CONFIG_AVFILTER
2970     printf("\n");
2971     av_opt_show2(sws_opts, NULL,
2972                  AV_OPT_FLAG_ENCODING_PARAM, 0);
2973 #endif
2974     printf("\nWhile playing:\n"
2975            "q, ESC              quit\n"
2976            "f                   toggle full screen\n"
2977            "p, SPC              pause\n"
2978            "a                   cycle audio channel\n"
2979            "v                   cycle video channel\n"
2980            "t                   cycle subtitle channel\n"
2981            "w                   show audio waves\n"
2982            "s                   activate frame-step mode\n"
2983            "left/right          seek backward/forward 10 seconds\n"
2984            "down/up             seek backward/forward 1 minute\n"
2985            "mouse click         seek to percentage in file corresponding to fraction of width\n"
2986            );
2987     return 0;
2988 }
2989
2990 /* program entry point */
2991 int main(int argc, char **argv)
2992 {
2993     int flags;
2994
2995     av_log_set_flags(AV_LOG_SKIP_REPEATED);
2996
2997     /* register all codecs, demuxers and protocols */
2998     avcodec_register_all();
2999 #if CONFIG_AVDEVICE
3000     avdevice_register_all();
3001 #endif
3002 #if CONFIG_AVFILTER
3003     avfilter_register_all();
3004 #endif
3005     av_register_all();
3006
3007     init_opts();
3008
3009     show_banner();
3010
3011     parse_options(argc, argv, options, opt_input_file);
3012
3013     if (!input_filename) {
3014         show_usage();
3015         fprintf(stderr, "An input file must be specified\n");
3016         fprintf(stderr, "Use -h to get full help or, even better, run 'man ffplay'\n");
3017         exit(1);
3018     }
3019
3020     if (display_disable) {
3021         video_disable = 1;
3022     }
3023     flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3024     if (audio_disable)
3025         flags &= ~SDL_INIT_AUDIO;
3026 #if !defined(__MINGW32__) && !defined(__APPLE__)
3027     flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
3028 #endif
3029     if (SDL_Init (flags)) {
3030         fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
3031         exit(1);
3032     }
3033
3034     if (!display_disable) {
3035 #if HAVE_SDL_VIDEO_SIZE
3036         const SDL_VideoInfo *vi = SDL_GetVideoInfo();
3037         fs_screen_width = vi->current_w;
3038         fs_screen_height = vi->current_h;
3039 #endif
3040     }
3041
3042     SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
3043     SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3044     SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3045
3046     av_init_packet(&flush_pkt);
3047     flush_pkt.data= "FLUSH";
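    /* flush_pkt is a sentinel packet: when the packet queues hand it out, the
       decoding threads flush their codec buffers instead of decoding it */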
3048
3049     cur_stream = stream_open(input_filename, file_iformat);
3050
3051     event_loop();
3052
3053     /* never returns */
3054
3055     return 0;
3056 }