Fix compilation with --disable-avfilter.
[ffmpeg.git] / ffplay.c
1 /*
2  * FFplay : Simple Media Player based on the FFmpeg libraries
3  * Copyright (c) 2003 Fabrice Bellard
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21
22 #define _XOPEN_SOURCE 600
23
24 #include "config.h"
25 #include <inttypes.h>
26 #include <math.h>
27 #include <limits.h>
28 #include "libavutil/avstring.h"
29 #include "libavutil/colorspace.h"
30 #include "libavutil/pixdesc.h"
31 #include "libavutil/imgutils.h"
32 #include "libavutil/parseutils.h"
33 #include "libavutil/samplefmt.h"
34 #include "libavutil/avassert.h"
35 #include "libavformat/avformat.h"
36 #include "libavdevice/avdevice.h"
37 #include "libswscale/swscale.h"
38 #include "libavcodec/audioconvert.h"
39 #include "libavcodec/opt.h"
40 #include "libavcodec/avfft.h"
41
42 #if CONFIG_AVFILTER
43 # include "libavfilter/avfilter.h"
44 # include "libavfilter/avfiltergraph.h"
45 #endif
46
47 #include "cmdutils.h"
48
49 #include <SDL.h>
50 #include <SDL_thread.h>
51
52 #ifdef __MINGW32__
53 #undef main /* We don't want SDL to override our main() */
54 #endif
55
56 #include <unistd.h>
57 #include <assert.h>
58
59 const char program_name[] = "FFplay";
60 const int program_birth_year = 2003;
61
62 //#define DEBUG
63 //#define DEBUG_SYNC
64
65 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
66 #define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
67 #define MIN_FRAMES 5
68
69 /* SDL audio buffer size, in samples. Should be small to have precise
70    A/V sync as SDL does not have hardware buffer fullness info. */
71 #define SDL_AUDIO_BUFFER_SIZE 1024
72
73 /* no AV sync correction is done if below the AV sync threshold */
74 #define AV_SYNC_THRESHOLD 0.01
75 /* no AV correction is done if too big error */
76 #define AV_NOSYNC_THRESHOLD 10.0
77
78 #define FRAME_SKIP_FACTOR 0.05
79
80 /* maximum audio speed change to get correct sync */
81 #define SAMPLE_CORRECTION_PERCENT_MAX 10
82
83 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
84 #define AUDIO_DIFF_AVG_NB   20
85
86 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
87 #define SAMPLE_ARRAY_SIZE (2*65536)
88
89 static int sws_flags = SWS_BICUBIC;
90
91 typedef struct PacketQueue {
92     AVPacketList *first_pkt, *last_pkt;
93     int nb_packets;
94     int size;
95     int abort_request;
96     SDL_mutex *mutex;
97     SDL_cond *cond;
98 } PacketQueue;
99
100 #define VIDEO_PICTURE_QUEUE_SIZE 2
101 #define SUBPICTURE_QUEUE_SIZE 4
102
103 typedef struct VideoPicture {
104     double pts;                                  ///<presentation time stamp for this picture
105     double target_clock;                         ///<av_gettime() time at which this should be displayed ideally
106     int64_t pos;                                 ///<byte position in file
107     SDL_Overlay *bmp;
108     int width, height; /* source height & width */
109     int allocated;
110     enum PixelFormat pix_fmt;
111
112 #if CONFIG_AVFILTER
113     AVFilterBufferRef *picref;
114 #endif
115 } VideoPicture;
116
117 typedef struct SubPicture {
118     double pts; /* presentation time stamp for this picture */
119     AVSubtitle sub;
120 } SubPicture;
121
122 enum {
123     AV_SYNC_AUDIO_MASTER, /* default choice */
124     AV_SYNC_VIDEO_MASTER,
125     AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
126 };
127
128 typedef struct VideoState {
129     SDL_Thread *parse_tid;
130     SDL_Thread *video_tid;
131     SDL_Thread *refresh_tid;
132     AVInputFormat *iformat;
133     int no_background;
134     int abort_request;
135     int paused;
136     int last_paused;
137     int seek_req;
138     int seek_flags;
139     int64_t seek_pos;
140     int64_t seek_rel;
141     int read_pause_return;
142     AVFormatContext *ic;
143     int dtg_active_format;
144
145     int audio_stream;
146
147     int av_sync_type;
148     double external_clock; /* external clock base */
149     int64_t external_clock_time;
150
151     double audio_clock;
152     double audio_diff_cum; /* used for AV difference average computation */
153     double audio_diff_avg_coef;
154     double audio_diff_threshold;
155     int audio_diff_avg_count;
156     AVStream *audio_st;
157     PacketQueue audioq;
158     int audio_hw_buf_size;
159     /* samples output by the codec. we reserve more space for avsync
160        compensation */
161     DECLARE_ALIGNED(16,uint8_t,audio_buf1)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
162     DECLARE_ALIGNED(16,uint8_t,audio_buf2)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
163     uint8_t *audio_buf;
164     unsigned int audio_buf_size; /* in bytes */
165     int audio_buf_index; /* in bytes */
166     AVPacket audio_pkt_temp;
167     AVPacket audio_pkt;
168     enum AVSampleFormat audio_src_fmt;
169     AVAudioConvert *reformat_ctx;
170
171     int show_audio; /* if true, display audio samples */
172     int16_t sample_array[SAMPLE_ARRAY_SIZE];
173     int sample_array_index;
174     int last_i_start;
175     RDFTContext *rdft;
176     int rdft_bits;
177     FFTSample *rdft_data;
178     int xpos;
179
180     SDL_Thread *subtitle_tid;
181     int subtitle_stream;
182     int subtitle_stream_changed;
183     AVStream *subtitle_st;
184     PacketQueue subtitleq;
185     SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
186     int subpq_size, subpq_rindex, subpq_windex;
187     SDL_mutex *subpq_mutex;
188     SDL_cond *subpq_cond;
189
190     double frame_timer;
191     double frame_last_pts;
192     double frame_last_delay;
193     double video_clock;                          ///<pts of last decoded frame / predicted pts of next decoded frame
194     int video_stream;
195     AVStream *video_st;
196     PacketQueue videoq;
197     double video_current_pts;                    ///<current displayed pts (different from video_clock if frame fifos are used)
198     double video_current_pts_drift;              ///<video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
199     int64_t video_current_pos;                   ///<current displayed file pos
200     VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
201     int pictq_size, pictq_rindex, pictq_windex;
202     SDL_mutex *pictq_mutex;
203     SDL_cond *pictq_cond;
204 #if !CONFIG_AVFILTER
205     struct SwsContext *img_convert_ctx;
206 #endif
207
208     //    QETimer *video_timer;
209     char filename[1024];
210     int width, height, xleft, ytop;
211
212 #if CONFIG_AVFILTER
213     AVFilterContext *out_video_filter;          ///<the last filter in the video chain
214 #endif
215
216     float skip_frames;
217     float skip_frames_index;
218     int refresh;
219 } VideoState;
220
221 static void show_help(void);
222 static int audio_write_get_buf_size(VideoState *is);
223
224 /* options specified by the user */
225 static AVInputFormat *file_iformat;
226 static const char *input_filename;
227 static const char *window_title;
228 static int fs_screen_width;
229 static int fs_screen_height;
230 static int screen_width = 0;
231 static int screen_height = 0;
232 static int frame_width = 0;
233 static int frame_height = 0;
234 static enum PixelFormat frame_pix_fmt = PIX_FMT_NONE;
235 static int audio_disable;
236 static int video_disable;
237 static int wanted_stream[AVMEDIA_TYPE_NB]={
238     [AVMEDIA_TYPE_AUDIO]=-1,
239     [AVMEDIA_TYPE_VIDEO]=-1,
240     [AVMEDIA_TYPE_SUBTITLE]=-1,
241 };
242 static int seek_by_bytes=-1;
243 static int display_disable;
244 static int show_status = 1;
245 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
246 static int64_t start_time = AV_NOPTS_VALUE;
247 static int64_t duration = AV_NOPTS_VALUE;
248 static int debug = 0;
249 static int debug_mv = 0;
250 static int step = 0;
251 static int thread_count = 1;
252 static int workaround_bugs = 1;
253 static int fast = 0;
254 static int genpts = 0;
255 static int lowres = 0;
256 static int idct = FF_IDCT_AUTO;
257 static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
258 static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
259 static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
260 static int error_recognition = FF_ER_CAREFUL;
261 static int error_concealment = 3;
262 static int decoder_reorder_pts= -1;
263 static int autoexit;
264 static int exit_on_keydown;
265 static int exit_on_mousedown;
266 static int loop=1;
267 static int framedrop=1;
268
269 static int rdftspeed=20;
270 #if CONFIG_AVFILTER
271 static char *vfilters = NULL;
272 #endif
273
274 /* current context */
275 static int is_full_screen;
276 static VideoState *cur_stream;
277 static int64_t audio_callback_time;
278
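/* special packet whose data pointer is used as a marker: receiving it tells a
   decoder thread to flush its codec buffers (see get_video_frame below) */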
279 static AVPacket flush_pkt;
280
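/* custom SDL user events handled by the main loop: overlay (re)allocation,
   periodic display refresh and quit */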
281 #define FF_ALLOC_EVENT   (SDL_USEREVENT)
282 #define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
283 #define FF_QUIT_EVENT    (SDL_USEREVENT + 2)
284
285 static SDL_Surface *screen;
286
287 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
288 {
289     AVPacketList *pkt1;
290
291     /* duplicate the packet */
292     if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
293         return -1;
294
295     pkt1 = av_malloc(sizeof(AVPacketList));
296     if (!pkt1)
297         return -1;
298     pkt1->pkt = *pkt;
299     pkt1->next = NULL;
300
301
302     SDL_LockMutex(q->mutex);
303
304     if (!q->last_pkt)
305
306         q->first_pkt = pkt1;
307     else
308         q->last_pkt->next = pkt1;
309     q->last_pkt = pkt1;
310     q->nb_packets++;
311     q->size += pkt1->pkt.size + sizeof(*pkt1);
312     /* XXX: should duplicate packet data in DV case */
313     SDL_CondSignal(q->cond);
314
315     SDL_UnlockMutex(q->mutex);
316     return 0;
317 }
318
319 /* packet queue handling */
320 static void packet_queue_init(PacketQueue *q)
321 {
322     memset(q, 0, sizeof(PacketQueue));
323     q->mutex = SDL_CreateMutex();
324     q->cond = SDL_CreateCond();
325     packet_queue_put(q, &flush_pkt);
326 }
327
328 static void packet_queue_flush(PacketQueue *q)
329 {
330     AVPacketList *pkt, *pkt1;
331
332     SDL_LockMutex(q->mutex);
333     for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
334         pkt1 = pkt->next;
335         av_free_packet(&pkt->pkt);
336         av_freep(&pkt);
337     }
338     q->last_pkt = NULL;
339     q->first_pkt = NULL;
340     q->nb_packets = 0;
341     q->size = 0;
342     SDL_UnlockMutex(q->mutex);
343 }
344
345 static void packet_queue_end(PacketQueue *q)
346 {
347     packet_queue_flush(q);
348     SDL_DestroyMutex(q->mutex);
349     SDL_DestroyCond(q->cond);
350 }
351
352 static void packet_queue_abort(PacketQueue *q)
353 {
354     SDL_LockMutex(q->mutex);
355
356     q->abort_request = 1;
357
358     SDL_CondSignal(q->cond);
359
360     SDL_UnlockMutex(q->mutex);
361 }
362
363 /* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
364 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
365 {
366     AVPacketList *pkt1;
367     int ret;
368
369     SDL_LockMutex(q->mutex);
370
371     for(;;) {
372         if (q->abort_request) {
373             ret = -1;
374             break;
375         }
376
377         pkt1 = q->first_pkt;
378         if (pkt1) {
379             q->first_pkt = pkt1->next;
380             if (!q->first_pkt)
381                 q->last_pkt = NULL;
382             q->nb_packets--;
383             q->size -= pkt1->pkt.size + sizeof(*pkt1);
384             *pkt = pkt1->pkt;
385             av_free(pkt1);
386             ret = 1;
387             break;
388         } else if (!block) {
389             ret = 0;
390             break;
391         } else {
392             SDL_CondWait(q->cond, q->mutex);
393         }
394     }
395     SDL_UnlockMutex(q->mutex);
396     return ret;
397 }
398
399 static inline void fill_rectangle(SDL_Surface *screen,
400                                   int x, int y, int w, int h, int color)
401 {
402     SDL_Rect rect;
403     rect.x = x;
404     rect.y = y;
405     rect.w = w;
406     rect.h = h;
407     SDL_FillRect(screen, &rect, color);
408 }
409
410 #if 0
411 /* draw only the border of a rectangle */
412 void fill_border(VideoState *s, int x, int y, int w, int h, int color)
413 {
414     int w1, w2, h1, h2;
415
416     /* fill the background */
417     w1 = x;
418     if (w1 < 0)
419         w1 = 0;
420     w2 = s->width - (x + w);
421     if (w2 < 0)
422         w2 = 0;
423     h1 = y;
424     if (h1 < 0)
425         h1 = 0;
426     h2 = s->height - (y + h);
427     if (h2 < 0)
428         h2 = 0;
429     fill_rectangle(screen,
430                    s->xleft, s->ytop,
431                    w1, s->height,
432                    color);
433     fill_rectangle(screen,
434                    s->xleft + s->width - w2, s->ytop,
435                    w2, s->height,
436                    color);
437     fill_rectangle(screen,
438                    s->xleft + w1, s->ytop,
439                    s->width - w1 - w2, h1,
440                    color);
441     fill_rectangle(screen,
442                    s->xleft + w1, s->ytop + s->height - h2,
443                    s->width - w1 - w2, h2,
444                    color);
445 }
446 #endif
447
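/* blend newp over oldp with opacity a; the shift s accounts for newp being a sum of 2^s samples */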
448 #define ALPHA_BLEND(a, oldp, newp, s)\
449 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
450
451 #define RGBA_IN(r, g, b, a, s)\
452 {\
453     unsigned int v = ((const uint32_t *)(s))[0];\
454     a = (v >> 24) & 0xff;\
455     r = (v >> 16) & 0xff;\
456     g = (v >> 8) & 0xff;\
457     b = v & 0xff;\
458 }
459
460 #define YUVA_IN(y, u, v, a, s, pal)\
461 {\
462     unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
463     a = (val >> 24) & 0xff;\
464     y = (val >> 16) & 0xff;\
465     u = (val >> 8) & 0xff;\
466     v = val & 0xff;\
467 }
468
469 #define YUVA_OUT(d, y, u, v, a)\
470 {\
471     ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
472 }
473
474
475 #define BPP 1
476
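/* alpha-blend a palettized subtitle rectangle onto a YUV420P picture; since chroma is
   subsampled 2x2, odd leading/trailing rows and columns are handled separately */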
477 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
478 {
479     int wrap, wrap3, width2, skip2;
480     int y, u, v, a, u1, v1, a1, w, h;
481     uint8_t *lum, *cb, *cr;
482     const uint8_t *p;
483     const uint32_t *pal;
484     int dstx, dsty, dstw, dsth;
485
486     dstw = av_clip(rect->w, 0, imgw);
487     dsth = av_clip(rect->h, 0, imgh);
488     dstx = av_clip(rect->x, 0, imgw - dstw);
489     dsty = av_clip(rect->y, 0, imgh - dsth);
490     lum = dst->data[0] + dsty * dst->linesize[0];
491     cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
492     cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
493
494     width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
495     skip2 = dstx >> 1;
496     wrap = dst->linesize[0];
497     wrap3 = rect->pict.linesize[0];
498     p = rect->pict.data[0];
499     pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */
500
501     if (dsty & 1) {
502         lum += dstx;
503         cb += skip2;
504         cr += skip2;
505
506         if (dstx & 1) {
507             YUVA_IN(y, u, v, a, p, pal);
508             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
509             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
510             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
511             cb++;
512             cr++;
513             lum++;
514             p += BPP;
515         }
516         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
517             YUVA_IN(y, u, v, a, p, pal);
518             u1 = u;
519             v1 = v;
520             a1 = a;
521             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
522
523             YUVA_IN(y, u, v, a, p + BPP, pal);
524             u1 += u;
525             v1 += v;
526             a1 += a;
527             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
528             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
529             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
530             cb++;
531             cr++;
532             p += 2 * BPP;
533             lum += 2;
534         }
535         if (w) {
536             YUVA_IN(y, u, v, a, p, pal);
537             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
538             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
539             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
540             p++;
541             lum++;
542         }
543         p += wrap3 - dstw * BPP;
544         lum += wrap - dstw - dstx;
545         cb += dst->linesize[1] - width2 - skip2;
546         cr += dst->linesize[2] - width2 - skip2;
547     }
548     for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
549         lum += dstx;
550         cb += skip2;
551         cr += skip2;
552
553         if (dstx & 1) {
554             YUVA_IN(y, u, v, a, p, pal);
555             u1 = u;
556             v1 = v;
557             a1 = a;
558             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
559             p += wrap3;
560             lum += wrap;
561             YUVA_IN(y, u, v, a, p, pal);
562             u1 += u;
563             v1 += v;
564             a1 += a;
565             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
566             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
567             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
568             cb++;
569             cr++;
570             p += -wrap3 + BPP;
571             lum += -wrap + 1;
572         }
573         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
574             YUVA_IN(y, u, v, a, p, pal);
575             u1 = u;
576             v1 = v;
577             a1 = a;
578             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
579
580             YUVA_IN(y, u, v, a, p + BPP, pal);
581             u1 += u;
582             v1 += v;
583             a1 += a;
584             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
585             p += wrap3;
586             lum += wrap;
587
588             YUVA_IN(y, u, v, a, p, pal);
589             u1 += u;
590             v1 += v;
591             a1 += a;
592             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
593
594             YUVA_IN(y, u, v, a, p + BPP, pal);
595             u1 += u;
596             v1 += v;
597             a1 += a;
598             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
599
600             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
601             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
602
603             cb++;
604             cr++;
605             p += -wrap3 + 2 * BPP;
606             lum += -wrap + 2;
607         }
608         if (w) {
609             YUVA_IN(y, u, v, a, p, pal);
610             u1 = u;
611             v1 = v;
612             a1 = a;
613             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
614             p += wrap3;
615             lum += wrap;
616             YUVA_IN(y, u, v, a, p, pal);
617             u1 += u;
618             v1 += v;
619             a1 += a;
620             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
621             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
622             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
623             cb++;
624             cr++;
625             p += -wrap3 + BPP;
626             lum += -wrap + 1;
627         }
628         p += wrap3 + (wrap3 - dstw * BPP);
629         lum += wrap + (wrap - dstw - dstx);
630         cb += dst->linesize[1] - width2 - skip2;
631         cr += dst->linesize[2] - width2 - skip2;
632     }
633     /* handle odd height */
634     if (h) {
635         lum += dstx;
636         cb += skip2;
637         cr += skip2;
638
639         if (dstx & 1) {
640             YUVA_IN(y, u, v, a, p, pal);
641             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
642             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
643             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
644             cb++;
645             cr++;
646             lum++;
647             p += BPP;
648         }
649         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
650             YUVA_IN(y, u, v, a, p, pal);
651             u1 = u;
652             v1 = v;
653             a1 = a;
654             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
655
656             YUVA_IN(y, u, v, a, p + BPP, pal);
657             u1 += u;
658             v1 += v;
659             a1 += a;
660             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
661             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
662             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
663             cb++;
664             cr++;
665             p += 2 * BPP;
666             lum += 2;
667         }
668         if (w) {
669             YUVA_IN(y, u, v, a, p, pal);
670             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
671             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
672             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
673         }
674     }
675 }
676
677 static void free_subpicture(SubPicture *sp)
678 {
679     avsubtitle_free(&sp->sub);
680 }
681
682 static void video_image_display(VideoState *is)
683 {
684     VideoPicture *vp;
685     SubPicture *sp;
686     AVPicture pict;
687     float aspect_ratio;
688     int width, height, x, y;
689     SDL_Rect rect;
690     int i;
691
692     vp = &is->pictq[is->pictq_rindex];
693     if (vp->bmp) {
694 #if CONFIG_AVFILTER
695          if (vp->picref->video->pixel_aspect.num == 0)
696              aspect_ratio = 0;
697          else
698              aspect_ratio = av_q2d(vp->picref->video->pixel_aspect);
699 #else
700
701         /* XXX: use variable in the frame */
702         if (is->video_st->sample_aspect_ratio.num)
703             aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
704         else if (is->video_st->codec->sample_aspect_ratio.num)
705             aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
706         else
707             aspect_ratio = 0;
708 #endif
709         if (aspect_ratio <= 0.0)
710             aspect_ratio = 1.0;
711         aspect_ratio *= (float)vp->width / (float)vp->height;
712
713         if (is->subtitle_st) {
714             if (is->subpq_size > 0) {
715                 sp = &is->subpq[is->subpq_rindex];
716
717                 if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) {
718                     SDL_LockYUVOverlay (vp->bmp);
719
720                     pict.data[0] = vp->bmp->pixels[0];
721                     pict.data[1] = vp->bmp->pixels[2];
722                     pict.data[2] = vp->bmp->pixels[1];
723
724                     pict.linesize[0] = vp->bmp->pitches[0];
725                     pict.linesize[1] = vp->bmp->pitches[2];
726                     pict.linesize[2] = vp->bmp->pitches[1];
727
728                     for (i = 0; i < sp->sub.num_rects; i++)
729                         blend_subrect(&pict, sp->sub.rects[i],
730                                       vp->bmp->w, vp->bmp->h);
731
732                     SDL_UnlockYUVOverlay (vp->bmp);
733                 }
734             }
735         }
736
737
738         /* XXX: we suppose the screen has a 1.0 pixel ratio */
739         height = is->height;
740         width = ((int)rint(height * aspect_ratio)) & ~1;
741         if (width > is->width) {
742             width = is->width;
743             height = ((int)rint(width / aspect_ratio)) & ~1;
744         }
745         x = (is->width - width) / 2;
746         y = (is->height - height) / 2;
747         if (!is->no_background) {
748             /* fill the background */
749             //            fill_border(is, x, y, width, height, QERGB(0x00, 0x00, 0x00));
750         } else {
751             is->no_background = 0;
752         }
753         rect.x = is->xleft + x;
754         rect.y = is->ytop  + y;
755         rect.w = width;
756         rect.h = height;
757         SDL_DisplayYUVOverlay(vp->bmp, &rect);
758     } else {
759 #if 0
760         fill_rectangle(screen,
761                        is->xleft, is->ytop, is->width, is->height,
762                        QERGB(0x00, 0x00, 0x00));
763 #endif
764     }
765 }
766
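/* modulo that never returns a negative value, used to wrap indexes into the circular sample array */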
767 static inline int compute_mod(int a, int b)
768 {
769     return a < 0 ? a%b + b : a%b;
770 }
771
772 static void video_audio_display(VideoState *s)
773 {
774     int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
775     int ch, channels, h, h2, bgcolor, fgcolor;
776     int16_t time_diff;
777     int rdft_bits, nb_freq;
778
779     for(rdft_bits=1; (1<<rdft_bits)<2*s->height; rdft_bits++)
780         ;
781     nb_freq= 1<<(rdft_bits-1);
782
783     /* compute display index : center on currently output samples */
784     channels = s->audio_st->codec->channels;
785     nb_display_channels = channels;
786     if (!s->paused) {
787         int data_used= s->show_audio==1 ? s->width : (2*nb_freq);
788         n = 2 * channels;
789         delay = audio_write_get_buf_size(s);
790         delay /= n;
791
792         /* to be more precise, we take into account the time spent since
793            the last buffer computation */
794         if (audio_callback_time) {
795             time_diff = av_gettime() - audio_callback_time;
796             delay -= (time_diff * s->audio_st->codec->sample_rate) / 1000000;
797         }
798
799         delay += 2*data_used;
800         if (delay < data_used)
801             delay = data_used;
802
803         i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
804         if(s->show_audio==1){
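            /* scan the most recent samples for a pronounced falling zero crossing so the
               waveform is drawn from a roughly stable reference point each refresh */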
805             h= INT_MIN;
806             for(i=0; i<1000; i+=channels){
807                 int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
808                 int a= s->sample_array[idx];
809                 int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
810                 int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
811                 int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
812                 int score= a-d;
813                 if(h<score && (b^c)<0){
814                     h= score;
815                     i_start= idx;
816                 }
817             }
818         }
819
820         s->last_i_start = i_start;
821     } else {
822         i_start = s->last_i_start;
823     }
824
825     bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
826     if(s->show_audio==1){
827         fill_rectangle(screen,
828                        s->xleft, s->ytop, s->width, s->height,
829                        bgcolor);
830
831         fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
832
833         /* total height for one channel */
834         h = s->height / nb_display_channels;
835         /* graph height / 2 */
836         h2 = (h * 9) / 20;
837         for(ch = 0;ch < nb_display_channels; ch++) {
838             i = i_start + ch;
839             y1 = s->ytop + ch * h + (h / 2); /* position of center line */
840             for(x = 0; x < s->width; x++) {
841                 y = (s->sample_array[i] * h2) >> 15;
842                 if (y < 0) {
843                     y = -y;
844                     ys = y1 - y;
845                 } else {
846                     ys = y1;
847                 }
848                 fill_rectangle(screen,
849                                s->xleft + x, ys, 1, y,
850                                fgcolor);
851                 i += channels;
852                 if (i >= SAMPLE_ARRAY_SIZE)
853                     i -= SAMPLE_ARRAY_SIZE;
854             }
855         }
856
857         fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
858
859         for(ch = 1;ch < nb_display_channels; ch++) {
860             y = s->ytop + ch * h;
861             fill_rectangle(screen,
862                            s->xleft, y, s->width, 1,
863                            fgcolor);
864         }
865         SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
866     }else{
867         nb_display_channels= FFMIN(nb_display_channels, 2);
868         if(rdft_bits != s->rdft_bits){
869             av_rdft_end(s->rdft);
870             av_free(s->rdft_data);
871             s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
872             s->rdft_bits= rdft_bits;
873             s->rdft_data= av_malloc(4*nb_freq*sizeof(*s->rdft_data));
874         }
875         {
876             FFTSample *data[2];
877             for(ch = 0;ch < nb_display_channels; ch++) {
878                 data[ch] = s->rdft_data + 2*nb_freq*ch;
879                 i = i_start + ch;
880                 for(x = 0; x < 2*nb_freq; x++) {
881                     double w= (x-nb_freq)*(1.0/nb_freq);
882                     data[ch][x]= s->sample_array[i]*(1.0-w*w);
883                     i += channels;
884                     if (i >= SAMPLE_ARRAY_SIZE)
885                         i -= SAMPLE_ARRAY_SIZE;
886                 }
887                 av_rdft_calc(s->rdft, data[ch]);
888             }
889             //least efficient way to do this, we should of course directly access it, but it's more than fast enough
890             for(y=0; y<s->height; y++){
891                 double w= 1/sqrt(nb_freq);
892                 int a= sqrt(w*sqrt(data[0][2*y+0]*data[0][2*y+0] + data[0][2*y+1]*data[0][2*y+1]));
893                 int b= (nb_display_channels == 2 ) ? sqrt(w*sqrt(data[1][2*y+0]*data[1][2*y+0]
894                        + data[1][2*y+1]*data[1][2*y+1])) : a;
895                 a= FFMIN(a,255);
896                 b= FFMIN(b,255);
897                 fgcolor = SDL_MapRGB(screen->format, a, b, (a+b)/2);
898
899                 fill_rectangle(screen,
900                             s->xpos, s->height-y, 1, 1,
901                             fgcolor);
902             }
903         }
904         SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
905         s->xpos++;
906         if(s->xpos >= s->width)
907             s->xpos= s->xleft;
908     }
909 }
910
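/* create or resize the SDL output surface; the size is taken from the fullscreen resolution,
   an explicitly requested size, the filter-graph output or the codec dimensions, in that order */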
911 static int video_open(VideoState *is){
912     int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
913     int w,h;
914
915     if(is_full_screen) flags |= SDL_FULLSCREEN;
916     else               flags |= SDL_RESIZABLE;
917
918     if (is_full_screen && fs_screen_width) {
919         w = fs_screen_width;
920         h = fs_screen_height;
921     } else if(!is_full_screen && screen_width){
922         w = screen_width;
923         h = screen_height;
924 #if CONFIG_AVFILTER
925     }else if (is->out_video_filter && is->out_video_filter->inputs[0]){
926         w = is->out_video_filter->inputs[0]->w;
927         h = is->out_video_filter->inputs[0]->h;
928 #else
929     }else if (is->video_st && is->video_st->codec->width){
930         w = is->video_st->codec->width;
931         h = is->video_st->codec->height;
932 #endif
933     } else {
934         w = 640;
935         h = 480;
936     }
937     if(screen && is->width == screen->w && screen->w == w
938        && is->height== screen->h && screen->h == h)
939         return 0;
940
941 #ifndef __APPLE__
942     screen = SDL_SetVideoMode(w, h, 0, flags);
943 #else
944     /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
945     screen = SDL_SetVideoMode(w, h, 24, flags);
946 #endif
947     if (!screen) {
948         fprintf(stderr, "SDL: could not set video mode - exiting\n");
949         return -1;
950     }
951     if (!window_title)
952         window_title = input_filename;
953     SDL_WM_SetCaption(window_title, window_title);
954
955     is->width = screen->w;
956     is->height = screen->h;
957
958     return 0;
959 }
960
961 /* display the current picture, if any */
962 static void video_display(VideoState *is)
963 {
964     if(!screen)
965         video_open(cur_stream);
966     if (is->audio_st && is->show_audio)
967         video_audio_display(is);
968     else if (is->video_st)
969         video_image_display(is);
970 }
971
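/* thread that periodically queues FF_REFRESH_EVENT so the main loop keeps redrawing the display */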
972 static int refresh_thread(void *opaque)
973 {
974     VideoState *is= opaque;
975     while(!is->abort_request){
976         SDL_Event event;
977         event.type = FF_REFRESH_EVENT;
978         event.user.data1 = opaque;
979         if(!is->refresh){
980             is->refresh=1;
981             SDL_PushEvent(&event);
982         }
983         usleep(is->audio_st && is->show_audio ? rdftspeed*1000 : 5000); //FIXME ideally we should wait the correct time but SDL's event passing is so slow it would be silly
984     }
985     return 0;
986 }
987
988 /* get the current audio clock value */
989 static double get_audio_clock(VideoState *is)
990 {
991     double pts;
992     int hw_buf_size, bytes_per_sec;
993     pts = is->audio_clock;
994     hw_buf_size = audio_write_get_buf_size(is);
995     bytes_per_sec = 0;
996     if (is->audio_st) {
997         bytes_per_sec = is->audio_st->codec->sample_rate *
998             2 * is->audio_st->codec->channels;
999     }
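    /* audio_clock is the pts at the end of the last decoded data; subtract the playback
       time of the data still waiting in the output buffer to get the pts currently heard */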
1000     if (bytes_per_sec)
1001         pts -= (double)hw_buf_size / bytes_per_sec;
1002     return pts;
1003 }
1004
1005 /* get the current video clock value */
1006 static double get_video_clock(VideoState *is)
1007 {
1008     if (is->paused) {
1009         return is->video_current_pts;
1010     } else {
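        /* drift = pts - system time at the last update, so adding the current
           time extrapolates the clock while playback is running */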
1011         return is->video_current_pts_drift + av_gettime() / 1000000.0;
1012     }
1013 }
1014
1015 /* get the current external clock value */
1016 static double get_external_clock(VideoState *is)
1017 {
1018     int64_t ti;
1019     ti = av_gettime();
1020     return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
1021 }
1022
1023 /* get the current master clock value */
1024 static double get_master_clock(VideoState *is)
1025 {
1026     double val;
1027
1028     if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1029         if (is->video_st)
1030             val = get_video_clock(is);
1031         else
1032             val = get_audio_clock(is);
1033     } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1034         if (is->audio_st)
1035             val = get_audio_clock(is);
1036         else
1037             val = get_video_clock(is);
1038     } else {
1039         val = get_external_clock(is);
1040     }
1041     return val;
1042 }
1043
1044 /* seek in the stream */
1045 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1046 {
1047     if (!is->seek_req) {
1048         is->seek_pos = pos;
1049         is->seek_rel = rel;
1050         is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1051         if (seek_by_bytes)
1052             is->seek_flags |= AVSEEK_FLAG_BYTE;
1053         is->seek_req = 1;
1054     }
1055 }
1056
1057 /* pause or resume the video */
1058 static void stream_pause(VideoState *is)
1059 {
1060     if (is->paused) {
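        /* resuming: push frame_timer forward by the time spent paused so
           playback does not try to catch up */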
1061         is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
1062         if(is->read_pause_return != AVERROR(ENOSYS)){
1063             is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
1064         }
1065         is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
1066     }
1067     is->paused = !is->paused;
1068 }
1069
1070 static double compute_target_time(double frame_current_pts, VideoState *is)
1071 {
1072     double delay, sync_threshold, diff;
1073
1074     /* compute nominal delay */
1075     delay = frame_current_pts - is->frame_last_pts;
1076     if (delay <= 0 || delay >= 10.0) {
1077         /* if incorrect delay, use previous one */
1078         delay = is->frame_last_delay;
1079     } else {
1080         is->frame_last_delay = delay;
1081     }
1082     is->frame_last_pts = frame_current_pts;
1083
1084     /* update delay to follow master synchronisation source */
1085     if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1086          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1087         /* if video is slave, we try to correct big delays by
1088            duplicating or deleting a frame */
1089         diff = get_video_clock(is) - get_master_clock(is);
1090
1091         /* skip or repeat frame. We take into account the
1092            delay to compute the threshold. I still don't know
1093            if it is the best guess */
1094         sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1095         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1096             if (diff <= -sync_threshold)
1097                 delay = 0;
1098             else if (diff >= sync_threshold)
1099                 delay = 2 * delay;
1100         }
1101     }
1102     is->frame_timer += delay;
1103 #if defined(DEBUG_SYNC)
1104     printf("video: delay=%0.3f actual_delay=%0.3f pts=%0.3f A-V=%f\n",
1105             delay, actual_delay, frame_current_pts, -diff);
1106 #endif
1107
1108     return is->frame_timer;
1109 }
1110
1111 /* called to display each frame */
1112 static void video_refresh_timer(void *opaque)
1113 {
1114     VideoState *is = opaque;
1115     VideoPicture *vp;
1116
1117     SubPicture *sp, *sp2;
1118
1119     if (is->video_st) {
1120 retry:
1121         if (is->pictq_size == 0) {
1122             //nothing to do, no picture to display in the queue
1123         } else {
1124             double time= av_gettime()/1000000.0;
1125             double next_target;
1126             /* dequeue the picture */
1127             vp = &is->pictq[is->pictq_rindex];
1128
1129             if(time < vp->target_clock)
1130                 return;
1131             /* update current video pts */
1132             is->video_current_pts = vp->pts;
1133             is->video_current_pts_drift = is->video_current_pts - time;
1134             is->video_current_pos = vp->pos;
1135             if(is->pictq_size > 1){
1136                 VideoPicture *nextvp= &is->pictq[(is->pictq_rindex+1)%VIDEO_PICTURE_QUEUE_SIZE];
1137                 assert(nextvp->target_clock >= vp->target_clock);
1138                 next_target= nextvp->target_clock;
1139             }else{
1140                 next_target= vp->target_clock + is->video_clock - vp->pts; //FIXME pass durations cleanly
1141             }
1142             if(framedrop && time > next_target){
1143                 is->skip_frames *= 1.0 + FRAME_SKIP_FACTOR;
1144                 if(is->pictq_size > 1 || time > next_target + 0.5){
1145                     /* update queue size and signal for next picture */
1146                     if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1147                         is->pictq_rindex = 0;
1148
1149                     SDL_LockMutex(is->pictq_mutex);
1150                     is->pictq_size--;
1151                     SDL_CondSignal(is->pictq_cond);
1152                     SDL_UnlockMutex(is->pictq_mutex);
1153                     goto retry;
1154                 }
1155             }
1156
1157             if(is->subtitle_st) {
1158                 if (is->subtitle_stream_changed) {
1159                     SDL_LockMutex(is->subpq_mutex);
1160
1161                     while (is->subpq_size) {
1162                         free_subpicture(&is->subpq[is->subpq_rindex]);
1163
1164                         /* update queue size and signal for next picture */
1165                         if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1166                             is->subpq_rindex = 0;
1167
1168                         is->subpq_size--;
1169                     }
1170                     is->subtitle_stream_changed = 0;
1171
1172                     SDL_CondSignal(is->subpq_cond);
1173                     SDL_UnlockMutex(is->subpq_mutex);
1174                 } else {
1175                     if (is->subpq_size > 0) {
1176                         sp = &is->subpq[is->subpq_rindex];
1177
1178                         if (is->subpq_size > 1)
1179                             sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1180                         else
1181                             sp2 = NULL;
1182
1183                         if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1184                                 || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1185                         {
1186                             free_subpicture(sp);
1187
1188                             /* update queue size and signal for next picture */
1189                             if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1190                                 is->subpq_rindex = 0;
1191
1192                             SDL_LockMutex(is->subpq_mutex);
1193                             is->subpq_size--;
1194                             SDL_CondSignal(is->subpq_cond);
1195                             SDL_UnlockMutex(is->subpq_mutex);
1196                         }
1197                     }
1198                 }
1199             }
1200
1201             /* display picture */
1202             if (!display_disable)
1203                 video_display(is);
1204
1205             /* update queue size and signal for next picture */
1206             if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1207                 is->pictq_rindex = 0;
1208
1209             SDL_LockMutex(is->pictq_mutex);
1210             is->pictq_size--;
1211             SDL_CondSignal(is->pictq_cond);
1212             SDL_UnlockMutex(is->pictq_mutex);
1213         }
1214     } else if (is->audio_st) {
1215         /* draw the next audio frame */
1216
1217         /* if only audio stream, then display the audio bars (better
1218            than nothing, just to test the implementation) */
1219
1220         /* display picture */
1221         if (!display_disable)
1222             video_display(is);
1223     }
1224     if (show_status) {
1225         static int64_t last_time;
1226         int64_t cur_time;
1227         int aqsize, vqsize, sqsize;
1228         double av_diff;
1229
1230         cur_time = av_gettime();
1231         if (!last_time || (cur_time - last_time) >= 30000) {
1232             aqsize = 0;
1233             vqsize = 0;
1234             sqsize = 0;
1235             if (is->audio_st)
1236                 aqsize = is->audioq.size;
1237             if (is->video_st)
1238                 vqsize = is->videoq.size;
1239             if (is->subtitle_st)
1240                 sqsize = is->subtitleq.size;
1241             av_diff = 0;
1242             if (is->audio_st && is->video_st)
1243                 av_diff = get_audio_clock(is) - get_video_clock(is);
1244             printf("%7.2f A-V:%7.3f s:%3.1f aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
1245                    get_master_clock(is),
1246                    av_diff,
1247                    FFMAX(is->skip_frames-1, 0),
1248                    aqsize / 1024,
1249                    vqsize / 1024,
1250                    sqsize,
1251                    is->video_st ? is->video_st->codec->pts_correction_num_faulty_dts : 0,
1252                    is->video_st ? is->video_st->codec->pts_correction_num_faulty_pts : 0);
1253             fflush(stdout);
1254             last_time = cur_time;
1255         }
1256     }
1257 }
1258
1259 static void stream_close(VideoState *is)
1260 {
1261     VideoPicture *vp;
1262     int i;
1263     /* XXX: use a special url_shutdown call to abort parse cleanly */
1264     is->abort_request = 1;
1265     SDL_WaitThread(is->parse_tid, NULL);
1266     SDL_WaitThread(is->refresh_tid, NULL);
1267
1268     /* free all pictures */
1269     for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
1270         vp = &is->pictq[i];
1271 #if CONFIG_AVFILTER
1272         if (vp->picref) {
1273             avfilter_unref_buffer(vp->picref);
1274             vp->picref = NULL;
1275         }
1276 #endif
1277         if (vp->bmp) {
1278             SDL_FreeYUVOverlay(vp->bmp);
1279             vp->bmp = NULL;
1280         }
1281     }
1282     SDL_DestroyMutex(is->pictq_mutex);
1283     SDL_DestroyCond(is->pictq_cond);
1284     SDL_DestroyMutex(is->subpq_mutex);
1285     SDL_DestroyCond(is->subpq_cond);
1286 #if !CONFIG_AVFILTER
1287     if (is->img_convert_ctx)
1288         sws_freeContext(is->img_convert_ctx);
1289 #endif
1290     av_free(is);
1291 }
1292
1293 static void do_exit(void)
1294 {
1295     if (cur_stream) {
1296         stream_close(cur_stream);
1297         cur_stream = NULL;
1298     }
1299     uninit_opts();
1300 #if CONFIG_AVFILTER
1301     avfilter_uninit();
1302 #endif
1303     if (show_status)
1304         printf("\n");
1305     SDL_Quit();
1306     av_log(NULL, AV_LOG_QUIET, "");
1307     exit(0);
1308 }
1309
1310 /* allocate a picture (needs to do that in main thread to avoid
1311    potential locking problems) */
1312 static void alloc_picture(void *opaque)
1313 {
1314     VideoState *is = opaque;
1315     VideoPicture *vp;
1316
1317     vp = &is->pictq[is->pictq_windex];
1318
1319     if (vp->bmp)
1320         SDL_FreeYUVOverlay(vp->bmp);
1321
1322 #if CONFIG_AVFILTER
1323     if (vp->picref)
1324         avfilter_unref_buffer(vp->picref);
1325     vp->picref = NULL;
1326
1327     vp->width   = is->out_video_filter->inputs[0]->w;
1328     vp->height  = is->out_video_filter->inputs[0]->h;
1329     vp->pix_fmt = is->out_video_filter->inputs[0]->format;
1330 #else
1331     vp->width   = is->video_st->codec->width;
1332     vp->height  = is->video_st->codec->height;
1333     vp->pix_fmt = is->video_st->codec->pix_fmt;
1334 #endif
1335
1336     vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
1337                                    SDL_YV12_OVERLAY,
1338                                    screen);
1339     if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
1340         /* SDL allocates a buffer smaller than requested if the video
1341          * overlay hardware is unable to support the requested size. */
1342         fprintf(stderr, "Error: the video system does not support an image\n"
1343                         "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n"
1344                         "to reduce the image size.\n", vp->width, vp->height );
1345         do_exit();
1346     }
1347
1348     SDL_LockMutex(is->pictq_mutex);
1349     vp->allocated = 1;
1350     SDL_CondSignal(is->pictq_cond);
1351     SDL_UnlockMutex(is->pictq_mutex);
1352 }
1353
1354 /**
1355  * Queue the given decoded frame for later display.
1356  * @param pts the dts of the pkt / pts of the frame and guessed if not known
1357  */
1358 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, int64_t pos)
1359 {
1360     VideoPicture *vp;
1361 #if CONFIG_AVFILTER
1362     AVPicture pict_src;
1363 #endif
1364     /* wait until we have space to put a new picture */
1365     SDL_LockMutex(is->pictq_mutex);
1366
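    /* picture queue full while no refresh is pending: decoding is running
       ahead of the display, so decay the frame-skip factor */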
1367     if(is->pictq_size>=VIDEO_PICTURE_QUEUE_SIZE && !is->refresh)
1368         is->skip_frames= FFMAX(1.0 - FRAME_SKIP_FACTOR, is->skip_frames * (1.0-FRAME_SKIP_FACTOR));
1369
1370     while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1371            !is->videoq.abort_request) {
1372         SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1373     }
1374     SDL_UnlockMutex(is->pictq_mutex);
1375
1376     if (is->videoq.abort_request)
1377         return -1;
1378
1379     vp = &is->pictq[is->pictq_windex];
1380
1381     /* alloc or resize hardware picture buffer */
1382     if (!vp->bmp ||
1383 #if CONFIG_AVFILTER
1384         vp->width  != is->out_video_filter->inputs[0]->w ||
1385         vp->height != is->out_video_filter->inputs[0]->h) {
1386 #else
1387         vp->width != is->video_st->codec->width ||
1388         vp->height != is->video_st->codec->height) {
1389 #endif
1390         SDL_Event event;
1391
1392         vp->allocated = 0;
1393
1394         /* the allocation must be done in the main thread to avoid
1395            locking problems */
1396         event.type = FF_ALLOC_EVENT;
1397         event.user.data1 = is;
1398         SDL_PushEvent(&event);
1399
1400         /* wait until the picture is allocated */
1401         SDL_LockMutex(is->pictq_mutex);
1402         while (!vp->allocated && !is->videoq.abort_request) {
1403             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1404         }
1405         SDL_UnlockMutex(is->pictq_mutex);
1406
1407         if (is->videoq.abort_request)
1408             return -1;
1409     }
1410
1411     /* if the frame is not skipped, then display it */
1412     if (vp->bmp) {
1413         AVPicture pict;
1414 #if CONFIG_AVFILTER
1415         if(vp->picref)
1416             avfilter_unref_buffer(vp->picref);
1417         vp->picref = src_frame->opaque;
1418 #endif
1419
1420         /* get a pointer on the bitmap */
1421         SDL_LockYUVOverlay (vp->bmp);
1422
1423         memset(&pict,0,sizeof(AVPicture));
1424         pict.data[0] = vp->bmp->pixels[0];
1425         pict.data[1] = vp->bmp->pixels[2];
1426         pict.data[2] = vp->bmp->pixels[1];
1427
1428         pict.linesize[0] = vp->bmp->pitches[0];
1429         pict.linesize[1] = vp->bmp->pitches[2];
1430         pict.linesize[2] = vp->bmp->pitches[1];
1431
1432 #if CONFIG_AVFILTER
1433         pict_src.data[0] = src_frame->data[0];
1434         pict_src.data[1] = src_frame->data[1];
1435         pict_src.data[2] = src_frame->data[2];
1436
1437         pict_src.linesize[0] = src_frame->linesize[0];
1438         pict_src.linesize[1] = src_frame->linesize[1];
1439         pict_src.linesize[2] = src_frame->linesize[2];
1440
1441         //FIXME use direct rendering
1442         av_picture_copy(&pict, &pict_src,
1443                         vp->pix_fmt, vp->width, vp->height);
1444 #else
1445         sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
1446         is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
1447             vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
1448             PIX_FMT_YUV420P, sws_flags, NULL, NULL, NULL);
1449         if (is->img_convert_ctx == NULL) {
1450             fprintf(stderr, "Cannot initialize the conversion context\n");
1451             exit(1);
1452         }
1453         sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1454                   0, vp->height, pict.data, pict.linesize);
1455 #endif
1456         /* update the bitmap content */
1457         SDL_UnlockYUVOverlay(vp->bmp);
1458
1459         vp->pts = pts;
1460         vp->pos = pos;
1461
1462         /* now we can update the picture count */
1463         if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1464             is->pictq_windex = 0;
1465         SDL_LockMutex(is->pictq_mutex);
1466         vp->target_clock= compute_target_time(vp->pts, is);
1467
1468         is->pictq_size++;
1469         SDL_UnlockMutex(is->pictq_mutex);
1470     }
1471     return 0;
1472 }
1473
1474 /**
1475  * compute the exact PTS for the picture if it is omitted in the stream
1476  * @param pts1 the dts of the pkt / pts of the frame
1477  */
1478 static int output_picture(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
1479 {
1480     double frame_delay, pts;
1481
1482     pts = pts1;
1483
1484     if (pts != 0) {
1485         /* update video clock with pts, if present */
1486         is->video_clock = pts;
1487     } else {
1488         pts = is->video_clock;
1489     }
1490     /* update video clock for next frame */
1491     frame_delay = av_q2d(is->video_st->codec->time_base);
1492     /* for MPEG2, the frame can be repeated, so we update the
1493        clock accordingly */
1494     frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1495     is->video_clock += frame_delay;
1496
1497 #if defined(DEBUG_SYNC) && 0
1498     printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
1499            av_get_pict_type_char(src_frame->pict_type), pts, pts1);
1500 #endif
1501     return queue_picture(is, src_frame, pts, pos);
1502 }
1503
1504 static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
1505 {
1506     int len1, got_picture, i;
1507
1508     if (packet_queue_get(&is->videoq, pkt, 1) < 0)
1509         return -1;
1510
1511     if (pkt->data == flush_pkt.data) {
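        /* the flush packet signals a discontinuity (e.g. after a seek):
           reset the codec and let the queued pictures drain */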
1512         avcodec_flush_buffers(is->video_st->codec);
1513
1514         SDL_LockMutex(is->pictq_mutex);
1515         //Make sure there are no long delay timers (ideally we should just flush the queue but that's harder)
1516         for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
1517             is->pictq[i].target_clock= 0;
1518         }
1519         while (is->pictq_size && !is->videoq.abort_request) {
1520             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1521         }
1522         is->video_current_pos = -1;
1523         SDL_UnlockMutex(is->pictq_mutex);
1524
1525         is->frame_last_pts = AV_NOPTS_VALUE;
1526         is->frame_last_delay = 0;
1527         is->frame_timer = (double)av_gettime() / 1000000.0;
1528         is->skip_frames = 1;
1529         is->skip_frames_index = 0;
1530         return 0;
1531     }
1532
1533     len1 = avcodec_decode_video2(is->video_st->codec,
1534                                  frame, &got_picture,
1535                                  pkt);
1536
1537     if (got_picture) {
1538         if (decoder_reorder_pts == -1) {
1539             *pts = frame->best_effort_timestamp;
1540         } else if (decoder_reorder_pts) {
1541             *pts = frame->pkt_pts;
1542         } else {
1543             *pts = frame->pkt_dts;
1544         }
1545
1546         if (*pts == AV_NOPTS_VALUE) {
1547             *pts = 0;
1548         }
1549
1550         is->skip_frames_index += 1;
1551         if(is->skip_frames_index >= is->skip_frames){
1552             is->skip_frames_index -= FFMAX(is->skip_frames, 1.0);
1553             return 1;
1554         }
1555
1556     }
1557     return 0;
1558 }
1559
1560 #if CONFIG_AVFILTER
1561 typedef struct {
1562     VideoState *is;
1563     AVFrame *frame;
1564     int use_dr1;
1565 } FilterPriv;
1566
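/* get_buffer() replacement handed to the decoder when direct rendering is possible:
   the frame's buffer comes from the filter graph, avoiding a copy into the filters */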
1567 static int input_get_buffer(AVCodecContext *codec, AVFrame *pic)
1568 {
1569     AVFilterContext *ctx = codec->opaque;
1570     AVFilterBufferRef  *ref;
1571     int perms = AV_PERM_WRITE;
1572     int i, w, h, stride[4];
1573     unsigned edge;
1574     int pixel_size;
1575
1576     av_assert0(codec->flags & CODEC_FLAG_EMU_EDGE);
1577
1578     if (codec->codec->capabilities & CODEC_CAP_NEG_LINESIZES)
1579         perms |= AV_PERM_NEG_LINESIZES;
1580
1581     if(pic->buffer_hints & FF_BUFFER_HINTS_VALID) {
1582         if(pic->buffer_hints & FF_BUFFER_HINTS_READABLE) perms |= AV_PERM_READ;
1583         if(pic->buffer_hints & FF_BUFFER_HINTS_PRESERVE) perms |= AV_PERM_PRESERVE;
1584         if(pic->buffer_hints & FF_BUFFER_HINTS_REUSABLE) perms |= AV_PERM_REUSE2;
1585     }
1586     if(pic->reference) perms |= AV_PERM_READ | AV_PERM_PRESERVE;
1587
1588     w = codec->width;
1589     h = codec->height;
1590
1591     if(av_image_check_size(w, h, 0, codec))
1592         return -1;
1593
1594     avcodec_align_dimensions2(codec, &w, &h, stride);
1595     edge = codec->flags & CODEC_FLAG_EMU_EDGE ? 0 : avcodec_get_edge_width();
1596     w += edge << 1;
1597     h += edge << 1;
1598
1599     if(!(ref = avfilter_get_video_buffer(ctx->outputs[0], perms, w, h)))
1600         return -1;
1601
1602     pixel_size = av_pix_fmt_descriptors[ref->format].comp[0].step_minus1+1;
1603     ref->video->w = codec->width;
1604     ref->video->h = codec->height;
1605     for(i = 0; i < 4; i ++) {
1606         unsigned hshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_w : 0;
1607         unsigned vshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_h : 0;
1608
1609         if (ref->data[i]) {
1610             ref->data[i]    += ((edge * pixel_size) >> hshift) + ((edge * ref->linesize[i]) >> vshift);
1611         }
1612         pic->data[i]     = ref->data[i];
1613         pic->linesize[i] = ref->linesize[i];
1614     }
1615     pic->opaque = ref;
1616     pic->age    = INT_MAX;
1617     pic->type   = FF_BUFFER_TYPE_USER;
1618     pic->reordered_opaque = codec->reordered_opaque;
1619     if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
1620     else           pic->pkt_pts = AV_NOPTS_VALUE;
1621     return 0;
1622 }
1623
1624 static void input_release_buffer(AVCodecContext *codec, AVFrame *pic)
1625 {
1626     memset(pic->data, 0, sizeof(pic->data));
1627     avfilter_unref_buffer(pic->opaque);
1628 }
1629
1630 static int input_reget_buffer(AVCodecContext *codec, AVFrame *pic)
1631 {
1632     AVFilterBufferRef *ref = pic->opaque;
1633
1634     if (pic->data[0] == NULL) {
1635         pic->buffer_hints |= FF_BUFFER_HINTS_READABLE;
1636         return codec->get_buffer(codec, pic);
1637     }
1638
1639     if ((codec->width != ref->video->w) || (codec->height != ref->video->h) ||
1640         (codec->pix_fmt != ref->format)) {
1641         av_log(codec, AV_LOG_ERROR, "Picture properties changed.\n");
1642         return -1;
1643     }
1644
1645     pic->reordered_opaque = codec->reordered_opaque;
1646     if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
1647     else           pic->pkt_pts = AV_NOPTS_VALUE;
1648     return 0;
1649 }
1650
1651 static int input_init(AVFilterContext *ctx, const char *args, void *opaque)
1652 {
1653     FilterPriv *priv = ctx->priv;
1654     AVCodecContext *codec;
1655     if(!opaque) return -1;
1656
1657     priv->is = opaque;
1658     codec    = priv->is->video_st->codec;
1659     codec->opaque = ctx;
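    /* if the decoder supports direct rendering (DR1), install our buffer
       callbacks so decoded pictures land directly in buffers obtained from the
       filter graph, avoiding a per-frame copy in input_request_frame() */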
1660     if (codec->codec->capabilities & CODEC_CAP_DR1) {
1662         codec->flags |= CODEC_FLAG_EMU_EDGE;
1663         priv->use_dr1 = 1;
1664         codec->get_buffer     = input_get_buffer;
1665         codec->release_buffer = input_release_buffer;
1666         codec->reget_buffer   = input_reget_buffer;
1667         codec->thread_safe_callbacks = 1;
1668     }
1669
1670     priv->frame = avcodec_alloc_frame();
1671
1672     return 0;
1673 }
1674
1675 static void input_uninit(AVFilterContext *ctx)
1676 {
1677     FilterPriv *priv = ctx->priv;
1678     av_free(priv->frame);
1679 }
1680
1681 static int input_request_frame(AVFilterLink *link)
1682 {
1683     FilterPriv *priv = link->src->priv;
1684     AVFilterBufferRef *picref;
1685     int64_t pts = 0;
1686     AVPacket pkt;
1687     int ret;
1688
1689     while (!(ret = get_video_frame(priv->is, priv->frame, &pts, &pkt)))
1690         av_free_packet(&pkt);
1691     if (ret < 0)
1692         return -1;
1693
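    /* with direct rendering the decoded picture already lives in a filter buffer
       (frame->opaque): just take a new reference to it; otherwise grab a buffer
       from the output link and copy the picture into it */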
1694     if(priv->use_dr1) {
1695         picref = avfilter_ref_buffer(priv->frame->opaque, ~0);
1696     } else {
1697         picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, link->w, link->h);
1698         av_image_copy(picref->data, picref->linesize,
1699                       priv->frame->data, priv->frame->linesize,
1700                       picref->format, link->w, link->h);
1701     }
1702     av_free_packet(&pkt);
1703
1704     picref->pts = pts;
1705     picref->pos = pkt.pos;
1706     picref->video->pixel_aspect = priv->is->video_st->codec->sample_aspect_ratio;
1707     avfilter_start_frame(link, picref);
1708     avfilter_draw_slice(link, 0, link->h, 1);
1709     avfilter_end_frame(link);
1710
1711     return 0;
1712 }
1713
1714 static int input_query_formats(AVFilterContext *ctx)
1715 {
1716     FilterPriv *priv = ctx->priv;
1717     enum PixelFormat pix_fmts[] = {
1718         priv->is->video_st->codec->pix_fmt, PIX_FMT_NONE
1719     };
1720
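    /* offer only the decoder's native pixel format; conversion for downstream
       filters or the sink is left to automatically inserted scale filters */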
1721     avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts));
1722     return 0;
1723 }
1724
1725 static int input_config_props(AVFilterLink *link)
1726 {
1727     FilterPriv *priv  = link->src->priv;
1728     AVCodecContext *c = priv->is->video_st->codec;
1729
1730     link->w = c->width;
1731     link->h = c->height;
1732     link->time_base = priv->is->video_st->time_base;
1733
1734     return 0;
1735 }
1736
1737 static AVFilter input_filter =
1738 {
1739     .name      = "ffplay_input",
1740
1741     .priv_size = sizeof(FilterPriv),
1742
1743     .init      = input_init,
1744     .uninit    = input_uninit,
1745
1746     .query_formats = input_query_formats,
1747
1748     .inputs    = (AVFilterPad[]) {{ .name = NULL }},
1749     .outputs   = (AVFilterPad[]) {{ .name = "default",
1750                                     .type = AVMEDIA_TYPE_VIDEO,
1751                                     .request_frame = input_request_frame,
1752                                     .config_props  = input_config_props, },
1753                                   { .name = NULL }},
1754 };
1755
1756 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters)
1757 {
1758     char sws_flags_str[128];
1759     int ret;
1760     FFSinkContext ffsink_ctx = { .pix_fmt = PIX_FMT_YUV420P };
1761     AVFilterContext *filt_src = NULL, *filt_out = NULL;
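    /* the sws flags recorded here are picked up by any scale filters that
       libavfilter inserts automatically while configuring the graph */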
1762     snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%d", sws_flags);
1763     graph->scale_sws_opts = av_strdup(sws_flags_str);
1764
1765     if ((ret = avfilter_graph_create_filter(&filt_src, &input_filter, "src",
1766                                             NULL, is, graph)) < 0)
1767         goto the_end;
1768     if ((ret = avfilter_graph_create_filter(&filt_out, &ffsink, "out",
1769                                             NULL, &ffsink_ctx, graph)) < 0)
1770         goto the_end;
1771
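    /* link the user-supplied -vf chain between our source and sink: the chain's
       unconnected input is labelled "in" and its output "out" */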
1772     if(vfilters) {
1773         AVFilterInOut *outputs = av_malloc(sizeof(AVFilterInOut));
1774         AVFilterInOut *inputs  = av_malloc(sizeof(AVFilterInOut));
1775
1776         outputs->name    = av_strdup("in");
1777         outputs->filter_ctx = filt_src;
1778         outputs->pad_idx = 0;
1779         outputs->next    = NULL;
1780
1781         inputs->name    = av_strdup("out");
1782         inputs->filter_ctx = filt_out;
1783         inputs->pad_idx = 0;
1784         inputs->next    = NULL;
1785
1786         if ((ret = avfilter_graph_parse(graph, vfilters, inputs, outputs, NULL)) < 0)
1787             goto the_end;
1788         av_freep(&vfilters);
1789     } else {
1790         if ((ret = avfilter_link(filt_src, 0, filt_out, 0)) < 0)
1791             goto the_end;
1792     }
1793
1794     if ((ret = avfilter_graph_config(graph, NULL)) < 0)
1795         goto the_end;
1796
1797     is->out_video_filter = filt_out;
1798 the_end:
1799     return ret;
1800 }
1801
1802 #endif  /* CONFIG_AVFILTER */
1803
1804 static int video_thread(void *arg)
1805 {
1806     VideoState *is = arg;
1807     AVFrame *frame= avcodec_alloc_frame();
1808     int64_t pts_int;
1809     double pts;
1810     int ret;
1811
1812 #if CONFIG_AVFILTER
1813     AVFilterGraph *graph = avfilter_graph_alloc();
1814     AVFilterContext *filt_out = NULL;
1815     int64_t pos;
1816
1817     if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
1818         goto the_end;
1819     filt_out = is->out_video_filter;
1820 #endif
1821
1822     for(;;) {
1823 #if !CONFIG_AVFILTER
1824         AVPacket pkt;
1825 #else
1826         AVFilterBufferRef *picref;
1827         AVRational tb;
1828 #endif
1829         while (is->paused && !is->videoq.abort_request)
1830             SDL_Delay(10);
1831 #if CONFIG_AVFILTER
1832         ret = get_filtered_video_frame(filt_out, frame, &picref, &tb);
1833         if (picref) {
1834             pts_int = picref->pts;
1835             pos     = picref->pos;
1836             frame->opaque = picref;
1837         }
1838
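        /* the filter graph may run in a different time base than the video stream
           (e.g. after an fps or settb filter); rescale the pts so the rest of the
           player can keep using the stream time base */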
1839         if (av_cmp_q(tb, is->video_st->time_base)) {
1840             av_unused int64_t pts1 = pts_int;
1841             pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
1842             av_dlog(NULL, "video_thread(): "
1843                     "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
1844                     tb.num, tb.den, pts1,
1845                     is->video_st->time_base.num, is->video_st->time_base.den, pts_int);
1846         }
1847 #else
1848         ret = get_video_frame(is, frame, &pts_int, &pkt);
1849 #endif
1850
1851         if (ret < 0) goto the_end;
1852
1853         if (!ret)
1854             continue;
1855
1856         pts = pts_int*av_q2d(is->video_st->time_base);
1857
1858 #if CONFIG_AVFILTER
1859         ret = output_picture(is, frame, pts, pos);
1860 #else
1861         ret = output_picture(is, frame, pts,  pkt.pos);
1862         av_free_packet(&pkt);
1863 #endif
1864         if (ret < 0)
1865             goto the_end;
1866
1867         if (step)
1868             if (cur_stream)
1869                 stream_pause(cur_stream);
1870     }
1871  the_end:
1872 #if CONFIG_AVFILTER
1873     avfilter_graph_free(&graph);
1874 #endif
1875     av_free(frame);
1876     return 0;
1877 }
1878
1879 static int subtitle_thread(void *arg)
1880 {
1881     VideoState *is = arg;
1882     SubPicture *sp;
1883     AVPacket pkt1, *pkt = &pkt1;
1884     int len1, got_subtitle;
1885     double pts;
1886     int i, j;
1887     int r, g, b, y, u, v, a;
1888
1889     for(;;) {
1890         while (is->paused && !is->subtitleq.abort_request) {
1891             SDL_Delay(10);
1892         }
1893         if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
1894             break;
1895
1896         if(pkt->data == flush_pkt.data){
1897             avcodec_flush_buffers(is->subtitle_st->codec);
1898             continue;
1899         }
1900         SDL_LockMutex(is->subpq_mutex);
1901         while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
1902                !is->subtitleq.abort_request) {
1903             SDL_CondWait(is->subpq_cond, is->subpq_mutex);
1904         }
1905         SDL_UnlockMutex(is->subpq_mutex);
1906
1907         if (is->subtitleq.abort_request)
1908             goto the_end;
1909
1910         sp = &is->subpq[is->subpq_windex];
1911
1912         /* NOTE: pts is the PTS of the _first_ subtitle picture beginning in
1913            this packet, if any */
1914         pts = 0;
1915         if (pkt->pts != AV_NOPTS_VALUE)
1916             pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;
1917
1918         len1 = avcodec_decode_subtitle2(is->subtitle_st->codec,
1919                                     &sp->sub, &got_subtitle,
1920                                     pkt);
1921 //            if (len1 < 0)
1922 //                break;
1923         if (got_subtitle && sp->sub.format == 0) {
1924             sp->pts = pts;
1925
1926             for (i = 0; i < sp->sub.num_rects; i++)
1927             {
1928                 for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
1929                 {
1930                     RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
1931                     y = RGB_TO_Y_CCIR(r, g, b);
1932                     u = RGB_TO_U_CCIR(r, g, b, 0);
1933                     v = RGB_TO_V_CCIR(r, g, b, 0);
1934                     YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
1935                 }
1936             }
1937
1938             /* now we can update the picture count */
1939             if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
1940                 is->subpq_windex = 0;
1941             SDL_LockMutex(is->subpq_mutex);
1942             is->subpq_size++;
1943             SDL_UnlockMutex(is->subpq_mutex);
1944         }
1945         av_free_packet(pkt);
1946 //        if (step)
1947 //            if (cur_stream)
1948 //                stream_pause(cur_stream);
1949     }
1950  the_end:
1951     return 0;
1952 }
1953
1954 /* copy decoded samples into the circular buffer used by the audio waveform display */
1955 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1956 {
1957     int size, len, channels;
1958
1959     channels = is->audio_st->codec->channels;
1960
1961     size = samples_size / sizeof(short);
1962     while (size > 0) {
1963         len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1964         if (len > size)
1965             len = size;
1966         memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1967         samples += len;
1968         is->sample_array_index += len;
1969         if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1970             is->sample_array_index = 0;
1971         size -= len;
1972     }
1973 }
1974
1975 /* return the new audio buffer size (samples can be added or deleted
1976    to get better sync if video or the external clock is the master) */
1977 static int synchronize_audio(VideoState *is, short *samples,
1978                              int samples_size1, double pts)
1979 {
1980     int n, samples_size;
1981     double ref_clock;
1982
1983     n = 2 * is->audio_st->codec->channels;
1984     samples_size = samples_size1;
1985
1986     /* if not master, then we try to remove or add samples to correct the clock */
1987     if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1988          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1989         double diff, avg_diff;
1990         int wanted_size, min_size, max_size, nb_samples;
1991
1992         ref_clock = get_master_clock(is);
1993         diff = get_audio_clock(is) - ref_clock;
1994
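        /* audio_diff_cum is an exponentially weighted sum (cum = diff + coef * cum),
           so cum * (1 - coef) is a moving average in which a difference measured
           AUDIO_DIFF_AVG_NB callbacks ago carries only ~1% of the newest one's weight
           (coef = 0.01^(1/AUDIO_DIFF_AVG_NB), see stream_component_open()) */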
1995         if (diff < AV_NOSYNC_THRESHOLD) {
1996             is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1997             if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1998                 /* not enough measurements yet for a reliable estimate */
1999                 is->audio_diff_avg_count++;
2000             } else {
2001                 /* estimate the A-V difference */
2002                 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
2003
2004                 if (fabs(avg_diff) >= is->audio_diff_threshold) {
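                    /* turn the clock error into a byte count: diff seconds *
                       sample_rate gives sample frames, times n = 2 * channels bytes
                       per frame; e.g. diff = 0.02 s at 48 kHz stereo is
                       0.02 * 48000 * 4 = 3840 bytes */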
2005                     wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
2006                     nb_samples = samples_size / n;
2007
2008                     min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
2009                     max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
2010                     if (wanted_size < min_size)
2011                         wanted_size = min_size;
2012                     else if (wanted_size > max_size)
2013                         wanted_size = max_size;
2014
2015                     /* add or remove samples to correct the sync */
2016                     if (wanted_size < samples_size) {
2017                         /* remove samples */
2018                         samples_size = wanted_size;
2019                     } else if (wanted_size > samples_size) {
2020                         uint8_t *samples_end, *q;
2021                         int nb;
2022
2023                         /* add samples by repeating the last sample frame */
2024                         nb = wanted_size - samples_size;
2025                         samples_end = (uint8_t *)samples + samples_size - n;
2026                         q = samples_end + n;
2027                         while (nb > 0) {
2028                             memcpy(q, samples_end, n);
2029                             q += n;
2030                             nb -= n;
2031                         }
2032                         samples_size = wanted_size;
2033                     }
2034                 }
2035 #if 0
2036                 printf("diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
2037                        diff, avg_diff, samples_size - samples_size1,
2038                        is->audio_clock, is->video_clock, is->audio_diff_threshold);
2039 #endif
2040             }
2041         } else {
2042             /* the difference is too big: it may be due to initial PTS errors,
2043                so reset the A-V filter */
2044             is->audio_diff_avg_count = 0;
2045             is->audio_diff_cum = 0;
2046         }
2047     }
2048
2049     return samples_size;
2050 }
2051
2052 /* decode one audio frame and return its uncompressed size */
2053 static int audio_decode_frame(VideoState *is, double *pts_ptr)
2054 {
2055     AVPacket *pkt_temp = &is->audio_pkt_temp;
2056     AVPacket *pkt = &is->audio_pkt;
2057     AVCodecContext *dec= is->audio_st->codec;
2058     int n, len1, data_size;
2059     double pts;
2060
2061     for(;;) {
2062         /* NOTE: the audio packet can contain several frames */
2063         while (pkt_temp->size > 0) {
2064             data_size = sizeof(is->audio_buf1);
2065             len1 = avcodec_decode_audio3(dec,
2066                                         (int16_t *)is->audio_buf1, &data_size,
2067                                         pkt_temp);
2068             if (len1 < 0) {
2069                 /* if error, we skip the frame */
2070                 pkt_temp->size = 0;
2071                 break;
2072             }
2073
2074             pkt_temp->data += len1;
2075             pkt_temp->size -= len1;
2076             if (data_size <= 0)
2077                 continue;
2078
2079             if (dec->sample_fmt != is->audio_src_fmt) {
2080                 if (is->reformat_ctx)
2081                     av_audio_convert_free(is->reformat_ctx);
2082                 is->reformat_ctx= av_audio_convert_alloc(AV_SAMPLE_FMT_S16, 1,
2083                                                          dec->sample_fmt, 1, NULL, 0);
2084                 if (!is->reformat_ctx) {
2085                     fprintf(stderr, "Cannot convert %s sample format to %s sample format\n",
2086                         av_get_sample_fmt_name(dec->sample_fmt),
2087                         av_get_sample_fmt_name(AV_SAMPLE_FMT_S16));
2088                         break;
2089                 }
2090                 is->audio_src_fmt= dec->sample_fmt;
2091             }
2092
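            /* the SDL audio device was opened for interleaved signed 16-bit
               samples, so convert any other decoder output format here */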
2093             if (is->reformat_ctx) {
2094                 const void *ibuf[6]= {is->audio_buf1};
2095                 void *obuf[6]= {is->audio_buf2};
2096                 int istride[6]= {av_get_bits_per_sample_fmt(dec->sample_fmt)/8};
2097                 int ostride[6]= {2};
2098                 int len= data_size/istride[0];
2099                 if (av_audio_convert(is->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) {
2100                     printf("av_audio_convert() failed\n");
2101                     break;
2102                 }
2103                 is->audio_buf= is->audio_buf2;
2104                 /* FIXME: existing code assumes that data_size equals framesize*channels*2;
2105                           remove this legacy cruft */
2106                 data_size= len*2;
2107             }else{
2108                 is->audio_buf= is->audio_buf1;
2109             }
2110
2111             /* use the running audio clock as the pts (it is updated from the packet pts below when available) */
2112             pts = is->audio_clock;
2113             *pts_ptr = pts;
2114             n = 2 * dec->channels;
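            /* advance the clock by the duration of the decoded data:
               data_size bytes / (2 bytes * channels * sample_rate) seconds */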
2115             is->audio_clock += (double)data_size /
2116                 (double)(n * dec->sample_rate);
2117 #if defined(DEBUG_SYNC)
2118             {
2119                 static double last_clock;
2120                 printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
2121                        is->audio_clock - last_clock,
2122                        is->audio_clock, pts);
2123                 last_clock = is->audio_clock;
2124             }
2125 #endif
2126             return data_size;
2127         }
2128
2129         /* free the current packet */
2130         if (pkt->data)
2131             av_free_packet(pkt);
2132
2133         if (is->paused || is->audioq.abort_request) {
2134             return -1;
2135         }
2136
2137         /* read next packet */
2138         if (packet_queue_get(&is->audioq, pkt, 1) < 0)
2139             return -1;
2140         if(pkt->data == flush_pkt.data){
2141             avcodec_flush_buffers(dec);
2142             continue;
2143         }
2144
2145         pkt_temp->data = pkt->data;
2146         pkt_temp->size = pkt->size;
2147
2148         /* update the audio clock with the packet pts, if available */
2149         if (pkt->pts != AV_NOPTS_VALUE) {
2150             is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
2151         }
2152     }
2153 }
2154
2155 /* get the amount of decoded audio data still to be played, in bytes. With SDL,
2156    we cannot get precise information about the hardware buffer fullness */
2157 static int audio_write_get_buf_size(VideoState *is)
2158 {
2159     return is->audio_buf_size - is->audio_buf_index;
2160 }
2161
2162
2163 /* prepare a new audio buffer */
2164 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2165 {
2166     VideoState *is = opaque;
2167     int audio_size, len1;
2168     double pts;
2169
2170     audio_callback_time = av_gettime();
2171
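    /* SDL asks for len bytes: refill the output from decoded frames, letting
       synchronize_audio() stretch or shrink each one, and fall back to a short
       block of silence when decoding fails */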
2172     while (len > 0) {
2173         if (is->audio_buf_index >= is->audio_buf_size) {
2174            audio_size = audio_decode_frame(is, &pts);
2175            if (audio_size < 0) {
2176                 /* if error, just output silence */
2177                is->audio_buf = is->audio_buf1;
2178                is->audio_buf_size = 1024;
2179                memset(is->audio_buf, 0, is->audio_buf_size);
2180            } else {
2181                if (is->show_audio)
2182                    update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2183                audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
2184                                               pts);
2185                is->audio_buf_size = audio_size;
2186            }
2187            is->audio_buf_index = 0;
2188         }
2189         len1 = is->audio_buf_size - is->audio_buf_index;
2190         if (len1 > len)
2191             len1 = len;
2192         memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2193         len -= len1;
2194         stream += len1;
2195         is->audio_buf_index += len1;
2196     }
2197 }
2198
2199 /* open a given stream. Return 0 if OK */
2200 static int stream_component_open(VideoState *is, int stream_index)
2201 {
2202     AVFormatContext *ic = is->ic;
2203     AVCodecContext *avctx;
2204     AVCodec *codec;
2205     SDL_AudioSpec wanted_spec, spec;
2206
2207     if (stream_index < 0 || stream_index >= ic->nb_streams)
2208         return -1;
2209     avctx = ic->streams[stream_index]->codec;
2210
2211     /* ask the decoder to downmix to at most 2 channels before opening the audio output */
2212     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2213         if (avctx->channels > 0) {
2214             avctx->request_channels = FFMIN(2, avctx->channels);
2215         } else {
2216             avctx->request_channels = 2;
2217         }
2218     }
2219
2220     codec = avcodec_find_decoder(avctx->codec_id);
2221     avctx->debug_mv = debug_mv;
2222     avctx->debug = debug;
2223     avctx->workaround_bugs = workaround_bugs;
2224     avctx->lowres = lowres;
2225     if(lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
2226     avctx->idct_algo= idct;
2227     if(fast) avctx->flags2 |= CODEC_FLAG2_FAST;
2228     avctx->skip_frame= skip_frame;
2229     avctx->skip_idct= skip_idct;
2230     avctx->skip_loop_filter= skip_loop_filter;
2231     avctx->error_recognition= error_recognition;
2232     avctx->error_concealment= error_concealment;
2233     avctx->thread_count= thread_count;
2234
2235     set_context_opts(avctx, avcodec_opts[avctx->codec_type], 0, codec);
2236
2237     if (!codec ||
2238         avcodec_open(avctx, codec) < 0)
2239         return -1;
2240
2241     /* prepare audio output */
2242     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2243         wanted_spec.freq = avctx->sample_rate;
2244         wanted_spec.format = AUDIO_S16SYS;
2245         wanted_spec.channels = avctx->channels;
2246         wanted_spec.silence = 0;
2247         wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
2248         wanted_spec.callback = sdl_audio_callback;
2249         wanted_spec.userdata = is;
2250         if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2251             fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
2252             return -1;
2253         }
2254         is->audio_hw_buf_size = spec.size;
2255         is->audio_src_fmt= AV_SAMPLE_FMT_S16;
2256     }
2257
2258     ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2259     switch(avctx->codec_type) {
2260     case AVMEDIA_TYPE_AUDIO:
2261         is->audio_stream = stream_index;
2262         is->audio_st = ic->streams[stream_index];
2263         is->audio_buf_size = 0;
2264         is->audio_buf_index = 0;
2265
2266         /* init averaging filter */
2267         is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2268         is->audio_diff_avg_count = 0;
2269         /* since we do not have a precise enough measure of the audio fifo fullness,
2270            we only correct audio sync when the error is larger than this threshold */
2271         is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / avctx->sample_rate;
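        /* e.g. at 44.1 kHz this is 2 * 1024 / 44100, roughly 46 ms, i.e. two SDL
           audio buffers' worth of data */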
2272
2273         memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
2274         packet_queue_init(&is->audioq);
2275         SDL_PauseAudio(0);
2276         break;
2277     case AVMEDIA_TYPE_VIDEO:
2278         is->video_stream = stream_index;
2279         is->video_st = ic->streams[stream_index];
2280
2281 //        is->video_current_pts_time = av_gettime();
2282
2283         packet_queue_init(&is->videoq);
2284         is->video_tid = SDL_CreateThread(video_thread, is);
2285         break;
2286     case AVMEDIA_TYPE_SUBTITLE:
2287         is->subtitle_stream = stream_index;
2288         is->subtitle_st = ic->streams[stream_index];
2289         packet_queue_init(&is->subtitleq);
2290
2291         is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
2292         break;
2293     default:
2294         break;
2295     }
2296     return 0;
2297 }
2298
2299 static void stream_component_close(VideoState *is, int stream_index)
2300 {
2301     AVFormatContext *ic = is->ic;
2302     AVCodecContext *avctx;
2303
2304     if (stream_index < 0 || stream_index >= ic->nb_streams)
2305         return;
2306     avctx = ic->streams[stream_index]->codec;
2307
2308     switch(avctx->codec_type) {
2309     case AVMEDIA_TYPE_AUDIO:
2310         packet_queue_abort(&is->audioq);
2311
2312         SDL_CloseAudio();
2313
2314         packet_queue_end(&is->audioq);
2315         if (is->reformat_ctx)
2316             av_audio_convert_free(is->reformat_ctx);
2317         is->reformat_ctx = NULL;
2318         break;
2319     case AVMEDIA_TYPE_VIDEO:
2320         packet_queue_abort(&is->videoq);
2321
2322         /* note: we also signal the condition to make sure we unblock the
2323            video thread in all cases */
2324         SDL_LockMutex(is->pictq_mutex);
2325         SDL_CondSignal(is->pictq_cond);
2326         SDL_UnlockMutex(is->pictq_mutex);
2327
2328         SDL_WaitThread(is->video_tid, NULL);
2329
2330         packet_queue_end(&is->videoq);
2331         break;
2332     case AVMEDIA_TYPE_SUBTITLE:
2333         packet_queue_abort(&is->subtitleq);
2334
2335         /* note: we also signal the condition to make sure we unblock the
2336            subtitle thread in all cases */
2337         SDL_LockMutex(is->subpq_mutex);
2338         is->subtitle_stream_changed = 1;
2339
2340         SDL_CondSignal(is->subpq_cond);
2341         SDL_UnlockMutex(is->subpq_mutex);
2342
2343         SDL_WaitThread(is->subtitle_tid, NULL);
2344
2345         packet_queue_end(&is->subtitleq);
2346         break;
2347     default:
2348         break;
2349     }
2350
2351     ic->streams[stream_index]->discard = AVDISCARD_ALL;
2352     avcodec_close(avctx);
2353     switch(avctx->codec_type) {
2354     case AVMEDIA_TYPE_AUDIO:
2355         is->audio_st = NULL;
2356         is->audio_stream = -1;
2357         break;
2358     case AVMEDIA_TYPE_VIDEO:
2359         is->video_st = NULL;
2360         is->video_stream = -1;
2361         break;
2362     case AVMEDIA_TYPE_SUBTITLE:
2363         is->subtitle_st = NULL;
2364         is->subtitle_stream = -1;
2365         break;
2366     default:
2367         break;
2368     }
2369 }
2370
2371 /* since we have only one decoding thread, we can use a global
2372    variable instead of a thread local variable */
2373 static VideoState *global_video_state;
2374
2375 static int decode_interrupt_cb(void)
2376 {
2377     return (global_video_state && global_video_state->abort_request);
2378 }
2379
2380 /* this thread gets the stream from the disk or the network */
2381 static int decode_thread(void *arg)
2382 {
2383     VideoState *is = arg;
2384     AVFormatContext *ic;
2385     int err, i, ret;
2386     int st_index[AVMEDIA_TYPE_NB];
2387     AVPacket pkt1, *pkt = &pkt1;
2388     AVFormatParameters params, *ap = &params;
2389     int eof=0;
2390     int pkt_in_play_range = 0;
2391
2392     ic = avformat_alloc_context();
2393
2394     memset(st_index, -1, sizeof(st_index));
2395     is->video_stream = -1;
2396     is->audio_stream = -1;
2397     is->subtitle_stream = -1;
2398
2399     global_video_state = is;
2400     avio_set_interrupt_cb(decode_interrupt_cb);
2401
2402     memset(ap, 0, sizeof(*ap));
2403
2404     ap->prealloced_context = 1;
2405     ap->width = frame_width;
2406     ap->height= frame_height;
2407     ap->time_base= (AVRational){1, 25};
2408     ap->pix_fmt = frame_pix_fmt;
2409
2410     set_context_opts(ic, avformat_opts, AV_OPT_FLAG_DECODING_PARAM, NULL);
2411
2412     err = av_open_input_file(&ic, is->filename, is->iformat, 0, ap);
2413     if (err < 0) {
2414         print_error(is->filename, err);
2415         ret = -1;
2416         goto fail;
2417     }
2418     is->ic = ic;
2419
2420     if(genpts)
2421         ic->flags |= AVFMT_FLAG_GENPTS;
2422
2423     err = av_find_stream_info(ic);
2424     if (err < 0) {
2425         fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
2426         ret = -1;
2427         goto fail;
2428     }
2429     if(ic->pb)
2430         ic->pb->eof_reached= 0; //FIXME hack, ffplay maybe should not use url_feof() to test for the end
2431
2432     if(seek_by_bytes<0)
2433         seek_by_bytes= !!(ic->iformat->flags & AVFMT_TS_DISCONT);
2434
2435     /* if a start time was requested, seek to it */
2436     if (start_time != AV_NOPTS_VALUE) {
2437         int64_t timestamp;
2438
2439         timestamp = start_time;
2440         /* add the stream start time */
2441         if (ic->start_time != AV_NOPTS_VALUE)
2442             timestamp += ic->start_time;
2443         ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2444         if (ret < 0) {
2445             fprintf(stderr, "%s: could not seek to position %0.3f\n",
2446                     is->filename, (double)timestamp / AV_TIME_BASE);
2447         }
2448     }
2449
2450     for (i = 0; i < ic->nb_streams; i++)
2451         ic->streams[i]->discard = AVDISCARD_ALL;
2452     if (!video_disable)
2453         st_index[AVMEDIA_TYPE_VIDEO] =
2454             av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2455                                 wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2456     if (!audio_disable)
2457         st_index[AVMEDIA_TYPE_AUDIO] =
2458             av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2459                                 wanted_stream[AVMEDIA_TYPE_AUDIO],
2460                                 st_index[AVMEDIA_TYPE_VIDEO],
2461                                 NULL, 0);
2462     if (!video_disable)
2463         st_index[AVMEDIA_TYPE_SUBTITLE] =
2464             av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2465                                 wanted_stream[AVMEDIA_TYPE_SUBTITLE],
2466                                 (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2467                                  st_index[AVMEDIA_TYPE_AUDIO] :
2468                                  st_index[AVMEDIA_TYPE_VIDEO]),
2469                                 NULL, 0);
2470     if (show_status) {
2471         av_dump_format(ic, 0, is->filename, 0);
2472     }
2473
2474     /* open the streams */
2475     if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2476         stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2477     }
2478
2479     ret=-1;
2480     if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2481         ret= stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2482     }
2483     is->refresh_tid = SDL_CreateThread(refresh_thread, is);
2484     if(ret<0) {
2485         if (!display_disable)
2486             is->show_audio = 2;
2487     }
2488
2489     if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2490         stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2491     }
2492
2493     if (is->video_stream < 0 && is->audio_stream < 0) {
2494         fprintf(stderr, "%s: could not open codecs\n", is->filename);
2495         ret = -1;
2496         goto fail;
2497     }
2498
2499     for(;;) {
2500         if (is->abort_request)
2501             break;
2502         if (is->paused != is->last_paused) {
2503             is->last_paused = is->paused;
2504             if (is->paused)
2505                 is->read_pause_return= av_read_pause(ic);
2506             else
2507                 av_read_play(ic);
2508         }
2509 #if CONFIG_RTSP_DEMUXER
2510         if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
2511             /* wait 10 ms to avoid trying to get another packet */
2512             /* XXX: horrible */
2513             SDL_Delay(10);
2514             continue;
2515         }
2516 #endif
2517         if (is->seek_req) {
2518             int64_t seek_target= is->seek_pos;
2519             int64_t seek_min= is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2520             int64_t seek_max= is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2521 //FIXME the +-2 is due to rounding being not done in the correct direction in generation
2522 //      of the seek_pos/seek_rel variables
2523
2524             ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2525             if (ret < 0) {
2526                 fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
2527             }else{
2528                 if (is->audio_stream >= 0) {
2529                     packet_queue_flush(&is->audioq);
2530                     packet_queue_put(&is->audioq, &flush_pkt);
2531                 }
2532                 if (is->subtitle_stream >= 0) {
2533                     packet_queue_flush(&is->subtitleq);
2534                     packet_queue_put(&is->subtitleq, &flush_pkt);
2535                 }
2536                 if (is->video_stream >= 0) {
2537                     packet_queue_flush(&is->videoq);
2538                     packet_queue_put(&is->videoq, &flush_pkt);
2539                 }
2540             }
2541             is->seek_req = 0;
2542             eof= 0;
2543         }
2544
2545         /* if the queues are full, no need to read more */
2546         if (   is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2547             || (   (is->audioq   .size  > MIN_AUDIOQ_SIZE || is->audio_stream<0)
2548                 && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream<0)
2549                 && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream<0))) {
2550             /* wait 10 ms */
2551             SDL_Delay(10);
2552             continue;
2553         }
2554         if(eof) {
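            /* at EOF, queue an empty packet so the video decoder can flush and
               output its remaining delayed frames */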
2555             if(is->video_stream >= 0){
2556                 av_init_packet(pkt);
2557                 pkt->data=NULL;
2558                 pkt->size=0;
2559                 pkt->stream_index= is->video_stream;
2560                 packet_queue_put(&is->videoq, pkt);
2561             }
2562             SDL_Delay(10);
2563             if(is->audioq.size + is->videoq.size + is->subtitleq.size ==0){
2564                 if(loop!=1 && (!loop || --loop)){
2565                     stream_seek(cur_stream, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
2566                 }else if(autoexit){
2567                     ret=AVERROR_EOF;
2568                     goto fail;
2569                 }
2570             }
2571             eof=0;
2572             continue;
2573         }
2574         ret = av_read_frame(ic, pkt);
2575         if (ret < 0) {
2576             if (ret == AVERROR_EOF || url_feof(ic->pb))
2577                 eof=1;
2578             if (ic->pb && ic->pb->error)
2579                 break;
2580             SDL_Delay(100); /* wait for user event */
2581             continue;
2582         }
2583         /* check if packet is in play range specified by user, then queue, otherwise discard */
2584         pkt_in_play_range = duration == AV_NOPTS_VALUE ||
2585                 (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
2586                 av_q2d(ic->streams[pkt->stream_index]->time_base) -
2587                 (double)(start_time != AV_NOPTS_VALUE ? start_time : 0)/1000000
2588                 <= ((double)duration/1000000);
2589         if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
2590             packet_queue_put(&is->audioq, pkt);
2591         } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
2592             packet_queue_put(&is->videoq, pkt);
2593         } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
2594             packet_queue_put(&is->subtitleq, pkt);
2595         } else {
2596             av_free_packet(pkt);
2597         }
2598     }
2599     /* wait until the end */
2600     while (!is->abort_request) {
2601         SDL_Delay(100);
2602     }
2603
2604     ret = 0;
2605  fail:
2606     /* disable interrupting */
2607     global_video_state = NULL;
2608
2609     /* close each stream */
2610     if (is->audio_stream >= 0)
2611         stream_component_close(is, is->audio_stream);
2612     if (is->video_stream >= 0)
2613         stream_component_close(is, is->video_stream);
2614     if (is->subtitle_stream >= 0)
2615         stream_component_close(is, is->subtitle_stream);
2616     if (is->ic) {
2617         av_close_input_file(is->ic);
2618         is->ic = NULL; /* safety */
2619     }
2620     avio_set_interrupt_cb(NULL);
2621
2622     if (ret != 0) {
2623         SDL_Event event;
2624
2625         event.type = FF_QUIT_EVENT;
2626         event.user.data1 = is;
2627         SDL_PushEvent(&event);
2628     }
2629     return 0;
2630 }
2631
2632 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2633 {
2634     VideoState *is;
2635
2636     is = av_mallocz(sizeof(VideoState));
2637     if (!is)
2638         return NULL;
2639     av_strlcpy(is->filename, filename, sizeof(is->filename));
2640     is->iformat = iformat;
2641     is->ytop = 0;
2642     is->xleft = 0;
2643
2644     /* start video display */
2645     is->pictq_mutex = SDL_CreateMutex();
2646     is->pictq_cond = SDL_CreateCond();
2647
2648     is->subpq_mutex = SDL_CreateMutex();
2649     is->subpq_cond = SDL_CreateCond();
2650
2651     is->av_sync_type = av_sync_type;
2652     is->parse_tid = SDL_CreateThread(decode_thread, is);
2653     if (!is->parse_tid) {
2654         av_free(is);
2655         return NULL;
2656     }
2657     return is;
2658 }
2659
2660 static void stream_cycle_channel(VideoState *is, int codec_type)
2661 {
2662     AVFormatContext *ic = is->ic;
2663     int start_index, stream_index;
2664     AVStream *st;
2665
2666     if (codec_type == AVMEDIA_TYPE_VIDEO)
2667         start_index = is->video_stream;
2668     else if (codec_type == AVMEDIA_TYPE_AUDIO)
2669         start_index = is->audio_stream;
2670     else
2671         start_index = is->subtitle_stream;
2672     if (start_index < (codec_type == AVMEDIA_TYPE_SUBTITLE ? -1 : 0))
2673         return;
2674     stream_index = start_index;
2675     for(;;) {
2676         if (++stream_index >= is->ic->nb_streams)
2677         {
2678             if (codec_type == AVMEDIA_TYPE_SUBTITLE)
2679             {
2680                 stream_index = -1;
2681                 goto the_end;
2682             } else
2683                 stream_index = 0;
2684         }
2685         if (stream_index == start_index)
2686             return;
2687         st = ic->streams[stream_index];
2688         if (st->codec->codec_type == codec_type) {
2689             /* check that parameters are OK */
2690             switch(codec_type) {
2691             case AVMEDIA_TYPE_AUDIO:
2692                 if (st->codec->sample_rate != 0 &&
2693                     st->codec->channels != 0)
2694                     goto the_end;
2695                 break;
2696             case AVMEDIA_TYPE_VIDEO:
2697             case AVMEDIA_TYPE_SUBTITLE:
2698                 goto the_end;
2699             default:
2700                 break;
2701             }
2702         }
2703     }
2704  the_end:
2705     stream_component_close(is, start_index);
2706     stream_component_open(is, stream_index);
2707 }
2708
2709
2710 static void toggle_full_screen(void)
2711 {
2712     is_full_screen = !is_full_screen;
2713     if (!fs_screen_width) {
2714         /* use default SDL method */
2715 //        SDL_WM_ToggleFullScreen(screen);
2716     }
2717     video_open(cur_stream);
2718 }
2719
2720 static void toggle_pause(void)
2721 {
2722     if (cur_stream)
2723         stream_pause(cur_stream);
2724     step = 0;
2725 }
2726
2727 static void step_to_next_frame(void)
2728 {
2729     if (cur_stream) {
2730         /* if the stream is paused unpause it, then step */
2731         if (cur_stream->paused)
2732             stream_pause(cur_stream);
2733     }
2734     step = 1;
2735 }
2736
2737 static void toggle_audio_display(void)
2738 {
2739     if (cur_stream) {
2740         int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2741         cur_stream->show_audio = (cur_stream->show_audio + 1) % 3;
2742         fill_rectangle(screen,
2743                     cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height,
2744                     bgcolor);
2745         SDL_UpdateRect(screen, cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height);
2746     }
2747 }
2748
2749 /* handle an event sent by the GUI */
2750 static void event_loop(void)
2751 {
2752     SDL_Event event;
2753     double incr, pos, frac;
2754
2755     for(;;) {
2756         double x;
2757         SDL_WaitEvent(&event);
2758         switch(event.type) {
2759         case SDL_KEYDOWN:
2760             if (exit_on_keydown) {
2761                 do_exit();
2762                 break;
2763             }
2764             switch(event.key.keysym.sym) {
2765             case SDLK_ESCAPE:
2766             case SDLK_q:
2767                 do_exit();
2768                 break;
2769             case SDLK_f:
2770                 toggle_full_screen();
2771                 break;
2772             case SDLK_p:
2773             case SDLK_SPACE:
2774                 toggle_pause();
2775                 break;
2776             case SDLK_s: //S: Step to next frame
2777                 step_to_next_frame();
2778                 break;
2779             case SDLK_a:
2780                 if (cur_stream)
2781                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
2782                 break;
2783             case SDLK_v:
2784                 if (cur_stream)
2785                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
2786                 break;
2787             case SDLK_t:
2788                 if (cur_stream)
2789                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
2790                 break;
2791             case SDLK_w:
2792                 toggle_audio_display();
2793                 break;
2794             case SDLK_LEFT:
2795                 incr = -10.0;
2796                 goto do_seek;
2797             case SDLK_RIGHT:
2798                 incr = 10.0;
2799                 goto do_seek;
2800             case SDLK_UP:
2801                 incr = 60.0;
2802                 goto do_seek;
2803             case SDLK_DOWN:
2804                 incr = -60.0;
2805             do_seek:
2806                 if (cur_stream) {
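                    /* when seeking by bytes, convert the time increment into a
                       byte increment: bytes = seconds * bit_rate / 8, assuming
                       ~180000 bytes/s when the bit rate is unknown */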
2807                     if (seek_by_bytes) {
2808                         if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos>=0){
2809                             pos= cur_stream->video_current_pos;
2810                         }else if(cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos>=0){
2811                             pos= cur_stream->audio_pkt.pos;
2812                         }else
2813                             pos = avio_tell(cur_stream->ic->pb);
2814                         if (cur_stream->ic->bit_rate)
2815                             incr *= cur_stream->ic->bit_rate / 8.0;
2816                         else
2817                             incr *= 180000.0;
2818                         pos += incr;
2819                         stream_seek(cur_stream, pos, incr, 1);
2820                     } else {
2821                         pos = get_master_clock(cur_stream);
2822                         pos += incr;
2823                         stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
2824                     }
2825                 }
2826                 break;
2827             default:
2828                 break;
2829             }
2830             break;
2831         case SDL_MOUSEBUTTONDOWN:
2832             if (exit_on_mousedown) {
2833                 do_exit();
2834                 break;
2835             }
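            /* fall through: a button press is handled like a mouse motion below,
               seeking to the clicked position */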
2836         case SDL_MOUSEMOTION:
2837             if(event.type ==SDL_MOUSEBUTTONDOWN){
2838                 x= event.button.x;
2839             }else{
2840                 if(event.motion.state != SDL_PRESSED)
2841                     break;
2842                 x= event.motion.x;
2843             }
2844             if (cur_stream) {
2845                 if(seek_by_bytes || cur_stream->ic->duration<=0){
2846                     uint64_t size=  avio_size(cur_stream->ic->pb);
2847                     stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
2848                 }else{
2849                     int64_t ts;
2850                     int ns, hh, mm, ss;
2851                     int tns, thh, tmm, tss;
2852                     tns = cur_stream->ic->duration/1000000LL;
2853                     thh = tns/3600;
2854                     tmm = (tns%3600)/60;
2855                     tss = (tns%60);
2856                     frac = x/cur_stream->width;
2857                     ns = frac*tns;
2858                     hh = ns/3600;
2859                     mm = (ns%3600)/60;
2860                     ss = (ns%60);
2861                     fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
2862                             hh, mm, ss, thh, tmm, tss);
2863                     ts = frac*cur_stream->ic->duration;
2864                     if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
2865                         ts += cur_stream->ic->start_time;
2866                     stream_seek(cur_stream, ts, 0, 0);
2867                 }
2868             }
2869             break;
2870         case SDL_VIDEORESIZE:
2871             if (cur_stream) {
2872                 screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
2873                                           SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
2874                 screen_width = cur_stream->width = event.resize.w;
2875                 screen_height= cur_stream->height= event.resize.h;
2876             }
2877             break;
2878         case SDL_QUIT:
2879         case FF_QUIT_EVENT:
2880             do_exit();
2881             break;
2882         case FF_ALLOC_EVENT:
2883             video_open(event.user.data1);
2884             alloc_picture(event.user.data1);
2885             break;
2886         case FF_REFRESH_EVENT:
2887             video_refresh_timer(event.user.data1);
2888             cur_stream->refresh=0;
2889             break;
2890         default:
2891             break;
2892         }
2893     }
2894 }
2895
2896 static void opt_frame_size(const char *arg)
2897 {
2898     if (av_parse_video_size(&frame_width, &frame_height, arg) < 0) {
2899         fprintf(stderr, "Incorrect frame size\n");
2900         exit(1);
2901     }
2902     if ((frame_width % 2) != 0 || (frame_height % 2) != 0) {
2903         fprintf(stderr, "Frame size must be a multiple of 2\n");
2904         exit(1);
2905     }
2906 }
2907
2908 static int opt_width(const char *opt, const char *arg)
2909 {
2910     screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2911     return 0;
2912 }
2913
2914 static int opt_height(const char *opt, const char *arg)
2915 {
2916     screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2917     return 0;
2918 }
2919
2920 static void opt_format(const char *arg)
2921 {
2922     file_iformat = av_find_input_format(arg);
2923     if (!file_iformat) {
2924         fprintf(stderr, "Unknown input format: %s\n", arg);
2925         exit(1);
2926     }
2927 }
2928
2929 static void opt_frame_pix_fmt(const char *arg)
2930 {
2931     frame_pix_fmt = av_get_pix_fmt(arg);
2932 }
2933
2934 static int opt_sync(const char *opt, const char *arg)
2935 {
2936     if (!strcmp(arg, "audio"))
2937         av_sync_type = AV_SYNC_AUDIO_MASTER;
2938     else if (!strcmp(arg, "video"))
2939         av_sync_type = AV_SYNC_VIDEO_MASTER;
2940     else if (!strcmp(arg, "ext"))
2941         av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2942     else {
2943         fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
2944         exit(1);
2945     }
2946     return 0;
2947 }
2948
2949 static int opt_seek(const char *opt, const char *arg)
2950 {
2951     start_time = parse_time_or_die(opt, arg, 1);
2952     return 0;
2953 }
2954
2955 static int opt_duration(const char *opt, const char *arg)
2956 {
2957     duration = parse_time_or_die(opt, arg, 1);
2958     return 0;
2959 }
2960
2961 static int opt_debug(const char *opt, const char *arg)
2962 {
2963     av_log_set_level(99);
2964     debug = parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2965     return 0;
2966 }
2967
2968 static int opt_vismv(const char *opt, const char *arg)
2969 {
2970     debug_mv = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
2971     return 0;
2972 }
2973
2974 static int opt_thread_count(const char *opt, const char *arg)
2975 {
2976     thread_count= parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2977 #if !HAVE_THREADS
2978     fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
2979 #endif
2980     return 0;
2981 }
2982
2983 static const OptionDef options[] = {
2984 #include "cmdutils_common_opts.h"
2985     { "x", HAS_ARG | OPT_FUNC2, {(void*)opt_width}, "force displayed width", "width" },
2986     { "y", HAS_ARG | OPT_FUNC2, {(void*)opt_height}, "force displayed height", "height" },
2987     { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
2988     { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
2989     { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
2990     { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
2991     { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_AUDIO]}, "select desired audio stream", "stream_number" },
2992     { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_VIDEO]}, "select desired video stream", "stream_number" },
2993     { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_SUBTITLE]}, "select desired subtitle stream", "stream_number" },
2994     { "ss", HAS_ARG | OPT_FUNC2, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
2995     { "t", HAS_ARG | OPT_FUNC2, {(void*)&opt_duration}, "play \"duration\" seconds of audio/video", "duration" },
2996     { "bytes", OPT_INT | HAS_ARG, {(void*)&seek_by_bytes}, "seek by bytes 0=off 1=on -1=auto", "val" },
2997     { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
2998     { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
2999     { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
3000     { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
3001     { "debug", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_debug}, "print specific debug info", "" },
3002     { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
3003     { "vismv", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_vismv}, "visualize motion vectors", "" },
3004     { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
3005     { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
3006     { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts 0=off 1=on -1=auto", ""},
3007     { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
3008     { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
3009     { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
3010     { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
3011     { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo",  "algo" },
3012     { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_recognition}, "set error detection threshold (0-4)",  "threshold" },
3013     { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options",  "bit_mask" },
3014     { "sync", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
3015     { "threads", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
3016     { "autoexit", OPT_BOOL | OPT_EXPERT, {(void*)&autoexit}, "exit at the end", "" },
3017     { "exitonkeydown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_keydown}, "exit on key down", "" },
3018     { "exitonmousedown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_mousedown}, "exit on mouse down", "" },
3019     { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&loop}, "set number of times the playback shall be looped", "loop count" },
3020     { "framedrop", OPT_BOOL | OPT_EXPERT, {(void*)&framedrop}, "drop frames when cpu is too slow", "" },
3021     { "window_title", OPT_STRING | HAS_ARG, {(void*)&window_title}, "set window title", "window title" },
3022 #if CONFIG_AVFILTER
3023     { "vf", OPT_STRING | HAS_ARG, {(void*)&vfilters}, "video filters", "filter list" },
3024 #endif
3025     { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, {(void*)&rdftspeed}, "rdft speed", "msecs" },
3026     { "default", OPT_FUNC2 | HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
3027     { "i", OPT_DUMMY, {NULL}, "ffmpeg compatibility dummy option", ""},
3028     { NULL, },
3029 };
3030
3031 static void show_usage(void)
3032 {
3033     printf("Simple media player\n");
3034     printf("usage: ffplay [options] input_file\n");
3035     printf("\n");
3036 }
3037
3038 static void show_help(void)
3039 {
3040     av_log_set_callback(log_callback_help);
3041     show_usage();
3042     show_help_options(options, "Main options:\n",
3043                       OPT_EXPERT, 0);
3044     show_help_options(options, "\nAdvanced options:\n",
3045                       OPT_EXPERT, OPT_EXPERT);
3046     printf("\n");
3047     av_opt_show2(avcodec_opts[0], NULL,
3048                  AV_OPT_FLAG_DECODING_PARAM, 0);
3049     printf("\n");
3050     av_opt_show2(avformat_opts, NULL,
3051                  AV_OPT_FLAG_DECODING_PARAM, 0);
3052 #if !CONFIG_AVFILTER
3053     printf("\n");
3054     av_opt_show2(sws_opts, NULL,
3055                  AV_OPT_FLAG_ENCODING_PARAM, 0);
3056 #endif
3057     printf("\nWhile playing:\n"
3058            "q, ESC              quit\n"
3059            "f                   toggle full screen\n"
3060            "p, SPC              pause\n"
3061            "a                   cycle audio channel\n"
3062            "v                   cycle video channel\n"
3063            "t                   cycle subtitle channel\n"
3064            "w                   show audio waves\n"
3065            "s                   activate frame-step mode\n"
3066            "left/right          seek backward/forward 10 seconds\n"
3067            "down/up             seek backward/forward 1 minute\n"
3068            "mouse click         seek to percentage in file corresponding to fraction of width\n"
3069            );
3070 }
3071
3072 static void opt_input_file(const char *filename)
3073 {
3074     if (input_filename) {
3075         fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
3076                 filename, input_filename);
3077         exit(1);
3078     }
3079     if (!strcmp(filename, "-"))
3080         filename = "pipe:";
3081     input_filename = filename;
3082 }
3083
3084 /* program entry point */
3085 int main(int argc, char **argv)
3086 {
3087     int flags;
3088
3089     av_log_set_flags(AV_LOG_SKIP_REPEATED);
3090
3091     /* register all codecs, demux and protocols */
3092     avcodec_register_all();
3093 #if CONFIG_AVDEVICE
3094     avdevice_register_all();
3095 #endif
3096 #if CONFIG_AVFILTER
3097     avfilter_register_all();
3098 #endif
3099     av_register_all();
3100
3101     init_opts();
3102
3103     show_banner();
3104
3105     parse_options(argc, argv, options, opt_input_file);
3106
3107     if (!input_filename) {
3108         show_usage();
3109         fprintf(stderr, "An input file must be specified\n");
3110         fprintf(stderr, "Use -h to get full help or, even better, run 'man ffplay'\n");
3111         exit(1);
3112     }
3113
3114     if (display_disable) {
3115         video_disable = 1;
3116     }
3117     flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3118 #if !defined(__MINGW32__) && !defined(__APPLE__)
3119     flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
3120 #endif
3121     if (SDL_Init (flags)) {
3122         fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
3123         exit(1);
3124     }
3125
3126     if (!display_disable) {
3127 #if HAVE_SDL_VIDEO_SIZE
3128         const SDL_VideoInfo *vi = SDL_GetVideoInfo();
3129         fs_screen_width = vi->current_w;
3130         fs_screen_height = vi->current_h;
3131 #endif
3132     }
3133
3134     SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
3135     SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3136     SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3137
3138     av_init_packet(&flush_pkt);
3139     flush_pkt.data= "FLUSH";
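    /* flush_pkt.data is used as a sentinel: when a packet queue returns it after
       a seek, the corresponding decoder's buffers are flushed */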
3140
3141     cur_stream = stream_open(input_filename, file_iformat);
3142
3143     event_loop();
3144
3145     /* never returns */
3146
3147     return 0;
3148 }