ffplay: removed unused variable channels.
1 /*
2  * ffplay : Simple Media Player based on the FFmpeg libraries
3  * Copyright (c) 2003 Fabrice Bellard
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21
22 #define _XOPEN_SOURCE 600
23
24 #include "config.h"
25 #include <inttypes.h>
26 #include <math.h>
27 #include <limits.h>
28 #include "libavutil/avstring.h"
29 #include "libavutil/colorspace.h"
30 #include "libavutil/pixdesc.h"
31 #include "libavutil/imgutils.h"
32 #include "libavutil/parseutils.h"
33 #include "libavutil/samplefmt.h"
34 #include "libavutil/avassert.h"
35 #include "libavformat/avformat.h"
36 #include "libavdevice/avdevice.h"
37 #include "libswscale/swscale.h"
38 #include "libavcodec/audioconvert.h"
39 #include "libavutil/opt.h"
40 #include "libavcodec/avfft.h"
41
42 #if CONFIG_AVFILTER
43 # include "libavfilter/avfilter.h"
44 # include "libavfilter/avfiltergraph.h"
45 #endif
46
47 #include "cmdutils.h"
48
49 #include <SDL.h>
50 #include <SDL_thread.h>
51
52 #ifdef __MINGW32__
53 #undef main /* We don't want SDL to override our main() */
54 #endif
55
56 #include <unistd.h>
57 #include <assert.h>
58
59 const char program_name[] = "ffplay";
60 const int program_birth_year = 2003;
61
62 //#define DEBUG
63 //#define DEBUG_SYNC
64
65 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
66 #define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
67 #define MIN_FRAMES 5
68
69 /* SDL audio buffer size, in samples. Should be small to have precise
70    A/V sync as SDL does not have hardware buffer fullness info. */
71 #define SDL_AUDIO_BUFFER_SIZE 1024
72
73 /* no AV sync correction is done if below the AV sync threshold */
74 #define AV_SYNC_THRESHOLD 0.01
75 /* no AV correction is done if too big error */
76 #define AV_NOSYNC_THRESHOLD 10.0
77
78 #define FRAME_SKIP_FACTOR 0.05
79
80 /* maximum audio speed change to get correct sync */
81 #define SAMPLE_CORRECTION_PERCENT_MAX 10
82
83 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
84 #define AUDIO_DIFF_AVG_NB   20
85
86 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
87 #define SAMPLE_ARRAY_SIZE (2*65536)
88
89 static int sws_flags = SWS_BICUBIC;
90
91 typedef struct PacketQueue {
92     AVPacketList *first_pkt, *last_pkt;
93     int nb_packets;
94     int size;
95     int abort_request;
96     SDL_mutex *mutex;
97     SDL_cond *cond;
98 } PacketQueue;
99
100 #define VIDEO_PICTURE_QUEUE_SIZE 2
101 #define SUBPICTURE_QUEUE_SIZE 4
102
103 typedef struct VideoPicture {
104     double pts;                                  ///<presentation time stamp for this picture
105     double target_clock;                         ///<av_gettime() time at which this should be displayed ideally
106     int64_t pos;                                 ///<byte position in file
107     SDL_Overlay *bmp;
108     int width, height; /* source height & width */
109     int allocated;
110     enum PixelFormat pix_fmt;
111
112 #if CONFIG_AVFILTER
113     AVFilterBufferRef *picref;
114 #endif
115 } VideoPicture;
116
117 typedef struct SubPicture {
118     double pts; /* presentation time stamp for this picture */
119     AVSubtitle sub;
120 } SubPicture;
121
122 enum {
123     AV_SYNC_AUDIO_MASTER, /* default choice */
124     AV_SYNC_VIDEO_MASTER,
125     AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
126 };
127
128 typedef struct VideoState {
129     SDL_Thread *read_tid;
130     SDL_Thread *video_tid;
131     SDL_Thread *refresh_tid;
132     AVInputFormat *iformat;
133     int no_background;
134     int abort_request;
135     int paused;
136     int last_paused;
137     int seek_req;
138     int seek_flags;
139     int64_t seek_pos;
140     int64_t seek_rel;
141     int read_pause_return;
142     AVFormatContext *ic;
143
144     int audio_stream;
145
146     int av_sync_type;
147     double external_clock; /* external clock base */
148     int64_t external_clock_time;
149
150     double audio_clock;
151     double audio_diff_cum; /* used for AV difference average computation */
152     double audio_diff_avg_coef;
153     double audio_diff_threshold;
154     int audio_diff_avg_count;
155     AVStream *audio_st;
156     PacketQueue audioq;
157     int audio_hw_buf_size;
158     /* samples output by the codec. we reserve more space for avsync
159        compensation */
160     DECLARE_ALIGNED(16,uint8_t,audio_buf1)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
161     DECLARE_ALIGNED(16,uint8_t,audio_buf2)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
162     uint8_t *audio_buf;
163     unsigned int audio_buf_size; /* in bytes */
164     int audio_buf_index; /* in bytes */
165     AVPacket audio_pkt_temp;
166     AVPacket audio_pkt;
167     enum AVSampleFormat audio_src_fmt;
168     AVAudioConvert *reformat_ctx;
169
170     enum ShowMode {
171         SHOW_MODE_NONE = -1, SHOW_MODE_VIDEO = 0, SHOW_MODE_WAVES, SHOW_MODE_RDFT, SHOW_MODE_NB
172     } show_mode;
173     int16_t sample_array[SAMPLE_ARRAY_SIZE];
174     int sample_array_index;
175     int last_i_start;
176     RDFTContext *rdft;
177     int rdft_bits;
178     FFTSample *rdft_data;
179     int xpos;
180
181     SDL_Thread *subtitle_tid;
182     int subtitle_stream;
183     int subtitle_stream_changed;
184     AVStream *subtitle_st;
185     PacketQueue subtitleq;
186     SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
187     int subpq_size, subpq_rindex, subpq_windex;
188     SDL_mutex *subpq_mutex;
189     SDL_cond *subpq_cond;
190
191     double frame_timer;
192     double frame_last_pts;
193     double frame_last_delay;
194     double video_clock;                          ///<pts of last decoded frame / predicted pts of next decoded frame
195     int video_stream;
196     AVStream *video_st;
197     PacketQueue videoq;
198     double video_current_pts;                    ///<current displayed pts (different from video_clock if frame fifos are used)
199     double video_current_pts_drift;              ///<video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
200     int64_t video_current_pos;                   ///<current displayed file pos
201     VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
202     int pictq_size, pictq_rindex, pictq_windex;
203     SDL_mutex *pictq_mutex;
204     SDL_cond *pictq_cond;
205 #if !CONFIG_AVFILTER
206     struct SwsContext *img_convert_ctx;
207 #endif
208
209     char filename[1024];
210     int width, height, xleft, ytop;
211
212 #if CONFIG_AVFILTER
213     AVFilterContext *out_video_filter;          ///<the last filter in the video chain
214 #endif
215
216     float skip_frames;
217     float skip_frames_index;
218     int refresh;
219 } VideoState;
220
221 static void show_help(void);
222
223 /* options specified by the user */
224 static AVInputFormat *file_iformat;
225 static const char *input_filename;
226 static const char *window_title;
227 static int fs_screen_width;
228 static int fs_screen_height;
229 static int screen_width = 0;
230 static int screen_height = 0;
231 static int frame_width = 0;
232 static int frame_height = 0;
233 static enum PixelFormat frame_pix_fmt = PIX_FMT_NONE;
234 static int audio_disable;
235 static int video_disable;
236 static int wanted_stream[AVMEDIA_TYPE_NB]={
237     [AVMEDIA_TYPE_AUDIO]=-1,
238     [AVMEDIA_TYPE_VIDEO]=-1,
239     [AVMEDIA_TYPE_SUBTITLE]=-1,
240 };
241 static int seek_by_bytes=-1;
242 static int display_disable;
243 static int show_status = 1;
244 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
245 static int64_t start_time = AV_NOPTS_VALUE;
246 static int64_t duration = AV_NOPTS_VALUE;
247 static int debug = 0;
248 static int debug_mv = 0;
249 static int step = 0;
250 static int thread_count = 1;
251 static int workaround_bugs = 1;
252 static int fast = 0;
253 static int genpts = 0;
254 static int lowres = 0;
255 static int idct = FF_IDCT_AUTO;
256 static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
257 static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
258 static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
259 static int error_recognition = FF_ER_CAREFUL;
260 static int error_concealment = 3;
261 static int decoder_reorder_pts= -1;
262 static int autoexit;
263 static int exit_on_keydown;
264 static int exit_on_mousedown;
265 static int loop=1;
266 static int framedrop=1;
267 static enum ShowMode show_mode = SHOW_MODE_NONE;
268
269 static int rdftspeed=20;
270 #if CONFIG_AVFILTER
271 static char *vfilters = NULL;
272 #endif
273
274 /* current context */
275 static int is_full_screen;
276 static VideoState *cur_stream;
277 static int64_t audio_callback_time;
278
279 static AVPacket flush_pkt;
280
281 #define FF_ALLOC_EVENT   (SDL_USEREVENT)
282 #define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
283 #define FF_QUIT_EVENT    (SDL_USEREVENT + 2)
284
285 static SDL_Surface *screen;
286
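/*
 * Packet queues decouple the demuxing (read) thread from the audio, video and
 * subtitle decoding threads: the reader appends AVPackets with
 * packet_queue_put() and each decoder pops them with packet_queue_get(),
 * blocking on the condition variable until data arrives or the queue is
 * aborted.
 */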
287 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
288 {
289     AVPacketList *pkt1;
290
291     /* duplicate the packet */
292     if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
293         return -1;
294
295     pkt1 = av_malloc(sizeof(AVPacketList));
296     if (!pkt1)
297         return -1;
298     pkt1->pkt = *pkt;
299     pkt1->next = NULL;
300
301
302     SDL_LockMutex(q->mutex);
303
304     if (!q->last_pkt)
305
306         q->first_pkt = pkt1;
307     else
308         q->last_pkt->next = pkt1;
309     q->last_pkt = pkt1;
310     q->nb_packets++;
311     q->size += pkt1->pkt.size + sizeof(*pkt1);
312     /* XXX: should duplicate packet data in DV case */
313     SDL_CondSignal(q->cond);
314
315     SDL_UnlockMutex(q->mutex);
316     return 0;
317 }
318
319 /* packet queue handling */
320 static void packet_queue_init(PacketQueue *q)
321 {
322     memset(q, 0, sizeof(PacketQueue));
323     q->mutex = SDL_CreateMutex();
324     q->cond = SDL_CreateCond();
325     packet_queue_put(q, &flush_pkt);
326 }
327
328 static void packet_queue_flush(PacketQueue *q)
329 {
330     AVPacketList *pkt, *pkt1;
331
332     SDL_LockMutex(q->mutex);
333     for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
334         pkt1 = pkt->next;
335         av_free_packet(&pkt->pkt);
336         av_freep(&pkt);
337     }
338     q->last_pkt = NULL;
339     q->first_pkt = NULL;
340     q->nb_packets = 0;
341     q->size = 0;
342     SDL_UnlockMutex(q->mutex);
343 }
344
345 static void packet_queue_end(PacketQueue *q)
346 {
347     packet_queue_flush(q);
348     SDL_DestroyMutex(q->mutex);
349     SDL_DestroyCond(q->cond);
350 }
351
352 static void packet_queue_abort(PacketQueue *q)
353 {
354     SDL_LockMutex(q->mutex);
355
356     q->abort_request = 1;
357
358     SDL_CondSignal(q->cond);
359
360     SDL_UnlockMutex(q->mutex);
361 }
362
363 /* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
364 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
365 {
366     AVPacketList *pkt1;
367     int ret;
368
369     SDL_LockMutex(q->mutex);
370
371     for(;;) {
372         if (q->abort_request) {
373             ret = -1;
374             break;
375         }
376
377         pkt1 = q->first_pkt;
378         if (pkt1) {
379             q->first_pkt = pkt1->next;
380             if (!q->first_pkt)
381                 q->last_pkt = NULL;
382             q->nb_packets--;
383             q->size -= pkt1->pkt.size + sizeof(*pkt1);
384             *pkt = pkt1->pkt;
385             av_free(pkt1);
386             ret = 1;
387             break;
388         } else if (!block) {
389             ret = 0;
390             break;
391         } else {
392             SDL_CondWait(q->cond, q->mutex);
393         }
394     }
395     SDL_UnlockMutex(q->mutex);
396     return ret;
397 }
398
399 static inline void fill_rectangle(SDL_Surface *screen,
400                                   int x, int y, int w, int h, int color)
401 {
402     SDL_Rect rect;
403     rect.x = x;
404     rect.y = y;
405     rect.w = w;
406     rect.h = h;
407     SDL_FillRect(screen, &rect, color);
408 }
409
410 #if 0
411 /* draw only the border of a rectangle */
412 void fill_border(VideoState *s, int x, int y, int w, int h, int color)
413 {
414     int w1, w2, h1, h2;
415
416     /* fill the background */
417     w1 = x;
418     if (w1 < 0)
419         w1 = 0;
420     w2 = s->width - (x + w);
421     if (w2 < 0)
422         w2 = 0;
423     h1 = y;
424     if (h1 < 0)
425         h1 = 0;
426     h2 = s->height - (y + h);
427     if (h2 < 0)
428         h2 = 0;
429     fill_rectangle(screen,
430                    s->xleft, s->ytop,
431                    w1, s->height,
432                    color);
433     fill_rectangle(screen,
434                    s->xleft + s->width - w2, s->ytop,
435                    w2, s->height,
436                    color);
437     fill_rectangle(screen,
438                    s->xleft + w1, s->ytop,
439                    s->width - w1 - w2, h1,
440                    color);
441     fill_rectangle(screen,
442                    s->xleft + w1, s->ytop + s->height - h2,
443                    s->width - w1 - w2, h2,
444                    color);
445 }
446 #endif
447
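/* Blend a new component value into an existing one with alpha 'a' (0-255).
   The shift 's' compensates for chroma values that have been pre-summed over
   2 or 4 pixels before blending. */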
448 #define ALPHA_BLEND(a, oldp, newp, s)\
449 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
450
451 #define RGBA_IN(r, g, b, a, s)\
452 {\
453     unsigned int v = ((const uint32_t *)(s))[0];\
454     a = (v >> 24) & 0xff;\
455     r = (v >> 16) & 0xff;\
456     g = (v >> 8) & 0xff;\
457     b = v & 0xff;\
458 }
459
460 #define YUVA_IN(y, u, v, a, s, pal)\
461 {\
462     unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
463     a = (val >> 24) & 0xff;\
464     y = (val >> 16) & 0xff;\
465     u = (val >> 8) & 0xff;\
466     v = val & 0xff;\
467 }
468
469 #define YUVA_OUT(d, y, u, v, a)\
470 {\
471     ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
472 }
473
474
475 #define BPP 1
476
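/* Blend one palettized subtitle rectangle onto a YUV 4:2:0 destination
   picture. Because the chroma planes are subsampled 2x2, luma rows are
   processed in pairs and the chroma contributions of neighbouring pixels are
   accumulated (u1, v1, a1) before being blended. */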
477 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
478 {
479     int wrap, wrap3, width2, skip2;
480     int y, u, v, a, u1, v1, a1, w, h;
481     uint8_t *lum, *cb, *cr;
482     const uint8_t *p;
483     const uint32_t *pal;
484     int dstx, dsty, dstw, dsth;
485
486     dstw = av_clip(rect->w, 0, imgw);
487     dsth = av_clip(rect->h, 0, imgh);
488     dstx = av_clip(rect->x, 0, imgw - dstw);
489     dsty = av_clip(rect->y, 0, imgh - dsth);
490     lum = dst->data[0] + dsty * dst->linesize[0];
491     cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
492     cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
493
494     width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
495     skip2 = dstx >> 1;
496     wrap = dst->linesize[0];
497     wrap3 = rect->pict.linesize[0];
498     p = rect->pict.data[0];
499     pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */
500
501     if (dsty & 1) {
502         lum += dstx;
503         cb += skip2;
504         cr += skip2;
505
506         if (dstx & 1) {
507             YUVA_IN(y, u, v, a, p, pal);
508             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
509             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
510             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
511             cb++;
512             cr++;
513             lum++;
514             p += BPP;
515         }
516         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
517             YUVA_IN(y, u, v, a, p, pal);
518             u1 = u;
519             v1 = v;
520             a1 = a;
521             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
522
523             YUVA_IN(y, u, v, a, p + BPP, pal);
524             u1 += u;
525             v1 += v;
526             a1 += a;
527             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
528             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
529             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
530             cb++;
531             cr++;
532             p += 2 * BPP;
533             lum += 2;
534         }
535         if (w) {
536             YUVA_IN(y, u, v, a, p, pal);
537             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
538             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
539             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
540             p++;
541             lum++;
542         }
543         p += wrap3 - dstw * BPP;
544         lum += wrap - dstw - dstx;
545         cb += dst->linesize[1] - width2 - skip2;
546         cr += dst->linesize[2] - width2 - skip2;
547     }
548     for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
549         lum += dstx;
550         cb += skip2;
551         cr += skip2;
552
553         if (dstx & 1) {
554             YUVA_IN(y, u, v, a, p, pal);
555             u1 = u;
556             v1 = v;
557             a1 = a;
558             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
559             p += wrap3;
560             lum += wrap;
561             YUVA_IN(y, u, v, a, p, pal);
562             u1 += u;
563             v1 += v;
564             a1 += a;
565             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
566             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
567             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
568             cb++;
569             cr++;
570             p += -wrap3 + BPP;
571             lum += -wrap + 1;
572         }
573         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
574             YUVA_IN(y, u, v, a, p, pal);
575             u1 = u;
576             v1 = v;
577             a1 = a;
578             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
579
580             YUVA_IN(y, u, v, a, p + BPP, pal);
581             u1 += u;
582             v1 += v;
583             a1 += a;
584             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
585             p += wrap3;
586             lum += wrap;
587
588             YUVA_IN(y, u, v, a, p, pal);
589             u1 += u;
590             v1 += v;
591             a1 += a;
592             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
593
594             YUVA_IN(y, u, v, a, p + BPP, pal);
595             u1 += u;
596             v1 += v;
597             a1 += a;
598             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
599
600             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
601             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
602
603             cb++;
604             cr++;
605             p += -wrap3 + 2 * BPP;
606             lum += -wrap + 2;
607         }
608         if (w) {
609             YUVA_IN(y, u, v, a, p, pal);
610             u1 = u;
611             v1 = v;
612             a1 = a;
613             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
614             p += wrap3;
615             lum += wrap;
616             YUVA_IN(y, u, v, a, p, pal);
617             u1 += u;
618             v1 += v;
619             a1 += a;
620             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
621             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
622             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
623             cb++;
624             cr++;
625             p += -wrap3 + BPP;
626             lum += -wrap + 1;
627         }
628         p += wrap3 + (wrap3 - dstw * BPP);
629         lum += wrap + (wrap - dstw - dstx);
630         cb += dst->linesize[1] - width2 - skip2;
631         cr += dst->linesize[2] - width2 - skip2;
632     }
633     /* handle odd height */
634     if (h) {
635         lum += dstx;
636         cb += skip2;
637         cr += skip2;
638
639         if (dstx & 1) {
640             YUVA_IN(y, u, v, a, p, pal);
641             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
642             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
643             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
644             cb++;
645             cr++;
646             lum++;
647             p += BPP;
648         }
649         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
650             YUVA_IN(y, u, v, a, p, pal);
651             u1 = u;
652             v1 = v;
653             a1 = a;
654             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
655
656             YUVA_IN(y, u, v, a, p + BPP, pal);
657             u1 += u;
658             v1 += v;
659             a1 += a;
660             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
661             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
662             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
663             cb++;
664             cr++;
665             p += 2 * BPP;
666             lum += 2;
667         }
668         if (w) {
669             YUVA_IN(y, u, v, a, p, pal);
670             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
671             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
672             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
673         }
674     }
675 }
676
677 static void free_subpicture(SubPicture *sp)
678 {
679     avsubtitle_free(&sp->sub);
680 }
681
682 static void video_image_display(VideoState *is)
683 {
684     VideoPicture *vp;
685     SubPicture *sp;
686     AVPicture pict;
687     float aspect_ratio;
688     int width, height, x, y;
689     SDL_Rect rect;
690     int i;
691
692     vp = &is->pictq[is->pictq_rindex];
693     if (vp->bmp) {
694 #if CONFIG_AVFILTER
695          if (vp->picref->video->sample_aspect_ratio.num == 0)
696              aspect_ratio = 0;
697          else
698              aspect_ratio = av_q2d(vp->picref->video->sample_aspect_ratio);
699 #else
700
701         /* XXX: use variable in the frame */
702         if (is->video_st->sample_aspect_ratio.num)
703             aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
704         else if (is->video_st->codec->sample_aspect_ratio.num)
705             aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
706         else
707             aspect_ratio = 0;
708 #endif
709         if (aspect_ratio <= 0.0)
710             aspect_ratio = 1.0;
711         aspect_ratio *= (float)vp->width / (float)vp->height;
712
713         if (is->subtitle_st) {
714             if (is->subpq_size > 0) {
715                 sp = &is->subpq[is->subpq_rindex];
716
717                 if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) {
718                     SDL_LockYUVOverlay (vp->bmp);
719
720                     pict.data[0] = vp->bmp->pixels[0];
721                     pict.data[1] = vp->bmp->pixels[2];
722                     pict.data[2] = vp->bmp->pixels[1];
723
724                     pict.linesize[0] = vp->bmp->pitches[0];
725                     pict.linesize[1] = vp->bmp->pitches[2];
726                     pict.linesize[2] = vp->bmp->pitches[1];
727
728                     for (i = 0; i < sp->sub.num_rects; i++)
729                         blend_subrect(&pict, sp->sub.rects[i],
730                                       vp->bmp->w, vp->bmp->h);
731
732                     SDL_UnlockYUVOverlay (vp->bmp);
733                 }
734             }
735         }
736
737
738         /* XXX: we assume the screen has a 1:1 pixel aspect ratio */
739         height = is->height;
740         width = ((int)rint(height * aspect_ratio)) & ~1;
741         if (width > is->width) {
742             width = is->width;
743             height = ((int)rint(width / aspect_ratio)) & ~1;
744         }
745         x = (is->width - width) / 2;
746         y = (is->height - height) / 2;
747         if (!is->no_background) {
748             /* fill the background */
749             //            fill_border(is, x, y, width, height, QERGB(0x00, 0x00, 0x00));
750         } else {
751             is->no_background = 0;
752         }
753         rect.x = is->xleft + x;
754         rect.y = is->ytop  + y;
755         rect.w = FFMAX(width,  1);
756         rect.h = FFMAX(height, 1);
757         SDL_DisplayYUVOverlay(vp->bmp, &rect);
758     } else {
759 #if 0
760         fill_rectangle(screen,
761                        is->xleft, is->ytop, is->width, is->height,
762                        QERGB(0x00, 0x00, 0x00));
763 #endif
764     }
765 }
766
767 /* get the current audio output buffer size, in bytes. With SDL, we
768    cannot have precise information */
769 static int audio_write_get_buf_size(VideoState *is)
770 {
771     return is->audio_buf_size - is->audio_buf_index;
772 }
773
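/* Modulo that always returns a value in [0, b), even for negative a; used for
   wrapping indices into the circular sample_array. */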
774 static inline int compute_mod(int a, int b)
775 {
776     return a < 0 ? a%b + b : a%b;
777 }
778
779 static void video_audio_display(VideoState *s)
780 {
781     int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
782     int ch, channels, h, h2, bgcolor, fgcolor;
783     int64_t time_diff;
784     int rdft_bits, nb_freq;
785
786     for(rdft_bits=1; (1<<rdft_bits)<2*s->height; rdft_bits++)
787         ;
788     nb_freq= 1<<(rdft_bits-1);
789
790     /* compute display index : center on currently output samples */
791     channels = s->audio_st->codec->channels;
792     nb_display_channels = channels;
793     if (!s->paused) {
794         int data_used= s->show_mode == SHOW_MODE_WAVES ? s->width : (2*nb_freq);
795         n = 2 * channels;
796         delay = audio_write_get_buf_size(s);
797         delay /= n;
798
799         /* to be more precise, we take into account the time spent since
800            the last buffer computation */
801         if (audio_callback_time) {
802             time_diff = av_gettime() - audio_callback_time;
803             delay -= (time_diff * s->audio_st->codec->sample_rate) / 1000000;
804         }
805
806         delay += 2*data_used;
807         if (delay < data_used)
808             delay = data_used;
809
810         i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
811         if (s->show_mode == SHOW_MODE_WAVES) {
812             h= INT_MIN;
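            /* Oscilloscope-style triggering: scan up to 1000 samples for a
               zero crossing with a large amplitude swing, so that successive
               refreshes of the waveform stay roughly aligned instead of
               scrolling randomly. */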
813             for(i=0; i<1000; i+=channels){
814                 int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
815                 int a= s->sample_array[idx];
816                 int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
817                 int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
818                 int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
819                 int score= a-d;
820                 if(h<score && (b^c)<0){
821                     h= score;
822                     i_start= idx;
823                 }
824             }
825         }
826
827         s->last_i_start = i_start;
828     } else {
829         i_start = s->last_i_start;
830     }
831
832     bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
833     if (s->show_mode == SHOW_MODE_WAVES) {
834         fill_rectangle(screen,
835                        s->xleft, s->ytop, s->width, s->height,
836                        bgcolor);
837
838         fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
839
840         /* total height for one channel */
841         h = s->height / nb_display_channels;
842         /* graph height / 2 */
843         h2 = (h * 9) / 20;
844         for(ch = 0;ch < nb_display_channels; ch++) {
845             i = i_start + ch;
846             y1 = s->ytop + ch * h + (h / 2); /* position of center line */
847             for(x = 0; x < s->width; x++) {
848                 y = (s->sample_array[i] * h2) >> 15;
849                 if (y < 0) {
850                     y = -y;
851                     ys = y1 - y;
852                 } else {
853                     ys = y1;
854                 }
855                 fill_rectangle(screen,
856                                s->xleft + x, ys, 1, y,
857                                fgcolor);
858                 i += channels;
859                 if (i >= SAMPLE_ARRAY_SIZE)
860                     i -= SAMPLE_ARRAY_SIZE;
861             }
862         }
863
864         fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
865
866         for(ch = 1;ch < nb_display_channels; ch++) {
867             y = s->ytop + ch * h;
868             fill_rectangle(screen,
869                            s->xleft, y, s->width, 1,
870                            fgcolor);
871         }
872         SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
873     }else{
874         nb_display_channels= FFMIN(nb_display_channels, 2);
875         if(rdft_bits != s->rdft_bits){
876             av_rdft_end(s->rdft);
877             av_free(s->rdft_data);
878             s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
879             s->rdft_bits= rdft_bits;
880             s->rdft_data= av_malloc(4*nb_freq*sizeof(*s->rdft_data));
881         }
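        /* Window the most recent samples (1 - w*w is a Welch/parabolic
           window), run a real FFT per channel and draw one column of the
           spectrogram at xpos, mapping magnitude to colour. */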
882         {
883             FFTSample *data[2];
884             for(ch = 0;ch < nb_display_channels; ch++) {
885                 data[ch] = s->rdft_data + 2*nb_freq*ch;
886                 i = i_start + ch;
887                 for(x = 0; x < 2*nb_freq; x++) {
888                     double w= (x-nb_freq)*(1.0/nb_freq);
889                     data[ch][x]= s->sample_array[i]*(1.0-w*w);
890                     i += channels;
891                     if (i >= SAMPLE_ARRAY_SIZE)
892                         i -= SAMPLE_ARRAY_SIZE;
893                 }
894                 av_rdft_calc(s->rdft, data[ch]);
895             }
896             // Least efficient way to do this; we should of course access it directly, but it's more than fast enough
897             for(y=0; y<s->height; y++){
898                 double w= 1/sqrt(nb_freq);
899                 int a= sqrt(w*sqrt(data[0][2*y+0]*data[0][2*y+0] + data[0][2*y+1]*data[0][2*y+1]));
900                 int b= (nb_display_channels == 2 ) ? sqrt(w*sqrt(data[1][2*y+0]*data[1][2*y+0]
901                        + data[1][2*y+1]*data[1][2*y+1])) : a;
902                 a= FFMIN(a,255);
903                 b= FFMIN(b,255);
904                 fgcolor = SDL_MapRGB(screen->format, a, b, (a+b)/2);
905
906                 fill_rectangle(screen,
907                             s->xpos, s->height-y, 1, 1,
908                             fgcolor);
909             }
910         }
911         SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
912         s->xpos++;
913         if(s->xpos >= s->width)
914             s->xpos= s->xleft;
915     }
916 }
917
918 static int video_open(VideoState *is){
919     int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
920     int w,h;
921
922     if(is_full_screen) flags |= SDL_FULLSCREEN;
923     else               flags |= SDL_RESIZABLE;
924
925     if (is_full_screen && fs_screen_width) {
926         w = fs_screen_width;
927         h = fs_screen_height;
928     } else if(!is_full_screen && screen_width){
929         w = screen_width;
930         h = screen_height;
931 #if CONFIG_AVFILTER
932     }else if (is->out_video_filter && is->out_video_filter->inputs[0]){
933         w = is->out_video_filter->inputs[0]->w;
934         h = is->out_video_filter->inputs[0]->h;
935 #else
936     }else if (is->video_st && is->video_st->codec->width){
937         w = is->video_st->codec->width;
938         h = is->video_st->codec->height;
939 #endif
940     } else {
941         w = 640;
942         h = 480;
943     }
944     if(screen && is->width == screen->w && screen->w == w
945        && is->height== screen->h && screen->h == h)
946         return 0;
947
948 #ifndef __APPLE__
949     screen = SDL_SetVideoMode(w, h, 0, flags);
950 #else
951     /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
952     screen = SDL_SetVideoMode(w, h, 24, flags);
953 #endif
954     if (!screen) {
955         fprintf(stderr, "SDL: could not set video mode - exiting\n");
956         return -1;
957     }
958     if (!window_title)
959         window_title = input_filename;
960     SDL_WM_SetCaption(window_title, window_title);
961
962     is->width = screen->w;
963     is->height = screen->h;
964
965     return 0;
966 }
967
968 /* display the current picture, if any */
969 static void video_display(VideoState *is)
970 {
971     if(!screen)
972         video_open(cur_stream);
973     if (is->audio_st && is->show_mode != SHOW_MODE_VIDEO)
974         video_audio_display(is);
975     else if (is->video_st)
976         video_image_display(is);
977 }
978
979 static int refresh_thread(void *opaque)
980 {
981     VideoState *is= opaque;
982     while(!is->abort_request){
983         SDL_Event event;
984         event.type = FF_REFRESH_EVENT;
985         event.user.data1 = opaque;
986         if(!is->refresh){
987             is->refresh=1;
988             SDL_PushEvent(&event);
989         }
990         // FIXME: ideally we should wait for the correct time, but SDL's event passing is so slow it would be silly
991         usleep(is->audio_st && is->show_mode != SHOW_MODE_VIDEO ? rdftspeed*1000 : 5000);
992     }
993     return 0;
994 }
995
996 /* get the current audio clock value */
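/* audio_clock is the pts corresponding to the end of the audio data decoded so
   far; subtract the playback time of the data still waiting in our output
   buffer (the hard-coded factor of 2 assumes 16-bit samples) to estimate what
   is being heard right now. */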
997 static double get_audio_clock(VideoState *is)
998 {
999     double pts;
1000     int hw_buf_size, bytes_per_sec;
1001     pts = is->audio_clock;
1002     hw_buf_size = audio_write_get_buf_size(is);
1003     bytes_per_sec = 0;
1004     if (is->audio_st) {
1005         bytes_per_sec = is->audio_st->codec->sample_rate *
1006             2 * is->audio_st->codec->channels;
1007     }
1008     if (bytes_per_sec)
1009         pts -= (double)hw_buf_size / bytes_per_sec;
1010     return pts;
1011 }
1012
1013 /* get the current video clock value */
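/* video_current_pts_drift is video_current_pts minus the wall-clock time at
   which it was set, so adding the current time back gives an extrapolated
   "now" position while playing; when paused, the last pts is returned
   unchanged. */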
1014 static double get_video_clock(VideoState *is)
1015 {
1016     if (is->paused) {
1017         return is->video_current_pts;
1018     } else {
1019         return is->video_current_pts_drift + av_gettime() / 1000000.0;
1020     }
1021 }
1022
1023 /* get the current external clock value */
1024 static double get_external_clock(VideoState *is)
1025 {
1026     int64_t ti;
1027     ti = av_gettime();
1028     return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
1029 }
1030
1031 /* get the current master clock value */
1032 static double get_master_clock(VideoState *is)
1033 {
1034     double val;
1035
1036     if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1037         if (is->video_st)
1038             val = get_video_clock(is);
1039         else
1040             val = get_audio_clock(is);
1041     } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1042         if (is->audio_st)
1043             val = get_audio_clock(is);
1044         else
1045             val = get_video_clock(is);
1046     } else {
1047         val = get_external_clock(is);
1048     }
1049     return val;
1050 }
1051
1052 /* seek in the stream */
1053 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1054 {
1055     if (!is->seek_req) {
1056         is->seek_pos = pos;
1057         is->seek_rel = rel;
1058         is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1059         if (seek_by_bytes)
1060             is->seek_flags |= AVSEEK_FLAG_BYTE;
1061         is->seek_req = 1;
1062     }
1063 }
1064
1065 /* pause or resume the video */
1066 static void stream_toggle_pause(VideoState *is)
1067 {
1068     if (is->paused) {
1069         is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
1070         if(is->read_pause_return != AVERROR(ENOSYS)){
1071             is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
1072         }
1073         is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
1074     }
1075     is->paused = !is->paused;
1076 }
1077
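/* Compute the wall-clock time (frame_timer) at which the given frame should be
   shown: start from the previous frame's delay, then, when video is the slave,
   nudge it towards the master clock (audio or external) by zeroing or doubling
   the delay once the drift exceeds the sync threshold. */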
1078 static double compute_target_time(double frame_current_pts, VideoState *is)
1079 {
1080     double delay, sync_threshold, diff = 0;
1081
1082     /* compute nominal delay */
1083     delay = frame_current_pts - is->frame_last_pts;
1084     if (delay <= 0 || delay >= 10.0) {
1085         /* if incorrect delay, use previous one */
1086         delay = is->frame_last_delay;
1087     } else {
1088         is->frame_last_delay = delay;
1089     }
1090     is->frame_last_pts = frame_current_pts;
1091
1092     /* update delay to follow master synchronisation source */
1093     if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1094          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1095         /* if video is slave, we try to correct big delays by
1096            duplicating or deleting a frame */
1097         diff = get_video_clock(is) - get_master_clock(is);
1098
1099         /* skip or repeat frame. We take into account the
1100            delay to compute the threshold. I still don't know
1101            if it is the best guess */
1102         sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1103         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1104             if (diff <= -sync_threshold)
1105                 delay = 0;
1106             else if (diff >= sync_threshold)
1107                 delay = 2 * delay;
1108         }
1109     }
1110     is->frame_timer += delay;
1111 #if defined(DEBUG_SYNC)
1112     printf("video: delay=%0.3f pts=%0.3f A-V=%f\n",
1113             delay, frame_current_pts, -diff);
1114 #endif
1115
1116     return is->frame_timer;
1117 }
1118
1119 /* called to display each frame */
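/* Run when an FF_REFRESH_EVENT (pushed by refresh_thread) is processed: wait
   for the next picture's target_clock, handle frame dropping and subtitle
   expiry, then display the picture and the periodic status line. */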
1120 static void video_refresh(void *opaque)
1121 {
1122     VideoState *is = opaque;
1123     VideoPicture *vp;
1124
1125     SubPicture *sp, *sp2;
1126
1127     if (is->video_st) {
1128 retry:
1129         if (is->pictq_size == 0) {
1130             // nothing to do, no picture to display in the queue
1131         } else {
1132             double time= av_gettime()/1000000.0;
1133             double next_target;
1134             /* dequeue the picture */
1135             vp = &is->pictq[is->pictq_rindex];
1136
1137             if(time < vp->target_clock)
1138                 return;
1139             /* update current video pts */
1140             is->video_current_pts = vp->pts;
1141             is->video_current_pts_drift = is->video_current_pts - time;
1142             is->video_current_pos = vp->pos;
1143             if(is->pictq_size > 1){
1144                 VideoPicture *nextvp= &is->pictq[(is->pictq_rindex+1)%VIDEO_PICTURE_QUEUE_SIZE];
1145                 assert(nextvp->target_clock >= vp->target_clock);
1146                 next_target= nextvp->target_clock;
1147             }else{
1148                 next_target= vp->target_clock + is->video_clock - vp->pts; //FIXME pass durations cleanly
1149             }
1150             if(framedrop && time > next_target){
1151                 is->skip_frames *= 1.0 + FRAME_SKIP_FACTOR;
1152                 if(is->pictq_size > 1 || time > next_target + 0.5){
1153                     /* update queue size and signal for next picture */
1154                     if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1155                         is->pictq_rindex = 0;
1156
1157                     SDL_LockMutex(is->pictq_mutex);
1158                     is->pictq_size--;
1159                     SDL_CondSignal(is->pictq_cond);
1160                     SDL_UnlockMutex(is->pictq_mutex);
1161                     goto retry;
1162                 }
1163             }
1164
1165             if(is->subtitle_st) {
1166                 if (is->subtitle_stream_changed) {
1167                     SDL_LockMutex(is->subpq_mutex);
1168
1169                     while (is->subpq_size) {
1170                         free_subpicture(&is->subpq[is->subpq_rindex]);
1171
1172                         /* update queue size and signal for next picture */
1173                         if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1174                             is->subpq_rindex = 0;
1175
1176                         is->subpq_size--;
1177                     }
1178                     is->subtitle_stream_changed = 0;
1179
1180                     SDL_CondSignal(is->subpq_cond);
1181                     SDL_UnlockMutex(is->subpq_mutex);
1182                 } else {
1183                     if (is->subpq_size > 0) {
1184                         sp = &is->subpq[is->subpq_rindex];
1185
1186                         if (is->subpq_size > 1)
1187                             sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1188                         else
1189                             sp2 = NULL;
1190
1191                         if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1192                                 || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1193                         {
1194                             free_subpicture(sp);
1195
1196                             /* update queue size and signal for next picture */
1197                             if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1198                                 is->subpq_rindex = 0;
1199
1200                             SDL_LockMutex(is->subpq_mutex);
1201                             is->subpq_size--;
1202                             SDL_CondSignal(is->subpq_cond);
1203                             SDL_UnlockMutex(is->subpq_mutex);
1204                         }
1205                     }
1206                 }
1207             }
1208
1209             /* display picture */
1210             if (!display_disable)
1211                 video_display(is);
1212
1213             /* update queue size and signal for next picture */
1214             if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1215                 is->pictq_rindex = 0;
1216
1217             SDL_LockMutex(is->pictq_mutex);
1218             is->pictq_size--;
1219             SDL_CondSignal(is->pictq_cond);
1220             SDL_UnlockMutex(is->pictq_mutex);
1221         }
1222     } else if (is->audio_st) {
1223         /* draw the next audio frame */
1224
1225         /* if only audio stream, then display the audio bars (better
1226            than nothing, just to test the implementation) */
1227
1228         /* display picture */
1229         if (!display_disable)
1230             video_display(is);
1231     }
1232     if (show_status) {
1233         static int64_t last_time;
1234         int64_t cur_time;
1235         int aqsize, vqsize, sqsize;
1236         double av_diff;
1237
1238         cur_time = av_gettime();
1239         if (!last_time || (cur_time - last_time) >= 30000) {
1240             aqsize = 0;
1241             vqsize = 0;
1242             sqsize = 0;
1243             if (is->audio_st)
1244                 aqsize = is->audioq.size;
1245             if (is->video_st)
1246                 vqsize = is->videoq.size;
1247             if (is->subtitle_st)
1248                 sqsize = is->subtitleq.size;
1249             av_diff = 0;
1250             if (is->audio_st && is->video_st)
1251                 av_diff = get_audio_clock(is) - get_video_clock(is);
1252             printf("%7.2f A-V:%7.3f s:%3.1f aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
1253                    get_master_clock(is),
1254                    av_diff,
1255                    FFMAX(is->skip_frames-1, 0),
1256                    aqsize / 1024,
1257                    vqsize / 1024,
1258                    sqsize,
1259                    is->video_st ? is->video_st->codec->pts_correction_num_faulty_dts : 0,
1260                    is->video_st ? is->video_st->codec->pts_correction_num_faulty_pts : 0);
1261             fflush(stdout);
1262             last_time = cur_time;
1263         }
1264     }
1265 }
1266
1267 static void stream_close(VideoState *is)
1268 {
1269     VideoPicture *vp;
1270     int i;
1271     /* XXX: use a special url_shutdown call to abort parse cleanly */
1272     is->abort_request = 1;
1273     SDL_WaitThread(is->read_tid, NULL);
1274     SDL_WaitThread(is->refresh_tid, NULL);
1275
1276     /* free all pictures */
1277     for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
1278         vp = &is->pictq[i];
1279 #if CONFIG_AVFILTER
1280         if (vp->picref) {
1281             avfilter_unref_buffer(vp->picref);
1282             vp->picref = NULL;
1283         }
1284 #endif
1285         if (vp->bmp) {
1286             SDL_FreeYUVOverlay(vp->bmp);
1287             vp->bmp = NULL;
1288         }
1289     }
1290     SDL_DestroyMutex(is->pictq_mutex);
1291     SDL_DestroyCond(is->pictq_cond);
1292     SDL_DestroyMutex(is->subpq_mutex);
1293     SDL_DestroyCond(is->subpq_cond);
1294 #if !CONFIG_AVFILTER
1295     if (is->img_convert_ctx)
1296         sws_freeContext(is->img_convert_ctx);
1297 #endif
1298     av_free(is);
1299 }
1300
1301 static void do_exit(void)
1302 {
1303     if (cur_stream) {
1304         stream_close(cur_stream);
1305         cur_stream = NULL;
1306     }
1307     uninit_opts();
1308 #if CONFIG_AVFILTER
1309     avfilter_uninit();
1310 #endif
1311     if (show_status)
1312         printf("\n");
1313     SDL_Quit();
1314     av_log(NULL, AV_LOG_QUIET, "");
1315     exit(0);
1316 }
1317
1318 /* allocate a picture (this needs to be done in the main thread to avoid
1319    potential locking problems) */
1320 static void alloc_picture(void *opaque)
1321 {
1322     VideoState *is = opaque;
1323     VideoPicture *vp;
1324
1325     vp = &is->pictq[is->pictq_windex];
1326
1327     if (vp->bmp)
1328         SDL_FreeYUVOverlay(vp->bmp);
1329
1330 #if CONFIG_AVFILTER
1331     if (vp->picref)
1332         avfilter_unref_buffer(vp->picref);
1333     vp->picref = NULL;
1334
1335     vp->width   = is->out_video_filter->inputs[0]->w;
1336     vp->height  = is->out_video_filter->inputs[0]->h;
1337     vp->pix_fmt = is->out_video_filter->inputs[0]->format;
1338 #else
1339     vp->width   = is->video_st->codec->width;
1340     vp->height  = is->video_st->codec->height;
1341     vp->pix_fmt = is->video_st->codec->pix_fmt;
1342 #endif
1343
1344     vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
1345                                    SDL_YV12_OVERLAY,
1346                                    screen);
1347     if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
1348         /* SDL allocates a buffer smaller than requested if the video
1349          * overlay hardware is unable to support the requested size. */
1350         fprintf(stderr, "Error: the video system does not support an image\n"
1351                         "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n"
1352                         "to reduce the image size.\n", vp->width, vp->height );
1353         do_exit();
1354     }
1355
1356     SDL_LockMutex(is->pictq_mutex);
1357     vp->allocated = 1;
1358     SDL_CondSignal(is->pictq_cond);
1359     SDL_UnlockMutex(is->pictq_mutex);
1360 }
1361
1362 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
1363 {
1364     VideoPicture *vp;
1365     double frame_delay, pts = pts1;
1366
1367     /* compute the exact PTS for the picture if it is omitted in the stream
1368      * pts1 is the dts of the pkt / pts of the frame */
1369     if (pts != 0) {
1370         /* update video clock with pts, if present */
1371         is->video_clock = pts;
1372     } else {
1373         pts = is->video_clock;
1374     }
1375     /* update video clock for next frame */
1376     frame_delay = av_q2d(is->video_st->codec->time_base);
1377     /* for MPEG2, the frame can be repeated, so we update the
1378        clock accordingly */
1379     frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1380     is->video_clock += frame_delay;
1381
1382 #if defined(DEBUG_SYNC) && 0
1383     printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
1384            av_get_picture_type_char(src_frame->pict_type), pts, pts1);
1385 #endif
1386
1387     /* wait until we have space to put a new picture */
1388     SDL_LockMutex(is->pictq_mutex);
1389
1390     if(is->pictq_size>=VIDEO_PICTURE_QUEUE_SIZE && !is->refresh)
1391         is->skip_frames= FFMAX(1.0 - FRAME_SKIP_FACTOR, is->skip_frames * (1.0-FRAME_SKIP_FACTOR));
1392
1393     while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1394            !is->videoq.abort_request) {
1395         SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1396     }
1397     SDL_UnlockMutex(is->pictq_mutex);
1398
1399     if (is->videoq.abort_request)
1400         return -1;
1401
1402     vp = &is->pictq[is->pictq_windex];
1403
1404     /* alloc or resize hardware picture buffer */
1405     if (!vp->bmp ||
1406 #if CONFIG_AVFILTER
1407         vp->width  != is->out_video_filter->inputs[0]->w ||
1408         vp->height != is->out_video_filter->inputs[0]->h) {
1409 #else
1410         vp->width != is->video_st->codec->width ||
1411         vp->height != is->video_st->codec->height) {
1412 #endif
1413         SDL_Event event;
1414
1415         vp->allocated = 0;
1416
1417         /* the allocation must be done in the main thread to avoid
1418            locking problems */
1419         event.type = FF_ALLOC_EVENT;
1420         event.user.data1 = is;
1421         SDL_PushEvent(&event);
1422
1423         /* wait until the picture is allocated */
1424         SDL_LockMutex(is->pictq_mutex);
1425         while (!vp->allocated && !is->videoq.abort_request) {
1426             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1427         }
1428         SDL_UnlockMutex(is->pictq_mutex);
1429
1430         if (is->videoq.abort_request)
1431             return -1;
1432     }
1433
1434     /* if the frame is not skipped, then display it */
1435     if (vp->bmp) {
1436         AVPicture pict;
1437 #if CONFIG_AVFILTER
1438         if(vp->picref)
1439             avfilter_unref_buffer(vp->picref);
1440         vp->picref = src_frame->opaque;
1441 #endif
1442
1443         /* get a pointer on the bitmap */
1444         SDL_LockYUVOverlay (vp->bmp);
1445
1446         memset(&pict,0,sizeof(AVPicture));
1447         pict.data[0] = vp->bmp->pixels[0];
1448         pict.data[1] = vp->bmp->pixels[2];
1449         pict.data[2] = vp->bmp->pixels[1];
1450
1451         pict.linesize[0] = vp->bmp->pitches[0];
1452         pict.linesize[1] = vp->bmp->pitches[2];
1453         pict.linesize[2] = vp->bmp->pitches[1];
1454
1455 #if CONFIG_AVFILTER
1456         //FIXME use direct rendering
1457         av_picture_copy(&pict, (AVPicture *)src_frame,
1458                         vp->pix_fmt, vp->width, vp->height);
1459 #else
1460         sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
1461         is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
1462             vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
1463             PIX_FMT_YUV420P, sws_flags, NULL, NULL, NULL);
1464         if (is->img_convert_ctx == NULL) {
1465             fprintf(stderr, "Cannot initialize the conversion context\n");
1466             exit(1);
1467         }
1468         sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1469                   0, vp->height, pict.data, pict.linesize);
1470 #endif
1471         /* update the bitmap content */
1472         SDL_UnlockYUVOverlay(vp->bmp);
1473
1474         vp->pts = pts;
1475         vp->pos = pos;
1476
1477         /* now we can update the picture count */
1478         if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1479             is->pictq_windex = 0;
1480         SDL_LockMutex(is->pictq_mutex);
1481         vp->target_clock= compute_target_time(vp->pts, is);
1482
1483         is->pictq_size++;
1484         SDL_UnlockMutex(is->pictq_mutex);
1485     }
1486     return 0;
1487 }
1488
1489 static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
1490 {
1491     int len1, got_picture, i;
1492
1493     if (packet_queue_get(&is->videoq, pkt, 1) < 0)
1494         return -1;
1495
1496     if (pkt->data == flush_pkt.data) {
1497         avcodec_flush_buffers(is->video_st->codec);
1498
1499         SDL_LockMutex(is->pictq_mutex);
1500         // Make sure there are no long-delay timers (ideally we should just flush the queue, but that's harder)
1501         for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
1502             is->pictq[i].target_clock= 0;
1503         }
1504         while (is->pictq_size && !is->videoq.abort_request) {
1505             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1506         }
1507         is->video_current_pos = -1;
1508         SDL_UnlockMutex(is->pictq_mutex);
1509
1510         is->frame_last_pts = AV_NOPTS_VALUE;
1511         is->frame_last_delay = 0;
1512         is->frame_timer = (double)av_gettime() / 1000000.0;
1513         is->skip_frames = 1;
1514         is->skip_frames_index = 0;
1515         return 0;
1516     }
1517
1518     len1 = avcodec_decode_video2(is->video_st->codec,
1519                                  frame, &got_picture,
1520                                  pkt);
1521
1522     if (got_picture) {
1523         if (decoder_reorder_pts == -1) {
1524             *pts = frame->best_effort_timestamp;
1525         } else if (decoder_reorder_pts) {
1526             *pts = frame->pkt_pts;
1527         } else {
1528             *pts = frame->pkt_dts;
1529         }
1530
1531         if (*pts == AV_NOPTS_VALUE) {
1532             *pts = 0;
1533         }
1534
1535         is->skip_frames_index += 1;
1536         if(is->skip_frames_index >= is->skip_frames){
1537             is->skip_frames_index -= FFMAX(is->skip_frames, 1.0);
1538             return 1;
1539         }
1540
1541     }
1542     return 0;
1543 }
1544
1545 #if CONFIG_AVFILTER
1546 typedef struct {
1547     VideoState *is;
1548     AVFrame *frame;
1549     int use_dr1;
1550 } FilterPriv;
1551
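/* Direct-rendering get_buffer() for the source filter: instead of letting the
 * decoder allocate its own frame, hand it an AVFilterBufferRef obtained from
 * the filter's output link, so decoded frames can enter the filter graph
 * without an extra copy. */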
1552 static int input_get_buffer(AVCodecContext *codec, AVFrame *pic)
1553 {
1554     AVFilterContext *ctx = codec->opaque;
1555     AVFilterBufferRef  *ref;
1556     int perms = AV_PERM_WRITE;
1557     int i, w, h, stride[4];
1558     unsigned edge;
1559     int pixel_size;
1560
1561     av_assert0(codec->flags & CODEC_FLAG_EMU_EDGE);
1562
1563     if (codec->codec->capabilities & CODEC_CAP_NEG_LINESIZES)
1564         perms |= AV_PERM_NEG_LINESIZES;
1565
1566     if(pic->buffer_hints & FF_BUFFER_HINTS_VALID) {
1567         if(pic->buffer_hints & FF_BUFFER_HINTS_READABLE) perms |= AV_PERM_READ;
1568         if(pic->buffer_hints & FF_BUFFER_HINTS_PRESERVE) perms |= AV_PERM_PRESERVE;
1569         if(pic->buffer_hints & FF_BUFFER_HINTS_REUSABLE) perms |= AV_PERM_REUSE2;
1570     }
1571     if(pic->reference) perms |= AV_PERM_READ | AV_PERM_PRESERVE;
1572
1573     w = codec->width;
1574     h = codec->height;
1575
1576     if(av_image_check_size(w, h, 0, codec))
1577         return -1;
1578
1579     avcodec_align_dimensions2(codec, &w, &h, stride);
1580     edge = codec->flags & CODEC_FLAG_EMU_EDGE ? 0 : avcodec_get_edge_width();
1581     w += edge << 1;
1582     h += edge << 1;
1583
1584     if(!(ref = avfilter_get_video_buffer(ctx->outputs[0], perms, w, h)))
1585         return -1;
1586
1587     pixel_size = av_pix_fmt_descriptors[ref->format].comp[0].step_minus1+1;
1588     ref->video->w = codec->width;
1589     ref->video->h = codec->height;
1590     for(i = 0; i < 4; i ++) {
1591         unsigned hshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_w : 0;
1592         unsigned vshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_h : 0;
1593
1594         if (ref->data[i]) {
1595             ref->data[i]    += ((edge * pixel_size) >> hshift) + ((edge * ref->linesize[i]) >> vshift);
1596         }
1597         pic->data[i]     = ref->data[i];
1598         pic->linesize[i] = ref->linesize[i];
1599     }
1600     pic->opaque = ref;
1601     pic->age    = INT_MAX;
1602     pic->type   = FF_BUFFER_TYPE_USER;
1603     pic->reordered_opaque = codec->reordered_opaque;
1604     if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
1605     else           pic->pkt_pts = AV_NOPTS_VALUE;
1606     return 0;
1607 }
1608
1609 static void input_release_buffer(AVCodecContext *codec, AVFrame *pic)
1610 {
1611     memset(pic->data, 0, sizeof(pic->data));
1612     avfilter_unref_buffer(pic->opaque);
1613 }
1614
1615 static int input_reget_buffer(AVCodecContext *codec, AVFrame *pic)
1616 {
1617     AVFilterBufferRef *ref = pic->opaque;
1618
1619     if (pic->data[0] == NULL) {
1620         pic->buffer_hints |= FF_BUFFER_HINTS_READABLE;
1621         return codec->get_buffer(codec, pic);
1622     }
1623
1624     if ((codec->width != ref->video->w) || (codec->height != ref->video->h) ||
1625         (codec->pix_fmt != ref->format)) {
1626         av_log(codec, AV_LOG_ERROR, "Picture properties changed.\n");
1627         return -1;
1628     }
1629
1630     pic->reordered_opaque = codec->reordered_opaque;
1631     if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
1632     else           pic->pkt_pts = AV_NOPTS_VALUE;
1633     return 0;
1634 }
1635
1636 static int input_init(AVFilterContext *ctx, const char *args, void *opaque)
1637 {
1638     FilterPriv *priv = ctx->priv;
1639     AVCodecContext *codec;
1640     if(!opaque) return -1;
1641
1642     priv->is = opaque;
1643     codec    = priv->is->video_st->codec;
1644     codec->opaque = ctx;
1645     if (codec->codec->capabilities & CODEC_CAP_DR1)
1646     {
1647         codec->flags |= CODEC_FLAG_EMU_EDGE;
1648         priv->use_dr1 = 1;
1649         codec->get_buffer     = input_get_buffer;
1650         codec->release_buffer = input_release_buffer;
1651         codec->reget_buffer   = input_reget_buffer;
1652         codec->thread_safe_callbacks = 1;
1653     }
1654
1655     priv->frame = avcodec_alloc_frame();
1656
1657     return 0;
1658 }
1659
1660 static void input_uninit(AVFilterContext *ctx)
1661 {
1662     FilterPriv *priv = ctx->priv;
1663     av_free(priv->frame);
1664 }
1665
1666 static int input_request_frame(AVFilterLink *link)
1667 {
1668     FilterPriv *priv = link->src->priv;
1669     AVFilterBufferRef *picref;
1670     int64_t pts = 0;
1671     AVPacket pkt;
1672     int ret;
1673
1674     while (!(ret = get_video_frame(priv->is, priv->frame, &pts, &pkt)))
1675         av_free_packet(&pkt);
1676     if (ret < 0)
1677         return -1;
1678
1679     if(priv->use_dr1) {
1680         picref = avfilter_ref_buffer(priv->frame->opaque, ~0);
1681     } else {
1682         picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, link->w, link->h);
1683         av_image_copy(picref->data, picref->linesize,
1684                       priv->frame->data, priv->frame->linesize,
1685                       picref->format, link->w, link->h);
1686     }
1687     av_free_packet(&pkt);
1688
1689     picref->pts = pts;
1690     picref->pos = priv->frame->pkt_pos;
1691     picref->video->sample_aspect_ratio = priv->frame->sample_aspect_ratio;
1692     avfilter_start_frame(link, picref);
1693     avfilter_draw_slice(link, 0, link->h, 1);
1694     avfilter_end_frame(link);
1695
1696     return 0;
1697 }
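/* Note on the two paths above (descriptive only): with use_dr1 the decoded
 * pixels already live in the AVFilterBufferRef created by input_get_buffer(),
 * so taking a new reference to frame->opaque is enough; without DR1 the frame
 * is copied into a buffer obtained from the output link, which costs one
 * av_image_copy() per frame. */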
1698
1699 static int input_query_formats(AVFilterContext *ctx)
1700 {
1701     FilterPriv *priv = ctx->priv;
1702     enum PixelFormat pix_fmts[] = {
1703         priv->is->video_st->codec->pix_fmt, PIX_FMT_NONE
1704     };
1705
1706     avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts));
1707     return 0;
1708 }
1709
1710 static int input_config_props(AVFilterLink *link)
1711 {
1712     FilterPriv *priv  = link->src->priv;
1713     AVCodecContext *c = priv->is->video_st->codec;
1714
1715     link->w = c->width;
1716     link->h = c->height;
1717     link->time_base = priv->is->video_st->time_base;
1718
1719     return 0;
1720 }
1721
1722 static AVFilter input_filter =
1723 {
1724     .name      = "ffplay_input",
1725
1726     .priv_size = sizeof(FilterPriv),
1727
1728     .init      = input_init,
1729     .uninit    = input_uninit,
1730
1731     .query_formats = input_query_formats,
1732
1733     .inputs    = (AVFilterPad[]) {{ .name = NULL }},
1734     .outputs   = (AVFilterPad[]) {{ .name = "default",
1735                                     .type = AVMEDIA_TYPE_VIDEO,
1736                                     .request_frame = input_request_frame,
1737                                     .config_props  = input_config_props, },
1738                                   { .name = NULL }},
1739 };
1740
1741 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters)
1742 {
1743     char sws_flags_str[128];
1744     int ret;
1745     FFSinkContext ffsink_ctx = { .pix_fmt = PIX_FMT_YUV420P };
1746     AVFilterContext *filt_src = NULL, *filt_out = NULL;
1747     snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%d", sws_flags);
1748     graph->scale_sws_opts = av_strdup(sws_flags_str);
1749
1750     if ((ret = avfilter_graph_create_filter(&filt_src, &input_filter, "src",
1751                                             NULL, is, graph)) < 0)
1752         goto the_end;
1753     if ((ret = avfilter_graph_create_filter(&filt_out, &ffsink, "out",
1754                                             NULL, &ffsink_ctx, graph)) < 0)
1755         goto the_end;
1756
1757     if(vfilters) {
1758         AVFilterInOut *outputs = av_malloc(sizeof(AVFilterInOut));
1759         AVFilterInOut *inputs  = av_malloc(sizeof(AVFilterInOut));
1760
1761         outputs->name    = av_strdup("in");
1762         outputs->filter_ctx = filt_src;
1763         outputs->pad_idx = 0;
1764         outputs->next    = NULL;
1765
1766         inputs->name    = av_strdup("out");
1767         inputs->filter_ctx = filt_out;
1768         inputs->pad_idx = 0;
1769         inputs->next    = NULL;
1770
1771         if ((ret = avfilter_graph_parse(graph, vfilters, inputs, outputs, NULL)) < 0)
1772             goto the_end;
1773         av_freep(&vfilters);
1774     } else {
1775         if ((ret = avfilter_link(filt_src, 0, filt_out, 0)) < 0)
1776             goto the_end;
1777     }
1778
1779     if ((ret = avfilter_graph_config(graph, NULL)) < 0)
1780         goto the_end;
1781
1782     is->out_video_filter = filt_out;
1783 the_end:
1784     return ret;
1785 }
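/* Illustrative example (assuming a build with CONFIG_AVFILTER): running
 *     ffplay -vf "scale=320:240" input.mkv        (hypothetical file name)
 * makes avfilter_graph_parse() splice the user chain between the two filters
 * created above, giving
 *     ffplay_input ("in") -> scale=320:240 -> ffsink ("out")
 * while the sws_flags string is passed on to scale filters that the graph
 * inserts automatically. */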
1786
1787 #endif  /* CONFIG_AVFILTER */
1788
1789 static int video_thread(void *arg)
1790 {
1791     VideoState *is = arg;
1792     AVFrame *frame= avcodec_alloc_frame();
1793     int64_t pts_int, pos;
1794     double pts;
1795     int ret;
1796
1797 #if CONFIG_AVFILTER
1798     AVFilterGraph *graph = avfilter_graph_alloc();
1799     AVFilterContext *filt_out = NULL;
1800
1801     if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
1802         goto the_end;
1803     filt_out = is->out_video_filter;
1804 #endif
1805
1806     for(;;) {
1807 #if !CONFIG_AVFILTER
1808         AVPacket pkt;
1809 #else
1810         AVFilterBufferRef *picref;
1811         AVRational tb;
1812 #endif
1813         while (is->paused && !is->videoq.abort_request)
1814             SDL_Delay(10);
1815 #if CONFIG_AVFILTER
1816         ret = get_filtered_video_frame(filt_out, frame, &picref, &tb);
1817         if (picref) {
1818             pts_int = picref->pts;
1819             pos     = picref->pos;
1820             frame->opaque = picref;
1821         }
1822
1823         if (av_cmp_q(tb, is->video_st->time_base)) {
1824             av_unused int64_t pts1 = pts_int;
1825             pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
1826             av_dlog(NULL, "video_thread(): "
1827                     "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
1828                     tb.num, tb.den, pts1,
1829                     is->video_st->time_base.num, is->video_st->time_base.den, pts_int);
1830         }
1831 #else
1832         ret = get_video_frame(is, frame, &pts_int, &pkt);
1833         pos = pkt.pos;
1834 #endif
1835
1836         if (ret < 0) goto the_end;
1837
1838         if (!ret)
1839             continue;
1840
1841         pts = pts_int*av_q2d(is->video_st->time_base);
1842
1843         ret = queue_picture(is, frame, pts, pos);
1844 #if !CONFIG_AVFILTER
1845         av_free_packet(&pkt);
1846 #endif
1847         if (ret < 0)
1848             goto the_end;
1849
1850         if (step)
1851             if (cur_stream)
1852                 stream_toggle_pause(cur_stream);
1853     }
1854  the_end:
1855 #if CONFIG_AVFILTER
1856     avfilter_graph_free(&graph);
1857 #endif
1858     av_free(frame);
1859     return 0;
1860 }
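/* Worked example of the timestamp handling above (illustrative numbers): with
 * a stream time_base of 1/90000, a pts_int of 900000 gives
 * pts = 900000 * (1/90000) = 10.0 seconds; if the filter graph delivers
 * timestamps in 1/1000000 units instead, av_rescale_q() first converts
 * 10000000 -> 900000 before the same multiplication is applied. */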
1861
1862 static int subtitle_thread(void *arg)
1863 {
1864     VideoState *is = arg;
1865     SubPicture *sp;
1866     AVPacket pkt1, *pkt = &pkt1;
1867     int len1, got_subtitle;
1868     double pts;
1869     int i, j;
1870     int r, g, b, y, u, v, a;
1871
1872     for(;;) {
1873         while (is->paused && !is->subtitleq.abort_request) {
1874             SDL_Delay(10);
1875         }
1876         if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
1877             break;
1878
1879         if(pkt->data == flush_pkt.data){
1880             avcodec_flush_buffers(is->subtitle_st->codec);
1881             continue;
1882         }
1883         SDL_LockMutex(is->subpq_mutex);
1884         while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
1885                !is->subtitleq.abort_request) {
1886             SDL_CondWait(is->subpq_cond, is->subpq_mutex);
1887         }
1888         SDL_UnlockMutex(is->subpq_mutex);
1889
1890         if (is->subtitleq.abort_request)
1891             goto the_end;
1892
1893         sp = &is->subpq[is->subpq_windex];
1894
1895         /* NOTE: pts is the PTS of the _first_ subtitle beginning in
1896            this packet, if any */
1897         pts = 0;
1898         if (pkt->pts != AV_NOPTS_VALUE)
1899             pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;
1900
1901         len1 = avcodec_decode_subtitle2(is->subtitle_st->codec,
1902                                     &sp->sub, &got_subtitle,
1903                                     pkt);
1904 //            if (len1 < 0)
1905 //                break;
1906         if (got_subtitle && sp->sub.format == 0) {
1907             sp->pts = pts;
1908
1909             for (i = 0; i < sp->sub.num_rects; i++)
1910             {
1911                 for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
1912                 {
1913                     RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
1914                     y = RGB_TO_Y_CCIR(r, g, b);
1915                     u = RGB_TO_U_CCIR(r, g, b, 0);
1916                     v = RGB_TO_V_CCIR(r, g, b, 0);
1917                     YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
1918                 }
1919             }
1920
1921             /* now we can update the picture count */
1922             if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
1923                 is->subpq_windex = 0;
1924             SDL_LockMutex(is->subpq_mutex);
1925             is->subpq_size++;
1926             SDL_UnlockMutex(is->subpq_mutex);
1927         }
1928         av_free_packet(pkt);
1929 //        if (step)
1930 //            if (cur_stream)
1931 //                stream_toggle_pause(cur_stream);
1932     }
1933  the_end:
1934     return 0;
1935 }
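/* Note on the palette conversion above (descriptive only): for bitmap
 * subtitles, rects[i]->pict.data[1] holds the palette as packed 32-bit RGBA;
 * each entry is rewritten in place as limited-range CCIR 601 YUVA, so opaque
 * white (255,255,255) becomes roughly Y=235, U=V=128 with alpha preserved. */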
1936
1937 /* copy samples into the circular buffer used for the audio display (waves/RDFT) */
1938 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1939 {
1940     int size, len;
1941
1942     size = samples_size / sizeof(short);
1943     while (size > 0) {
1944         len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1945         if (len > size)
1946             len = size;
1947         memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1948         samples += len;
1949         is->sample_array_index += len;
1950         if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1951             is->sample_array_index = 0;
1952         size -= len;
1953     }
1954 }
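/* Example of the wrap-around above (illustrative numbers): with
 * SAMPLE_ARRAY_SIZE = 131072 shorts and sample_array_index = 131000, a
 * 1000-sample write is split into 72 samples at the end of the array and,
 * after the index wraps to 0, 928 samples at the beginning. */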
1955
1956 /* return the new audio buffer size (samples can be added or deleted
1957    to improve sync when the video or external clock is the master) */
1958 static int synchronize_audio(VideoState *is, short *samples,
1959                              int samples_size1, double pts)
1960 {
1961     int n, samples_size;
1962     double ref_clock;
1963
1964     n = 2 * is->audio_st->codec->channels;
1965     samples_size = samples_size1;
1966
1967     /* if not master, then we try to remove or add samples to correct the clock */
1968     if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1969          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1970         double diff, avg_diff;
1971         int wanted_size, min_size, max_size, nb_samples;
1972
1973         ref_clock = get_master_clock(is);
1974         diff = get_audio_clock(is) - ref_clock;
1975
1976         if (diff < AV_NOSYNC_THRESHOLD) {
1977             is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1978             if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1979                 /* not enough measurements yet for a reliable estimate */
1980                 is->audio_diff_avg_count++;
1981             } else {
1982                 /* estimate the A-V difference */
1983                 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1984
1985                 if (fabs(avg_diff) >= is->audio_diff_threshold) {
1986                     wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
1987                     nb_samples = samples_size / n;
1988
1989                     min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1990                     max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1991                     if (wanted_size < min_size)
1992                         wanted_size = min_size;
1993                     else if (wanted_size > max_size)
1994                         wanted_size = max_size;
1995
1996                     /* add or remove samples to correct the sync */
1997                     if (wanted_size < samples_size) {
1998                         /* remove samples */
1999                         samples_size = wanted_size;
2000                     } else if (wanted_size > samples_size) {
2001                         uint8_t *samples_end, *q;
2002                         int nb;
2003
2004                         /* add samples */
2005                         nb = (wanted_size - samples_size); /* number of bytes to add */
2006                         samples_end = (uint8_t *)samples + samples_size - n;
2007                         q = samples_end + n;
2008                         while (nb > 0) {
2009                             memcpy(q, samples_end, n);
2010                             q += n;
2011                             nb -= n;
2012                         }
2013                         samples_size = wanted_size;
2014                     }
2015                 }
2016 #if 0
2017                 printf("diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
2018                        diff, avg_diff, samples_size - samples_size1,
2019                        is->audio_clock, is->video_clock, is->audio_diff_threshold);
2020 #endif
2021             }
2022         } else {
2023             /* too big a difference: probably initial PTS errors, so
2024                reset the A-V filter */
2025             is->audio_diff_avg_count = 0;
2026             is->audio_diff_cum = 0;
2027         }
2028     }
2029
2030     return samples_size;
2031 }
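/* Worked example of the correction above (illustrative numbers, assuming
 * 44100 Hz stereo S16, i.e. n = 4): with samples_size = 4096 bytes
 * (1024 frames) and diff = +0.01 s, wanted_size = 4096 + 441*4 = 5860, which
 * the 10% clamp reduces to max_size = 1126*4 = 4504; the last frame is then
 * repeated to add 408 bytes, stretching playback so the master clock can
 * catch up with the audio clock. */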
2032
2033 /* decode one audio frame and return its uncompressed size in bytes */
2034 static int audio_decode_frame(VideoState *is, double *pts_ptr)
2035 {
2036     AVPacket *pkt_temp = &is->audio_pkt_temp;
2037     AVPacket *pkt = &is->audio_pkt;
2038     AVCodecContext *dec= is->audio_st->codec;
2039     int n, len1, data_size;
2040     double pts;
2041
2042     for(;;) {
2043         /* NOTE: the audio packet can contain several frames */
2044         while (pkt_temp->size > 0) {
2045             data_size = sizeof(is->audio_buf1);
2046             len1 = avcodec_decode_audio3(dec,
2047                                         (int16_t *)is->audio_buf1, &data_size,
2048                                         pkt_temp);
2049             if (len1 < 0) {
2050                 /* if error, we skip the frame */
2051                 pkt_temp->size = 0;
2052                 break;
2053             }
2054
2055             pkt_temp->data += len1;
2056             pkt_temp->size -= len1;
2057             if (data_size <= 0)
2058                 continue;
2059
2060             if (dec->sample_fmt != is->audio_src_fmt) {
2061                 if (is->reformat_ctx)
2062                     av_audio_convert_free(is->reformat_ctx);
2063                 is->reformat_ctx= av_audio_convert_alloc(AV_SAMPLE_FMT_S16, 1,
2064                                                          dec->sample_fmt, 1, NULL, 0);
2065                 if (!is->reformat_ctx) {
2066                     fprintf(stderr, "Cannot convert %s sample format to %s sample format\n",
2067                         av_get_sample_fmt_name(dec->sample_fmt),
2068                         av_get_sample_fmt_name(AV_SAMPLE_FMT_S16));
2069                     break;
2070                 }
2071                 is->audio_src_fmt= dec->sample_fmt;
2072             }
2073
2074             if (is->reformat_ctx) {
2075                 const void *ibuf[6]= {is->audio_buf1};
2076                 void *obuf[6]= {is->audio_buf2};
2077                 int istride[6]= {av_get_bits_per_sample_fmt(dec->sample_fmt)/8};
2078                 int ostride[6]= {2};
2079                 int len= data_size/istride[0];
2080                 if (av_audio_convert(is->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) {
2081                     printf("av_audio_convert() failed\n");
2082                     break;
2083                 }
2084                 is->audio_buf= is->audio_buf2;
2085                 /* FIXME: existing code assumes that data_size equals framesize*channels*2;
2086                           remove this legacy cruft */
2087                 data_size= len*2;
2088             }else{
2089                 is->audio_buf= is->audio_buf1;
2090             }
2091
2092             /* take the pts from the audio clock (set from the packet pts or extrapolated) */
2093             pts = is->audio_clock;
2094             *pts_ptr = pts;
2095             n = 2 * dec->channels;
2096             is->audio_clock += (double)data_size /
2097                 (double)(n * dec->sample_rate);
2098 #if defined(DEBUG_SYNC)
2099             {
2100                 static double last_clock;
2101                 printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
2102                        is->audio_clock - last_clock,
2103                        is->audio_clock, pts);
2104                 last_clock = is->audio_clock;
2105             }
2106 #endif
2107             return data_size;
2108         }
2109
2110         /* free the current packet */
2111         if (pkt->data)
2112             av_free_packet(pkt);
2113
2114         if (is->paused || is->audioq.abort_request) {
2115             return -1;
2116         }
2117
2118         /* read next packet */
2119         if (packet_queue_get(&is->audioq, pkt, 1) < 0)
2120             return -1;
2121         if(pkt->data == flush_pkt.data){
2122             avcodec_flush_buffers(dec);
2123             continue;
2124         }
2125
2126         pkt_temp->data = pkt->data;
2127         pkt_temp->size = pkt->size;
2128
2129         /* if a pts is present, update the audio clock with it */
2130         if (pkt->pts != AV_NOPTS_VALUE) {
2131             is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
2132         }
2133     }
2134 }
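/* Worked example of the clock update above (illustrative numbers): for
 * 44100 Hz stereo S16 audio, n = 4, so a decoded data_size of 4096 bytes
 * advances is->audio_clock by 4096 / (4 * 44100) ~= 23.2 ms, the playback
 * duration of that buffer. */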
2135
2136 /* prepare a new audio buffer */
2137 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2138 {
2139     VideoState *is = opaque;
2140     int audio_size, len1;
2141     double pts;
2142
2143     audio_callback_time = av_gettime();
2144
2145     while (len > 0) {
2146         if (is->audio_buf_index >= is->audio_buf_size) {
2147             audio_size = audio_decode_frame(is, &pts);
2148             if (audio_size < 0) {
2149                 /* if error, just output silence */
2150                 is->audio_buf = is->audio_buf1;
2151                 is->audio_buf_size = 1024;
2152                 memset(is->audio_buf, 0, is->audio_buf_size);
2153             } else {
2154                 if (is->show_mode != SHOW_MODE_VIDEO)
2155                     update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2156                 audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
2157                                                pts);
2158                 is->audio_buf_size = audio_size;
2159             }
2160             is->audio_buf_index = 0;
2161         }
2162         len1 = is->audio_buf_size - is->audio_buf_index;
2163         if (len1 > len)
2164             len1 = len;
2165         memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2166         len -= len1;
2167         stream += len1;
2168         is->audio_buf_index += len1;
2169     }
2170 }
2171
2172 /* open a given stream. Return 0 if OK */
2173 static int stream_component_open(VideoState *is, int stream_index)
2174 {
2175     AVFormatContext *ic = is->ic;
2176     AVCodecContext *avctx;
2177     AVCodec *codec;
2178     SDL_AudioSpec wanted_spec, spec;
2179
2180     if (stream_index < 0 || stream_index >= ic->nb_streams)
2181         return -1;
2182     avctx = ic->streams[stream_index]->codec;
2183
2184     /* prepare audio output: request at most 2 channels from the decoder */
2185     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2186         if (avctx->channels > 0) {
2187             avctx->request_channels = FFMIN(2, avctx->channels);
2188         } else {
2189             avctx->request_channels = 2;
2190         }
2191     }
2192
2193     codec = avcodec_find_decoder(avctx->codec_id);
2194     avctx->debug_mv = debug_mv;
2195     avctx->debug = debug;
2196     avctx->workaround_bugs = workaround_bugs;
2197     avctx->lowres = lowres;
2198     if(lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
2199     avctx->idct_algo= idct;
2200     if(fast) avctx->flags2 |= CODEC_FLAG2_FAST;
2201     avctx->skip_frame= skip_frame;
2202     avctx->skip_idct= skip_idct;
2203     avctx->skip_loop_filter= skip_loop_filter;
2204     avctx->error_recognition= error_recognition;
2205     avctx->error_concealment= error_concealment;
2206     avctx->thread_count= thread_count;
2207
2208     set_context_opts(avctx, avcodec_opts[avctx->codec_type], 0, codec);
2209
2210     if (!codec ||
2211         avcodec_open(avctx, codec) < 0)
2212         return -1;
2213
2214     /* prepare audio output */
2215     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2216         wanted_spec.freq = avctx->sample_rate;
2217         wanted_spec.format = AUDIO_S16SYS;
2218         wanted_spec.channels = avctx->channels;
2219         wanted_spec.silence = 0;
2220         wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
2221         wanted_spec.callback = sdl_audio_callback;
2222         wanted_spec.userdata = is;
2223         if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2224             fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
2225             return -1;
2226         }
2227         is->audio_hw_buf_size = spec.size;
2228         is->audio_src_fmt= AV_SAMPLE_FMT_S16;
2229     }
2230
2231     ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2232     switch(avctx->codec_type) {
2233     case AVMEDIA_TYPE_AUDIO:
2234         is->audio_stream = stream_index;
2235         is->audio_st = ic->streams[stream_index];
2236         is->audio_buf_size = 0;
2237         is->audio_buf_index = 0;
2238
2239         /* init averaging filter */
2240         is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2241         is->audio_diff_avg_count = 0;
2242         /* since we do not have a precise enough audio FIFO fullness measure,
2243            we correct audio sync only if the error is larger than this threshold */
2244         is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / avctx->sample_rate;
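        /* Illustrative numbers for the two values above: the averaging
           coefficient is exp(log(0.01)/20) ~= 0.794, so a difference measured
           AUDIO_DIFF_AVG_NB (20) callbacks ago still contributes with weight
           0.794^20 = 0.01; at 44100 Hz the threshold is 2*1024/44100 ~= 46 ms,
           i.e. two SDL audio buffers. */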
2245
2246         memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
2247         packet_queue_init(&is->audioq);
2248         SDL_PauseAudio(0);
2249         break;
2250     case AVMEDIA_TYPE_VIDEO:
2251         is->video_stream = stream_index;
2252         is->video_st = ic->streams[stream_index];
2253
2254 //        is->video_current_pts_time = av_gettime();
2255
2256         packet_queue_init(&is->videoq);
2257         is->video_tid = SDL_CreateThread(video_thread, is);
2258         break;
2259     case AVMEDIA_TYPE_SUBTITLE:
2260         is->subtitle_stream = stream_index;
2261         is->subtitle_st = ic->streams[stream_index];
2262         packet_queue_init(&is->subtitleq);
2263
2264         is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
2265         break;
2266     default:
2267         break;
2268     }
2269     return 0;
2270 }
2271
2272 static void stream_component_close(VideoState *is, int stream_index)
2273 {
2274     AVFormatContext *ic = is->ic;
2275     AVCodecContext *avctx;
2276
2277     if (stream_index < 0 || stream_index >= ic->nb_streams)
2278         return;
2279     avctx = ic->streams[stream_index]->codec;
2280
2281     switch(avctx->codec_type) {
2282     case AVMEDIA_TYPE_AUDIO:
2283         packet_queue_abort(&is->audioq);
2284
2285         SDL_CloseAudio();
2286
2287         packet_queue_end(&is->audioq);
2288         if (is->reformat_ctx)
2289             av_audio_convert_free(is->reformat_ctx);
2290         is->reformat_ctx = NULL;
2291         break;
2292     case AVMEDIA_TYPE_VIDEO:
2293         packet_queue_abort(&is->videoq);
2294
2295         /* note: we also signal this condition to make sure we unblock the
2296            video thread in all cases */
2297         SDL_LockMutex(is->pictq_mutex);
2298         SDL_CondSignal(is->pictq_cond);
2299         SDL_UnlockMutex(is->pictq_mutex);
2300
2301         SDL_WaitThread(is->video_tid, NULL);
2302
2303         packet_queue_end(&is->videoq);
2304         break;
2305     case AVMEDIA_TYPE_SUBTITLE:
2306         packet_queue_abort(&is->subtitleq);
2307
2308         /* note: we also signal this condition to make sure we unblock the
2309            subtitle thread in all cases */
2310         SDL_LockMutex(is->subpq_mutex);
2311         is->subtitle_stream_changed = 1;
2312
2313         SDL_CondSignal(is->subpq_cond);
2314         SDL_UnlockMutex(is->subpq_mutex);
2315
2316         SDL_WaitThread(is->subtitle_tid, NULL);
2317
2318         packet_queue_end(&is->subtitleq);
2319         break;
2320     default:
2321         break;
2322     }
2323
2324     ic->streams[stream_index]->discard = AVDISCARD_ALL;
2325     avcodec_close(avctx);
2326     switch(avctx->codec_type) {
2327     case AVMEDIA_TYPE_AUDIO:
2328         is->audio_st = NULL;
2329         is->audio_stream = -1;
2330         break;
2331     case AVMEDIA_TYPE_VIDEO:
2332         is->video_st = NULL;
2333         is->video_stream = -1;
2334         break;
2335     case AVMEDIA_TYPE_SUBTITLE:
2336         is->subtitle_st = NULL;
2337         is->subtitle_stream = -1;
2338         break;
2339     default:
2340         break;
2341     }
2342 }
2343
2344 /* since we have only one decoding thread, we can use a global
2345    variable instead of a thread local variable */
2346 static VideoState *global_video_state;
2347
2348 static int decode_interrupt_cb(void)
2349 {
2350     return (global_video_state && global_video_state->abort_request);
2351 }
2352
2353 /* this thread gets the stream from the disk or the network */
2354 static int read_thread(void *arg)
2355 {
2356     VideoState *is = arg;
2357     AVFormatContext *ic;
2358     int err, i, ret;
2359     int st_index[AVMEDIA_TYPE_NB];
2360     AVPacket pkt1, *pkt = &pkt1;
2361     AVFormatParameters params, *ap = &params;
2362     int eof=0;
2363     int pkt_in_play_range = 0;
2364
2365     ic = avformat_alloc_context();
2366
2367     memset(st_index, -1, sizeof(st_index));
2368     is->video_stream = -1;
2369     is->audio_stream = -1;
2370     is->subtitle_stream = -1;
2371
2372     global_video_state = is;
2373     avio_set_interrupt_cb(decode_interrupt_cb);
2374
2375     memset(ap, 0, sizeof(*ap));
2376
2377     ap->prealloced_context = 1;
2378     ap->width = frame_width;
2379     ap->height= frame_height;
2380     ap->time_base= (AVRational){1, 25};
2381     ap->pix_fmt = frame_pix_fmt;
2382     ic->flags |= AVFMT_FLAG_PRIV_OPT;
2383
2384
2385     err = av_open_input_file(&ic, is->filename, is->iformat, 0, ap);
2386     if (err >= 0) {
2387         set_context_opts(ic, avformat_opts, AV_OPT_FLAG_DECODING_PARAM, NULL);
2388         err = av_demuxer_open(ic, ap);
2389         if(err < 0){
2390             avformat_free_context(ic);
2391             ic= NULL;
2392         }
2393     }
2394     if (err < 0) {
2395         print_error(is->filename, err);
2396         ret = -1;
2397         goto fail;
2398     }
2399     is->ic = ic;
2400
2401     if(genpts)
2402         ic->flags |= AVFMT_FLAG_GENPTS;
2403
2404     err = av_find_stream_info(ic);
2405     if (err < 0) {
2406         fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
2407         ret = -1;
2408         goto fail;
2409     }
2410     if(ic->pb)
2411         ic->pb->eof_reached= 0; //FIXME hack, ffplay maybe should not use url_feof() to test for the end
2412
2413     if(seek_by_bytes<0)
2414         seek_by_bytes= !!(ic->iformat->flags & AVFMT_TS_DISCONT);
2415
2416     /* if seeking was requested, execute it */
2417     if (start_time != AV_NOPTS_VALUE) {
2418         int64_t timestamp;
2419
2420         timestamp = start_time;
2421         /* add the stream start time */
2422         if (ic->start_time != AV_NOPTS_VALUE)
2423             timestamp += ic->start_time;
2424         ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2425         if (ret < 0) {
2426             fprintf(stderr, "%s: could not seek to position %0.3f\n",
2427                     is->filename, (double)timestamp / AV_TIME_BASE);
2428         }
2429     }
2430
2431     for (i = 0; i < ic->nb_streams; i++)
2432         ic->streams[i]->discard = AVDISCARD_ALL;
2433     if (!video_disable)
2434         st_index[AVMEDIA_TYPE_VIDEO] =
2435             av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2436                                 wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2437     if (!audio_disable)
2438         st_index[AVMEDIA_TYPE_AUDIO] =
2439             av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2440                                 wanted_stream[AVMEDIA_TYPE_AUDIO],
2441                                 st_index[AVMEDIA_TYPE_VIDEO],
2442                                 NULL, 0);
2443     if (!video_disable)
2444         st_index[AVMEDIA_TYPE_SUBTITLE] =
2445             av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2446                                 wanted_stream[AVMEDIA_TYPE_SUBTITLE],
2447                                 (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2448                                  st_index[AVMEDIA_TYPE_AUDIO] :
2449                                  st_index[AVMEDIA_TYPE_VIDEO]),
2450                                 NULL, 0);
2451     if (show_status) {
2452         av_dump_format(ic, 0, is->filename, 0);
2453     }
2454
2455     is->show_mode = show_mode;
2456
2457     /* open the streams */
2458     if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2459         stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2460     }
2461
2462     ret=-1;
2463     if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2464         ret= stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2465     }
2466     is->refresh_tid = SDL_CreateThread(refresh_thread, is);
2467     if (is->show_mode == SHOW_MODE_NONE)
2468         is->show_mode = ret >= 0 ? SHOW_MODE_VIDEO : SHOW_MODE_RDFT;
2469
2470     if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2471         stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2472     }
2473
2474     if (is->video_stream < 0 && is->audio_stream < 0) {
2475         fprintf(stderr, "%s: could not open codecs\n", is->filename);
2476         ret = -1;
2477         goto fail;
2478     }
2479
2480     for(;;) {
2481         if (is->abort_request)
2482             break;
2483         if (is->paused != is->last_paused) {
2484             is->last_paused = is->paused;
2485             if (is->paused)
2486                 is->read_pause_return= av_read_pause(ic);
2487             else
2488                 av_read_play(ic);
2489         }
2490 #if CONFIG_RTSP_DEMUXER
2491         if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
2492             /* wait 10 ms to avoid trying to get another packet */
2493             /* XXX: horrible */
2494             SDL_Delay(10);
2495             continue;
2496         }
2497 #endif
2498         if (is->seek_req) {
2499             int64_t seek_target= is->seek_pos;
2500             int64_t seek_min= is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2501             int64_t seek_max= is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2502 //FIXME the +-2 is because rounding is not done in the correct direction when
2503 //      the seek_pos/seek_rel variables are generated
2504
2505             ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2506             if (ret < 0) {
2507                 fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
2508             }else{
2509                 if (is->audio_stream >= 0) {
2510                     packet_queue_flush(&is->audioq);
2511                     packet_queue_put(&is->audioq, &flush_pkt);
2512                 }
2513                 if (is->subtitle_stream >= 0) {
2514                     packet_queue_flush(&is->subtitleq);
2515                     packet_queue_put(&is->subtitleq, &flush_pkt);
2516                 }
2517                 if (is->video_stream >= 0) {
2518                     packet_queue_flush(&is->videoq);
2519                     packet_queue_put(&is->videoq, &flush_pkt);
2520                 }
2521             }
2522             is->seek_req = 0;
2523             eof= 0;
2524         }
2525
2526         /* if the queues are full, no need to read more */
2527         if (   is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2528             || (   (is->audioq   .size  > MIN_AUDIOQ_SIZE || is->audio_stream<0)
2529                 && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream<0)
2530                 && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream<0))) {
2531             /* wait 10 ms */
2532             SDL_Delay(10);
2533             continue;
2534         }
2535         if(eof) {
2536             if(is->video_stream >= 0){
2537                 av_init_packet(pkt);
2538                 pkt->data=NULL;
2539                 pkt->size=0;
2540                 pkt->stream_index= is->video_stream;
2541                 packet_queue_put(&is->videoq, pkt);
2542             }
2543             SDL_Delay(10);
2544             if(is->audioq.size + is->videoq.size + is->subtitleq.size ==0){
2545                 if(loop!=1 && (!loop || --loop)){
2546                     stream_seek(cur_stream, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
2547                 }else if(autoexit){
2548                     ret=AVERROR_EOF;
2549                     goto fail;
2550                 }
2551             }
2552             eof=0;
2553             continue;
2554         }
2555         ret = av_read_frame(ic, pkt);
2556         if (ret < 0) {
2557             if (ret == AVERROR_EOF || url_feof(ic->pb))
2558                 eof=1;
2559             if (ic->pb && ic->pb->error)
2560                 break;
2561             SDL_Delay(100); /* wait for user event */
2562             continue;
2563         }
2564         /* check if packet is in play range specified by user, then queue, otherwise discard */
2565         pkt_in_play_range = duration == AV_NOPTS_VALUE ||
2566                 (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
2567                 av_q2d(ic->streams[pkt->stream_index]->time_base) -
2568                 (double)(start_time != AV_NOPTS_VALUE ? start_time : 0)/1000000
2569                 <= ((double)duration/1000000);
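        /* Illustrative example of the range check above: with "-ss 10 -t 5",
           start_time is 10000000 and duration 5000000 (AV_TIME_BASE units), so
           a packet is queued only while its stream-relative time minus 10 s is
           at most 5 s. */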
2570         if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
2571             packet_queue_put(&is->audioq, pkt);
2572         } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
2573             packet_queue_put(&is->videoq, pkt);
2574         } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
2575             packet_queue_put(&is->subtitleq, pkt);
2576         } else {
2577             av_free_packet(pkt);
2578         }
2579     }
2580     /* wait until the end */
2581     while (!is->abort_request) {
2582         SDL_Delay(100);
2583     }
2584
2585     ret = 0;
2586  fail:
2587     /* disable the interrupt callback */
2588     global_video_state = NULL;
2589
2590     /* close each stream */
2591     if (is->audio_stream >= 0)
2592         stream_component_close(is, is->audio_stream);
2593     if (is->video_stream >= 0)
2594         stream_component_close(is, is->video_stream);
2595     if (is->subtitle_stream >= 0)
2596         stream_component_close(is, is->subtitle_stream);
2597     if (is->ic) {
2598         av_close_input_file(is->ic);
2599         is->ic = NULL; /* safety */
2600     }
2601     avio_set_interrupt_cb(NULL);
2602
2603     if (ret != 0) {
2604         SDL_Event event;
2605
2606         event.type = FF_QUIT_EVENT;
2607         event.user.data1 = is;
2608         SDL_PushEvent(&event);
2609     }
2610     return 0;
2611 }
2612
2613 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2614 {
2615     VideoState *is;
2616
2617     is = av_mallocz(sizeof(VideoState));
2618     if (!is)
2619         return NULL;
2620     av_strlcpy(is->filename, filename, sizeof(is->filename));
2621     is->iformat = iformat;
2622     is->ytop = 0;
2623     is->xleft = 0;
2624
2625     /* start video display */
2626     is->pictq_mutex = SDL_CreateMutex();
2627     is->pictq_cond = SDL_CreateCond();
2628
2629     is->subpq_mutex = SDL_CreateMutex();
2630     is->subpq_cond = SDL_CreateCond();
2631
2632     is->av_sync_type = av_sync_type;
2633     is->read_tid = SDL_CreateThread(read_thread, is);
2634     if (!is->read_tid) {
2635         av_free(is);
2636         return NULL;
2637     }
2638     return is;
2639 }
2640
2641 static void stream_cycle_channel(VideoState *is, int codec_type)
2642 {
2643     AVFormatContext *ic = is->ic;
2644     int start_index, stream_index;
2645     AVStream *st;
2646
2647     if (codec_type == AVMEDIA_TYPE_VIDEO)
2648         start_index = is->video_stream;
2649     else if (codec_type == AVMEDIA_TYPE_AUDIO)
2650         start_index = is->audio_stream;
2651     else
2652         start_index = is->subtitle_stream;
2653     if (start_index < (codec_type == AVMEDIA_TYPE_SUBTITLE ? -1 : 0))
2654         return;
2655     stream_index = start_index;
2656     for(;;) {
2657         if (++stream_index >= is->ic->nb_streams)
2658         {
2659             if (codec_type == AVMEDIA_TYPE_SUBTITLE)
2660             {
2661                 stream_index = -1;
2662                 goto the_end;
2663             } else
2664                 stream_index = 0;
2665         }
2666         if (stream_index == start_index)
2667             return;
2668         st = ic->streams[stream_index];
2669         if (st->codec->codec_type == codec_type) {
2670             /* check that parameters are OK */
2671             switch(codec_type) {
2672             case AVMEDIA_TYPE_AUDIO:
2673                 if (st->codec->sample_rate != 0 &&
2674                     st->codec->channels != 0)
2675                     goto the_end;
2676                 break;
2677             case AVMEDIA_TYPE_VIDEO:
2678             case AVMEDIA_TYPE_SUBTITLE:
2679                 goto the_end;
2680             default:
2681                 break;
2682             }
2683         }
2684     }
2685  the_end:
2686     stream_component_close(is, start_index);
2687     stream_component_open(is, stream_index);
2688 }
2689
2690
2691 static void toggle_full_screen(void)
2692 {
2693     is_full_screen = !is_full_screen;
2694     if (!fs_screen_width) {
2695         /* use default SDL method */
2696 //        SDL_WM_ToggleFullScreen(screen);
2697     }
2698     video_open(cur_stream);
2699 }
2700
2701 static void toggle_pause(void)
2702 {
2703     if (cur_stream)
2704         stream_toggle_pause(cur_stream);
2705     step = 0;
2706 }
2707
2708 static void step_to_next_frame(void)
2709 {
2710     if (cur_stream) {
2711         /* if the stream is paused, unpause it, then step */
2712         if (cur_stream->paused)
2713             stream_toggle_pause(cur_stream);
2714     }
2715     step = 1;
2716 }
2717
2718 static void toggle_audio_display(void)
2719 {
2720     if (cur_stream) {
2721         int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2722         cur_stream->show_mode = (cur_stream->show_mode + 1) % SHOW_MODE_NB;
2723         fill_rectangle(screen,
2724                     cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height,
2725                     bgcolor);
2726         SDL_UpdateRect(screen, cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height);
2727     }
2728 }
2729
2730 /* handle an event sent by the GUI */
2731 static void event_loop(void)
2732 {
2733     SDL_Event event;
2734     double incr, pos, frac;
2735
2736     for(;;) {
2737         double x;
2738         SDL_WaitEvent(&event);
2739         switch(event.type) {
2740         case SDL_KEYDOWN:
2741             if (exit_on_keydown) {
2742                 do_exit();
2743                 break;
2744             }
2745             switch(event.key.keysym.sym) {
2746             case SDLK_ESCAPE:
2747             case SDLK_q:
2748                 do_exit();
2749                 break;
2750             case SDLK_f:
2751                 toggle_full_screen();
2752                 break;
2753             case SDLK_p:
2754             case SDLK_SPACE:
2755                 toggle_pause();
2756                 break;
2757             case SDLK_s: //S: Step to next frame
2758                 step_to_next_frame();
2759                 break;
2760             case SDLK_a:
2761                 if (cur_stream)
2762                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
2763                 break;
2764             case SDLK_v:
2765                 if (cur_stream)
2766                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
2767                 break;
2768             case SDLK_t:
2769                 if (cur_stream)
2770                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
2771                 break;
2772             case SDLK_w:
2773                 toggle_audio_display();
2774                 break;
2775             case SDLK_LEFT:
2776                 incr = -10.0;
2777                 goto do_seek;
2778             case SDLK_RIGHT:
2779                 incr = 10.0;
2780                 goto do_seek;
2781             case SDLK_UP:
2782                 incr = 60.0;
2783                 goto do_seek;
2784             case SDLK_DOWN:
2785                 incr = -60.0;
2786             do_seek:
2787                 if (cur_stream) {
2788                     if (seek_by_bytes) {
2789                         if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos>=0){
2790                             pos= cur_stream->video_current_pos;
2791                         }else if(cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos>=0){
2792                             pos= cur_stream->audio_pkt.pos;
2793                         }else
2794                             pos = avio_tell(cur_stream->ic->pb);
2795                         if (cur_stream->ic->bit_rate)
2796                             incr *= cur_stream->ic->bit_rate / 8.0;
2797                         else
2798                             incr *= 180000.0;
2799                         pos += incr;
2800                         stream_seek(cur_stream, pos, incr, 1);
2801                     } else {
2802                         pos = get_master_clock(cur_stream);
2803                         pos += incr;
2804                         stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
2805                     }
2806                 }
2807                 break;
2808             default:
2809                 break;
2810             }
2811             break;
2812         case SDL_MOUSEBUTTONDOWN:
2813             if (exit_on_mousedown) {
2814                 do_exit();
2815                 break;
2816             }
2817         case SDL_MOUSEMOTION:
2818             if(event.type ==SDL_MOUSEBUTTONDOWN){
2819                 x= event.button.x;
2820             }else{
2821                 if(event.motion.state != SDL_PRESSED)
2822                     break;
2823                 x= event.motion.x;
2824             }
2825             if (cur_stream) {
2826                 if(seek_by_bytes || cur_stream->ic->duration<=0){
2827                     uint64_t size=  avio_size(cur_stream->ic->pb);
2828                     stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
2829                 }else{
2830                     int64_t ts;
2831                     int ns, hh, mm, ss;
2832                     int tns, thh, tmm, tss;
2833                     tns = cur_stream->ic->duration/1000000LL;
2834                     thh = tns/3600;
2835                     tmm = (tns%3600)/60;
2836                     tss = (tns%60);
2837                     frac = x/cur_stream->width;
2838                     ns = frac*tns;
2839                     hh = ns/3600;
2840                     mm = (ns%3600)/60;
2841                     ss = (ns%60);
2842                     fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
2843                             hh, mm, ss, thh, tmm, tss);
2844                     ts = frac*cur_stream->ic->duration;
2845                     if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
2846                         ts += cur_stream->ic->start_time;
2847                     stream_seek(cur_stream, ts, 0, 0);
2848                 }
2849             }
2850             break;
2851         case SDL_VIDEORESIZE:
2852             if (cur_stream) {
2853                 screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
2854                                           SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
2855                 screen_width = cur_stream->width = event.resize.w;
2856                 screen_height= cur_stream->height= event.resize.h;
2857             }
2858             break;
2859         case SDL_QUIT:
2860         case FF_QUIT_EVENT:
2861             do_exit();
2862             break;
2863         case FF_ALLOC_EVENT:
2864             video_open(event.user.data1);
2865             alloc_picture(event.user.data1);
2866             break;
2867         case FF_REFRESH_EVENT:
2868             video_refresh(event.user.data1);
2869             cur_stream->refresh=0;
2870             break;
2871         default:
2872             break;
2873         }
2874     }
2875 }
2876
2877 static void opt_frame_size(const char *arg)
2878 {
2879     if (av_parse_video_size(&frame_width, &frame_height, arg) < 0) {
2880         fprintf(stderr, "Incorrect frame size\n");
2881         exit(1);
2882     }
2883     if ((frame_width % 2) != 0 || (frame_height % 2) != 0) {
2884         fprintf(stderr, "Frame size must be a multiple of 2\n");
2885         exit(1);
2886     }
2887 }
2888
2889 static int opt_width(const char *opt, const char *arg)
2890 {
2891     screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2892     return 0;
2893 }
2894
2895 static int opt_height(const char *opt, const char *arg)
2896 {
2897     screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2898     return 0;
2899 }
2900
2901 static void opt_format(const char *arg)
2902 {
2903     file_iformat = av_find_input_format(arg);
2904     if (!file_iformat) {
2905         fprintf(stderr, "Unknown input format: %s\n", arg);
2906         exit(1);
2907     }
2908 }
2909
2910 static void opt_frame_pix_fmt(const char *arg)
2911 {
2912     frame_pix_fmt = av_get_pix_fmt(arg);
2913 }
2914
2915 static int opt_sync(const char *opt, const char *arg)
2916 {
2917     if (!strcmp(arg, "audio"))
2918         av_sync_type = AV_SYNC_AUDIO_MASTER;
2919     else if (!strcmp(arg, "video"))
2920         av_sync_type = AV_SYNC_VIDEO_MASTER;
2921     else if (!strcmp(arg, "ext"))
2922         av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2923     else {
2924         fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
2925         exit(1);
2926     }
2927     return 0;
2928 }
2929
2930 static int opt_seek(const char *opt, const char *arg)
2931 {
2932     start_time = parse_time_or_die(opt, arg, 1);
2933     return 0;
2934 }
2935
2936 static int opt_duration(const char *opt, const char *arg)
2937 {
2938     duration = parse_time_or_die(opt, arg, 1);
2939     return 0;
2940 }
2941
2942 static int opt_debug(const char *opt, const char *arg)
2943 {
2944     av_log_set_level(99);
2945     debug = parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2946     return 0;
2947 }
2948
2949 static int opt_vismv(const char *opt, const char *arg)
2950 {
2951     debug_mv = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
2952     return 0;
2953 }
2954
2955 static int opt_thread_count(const char *opt, const char *arg)
2956 {
2957     thread_count= parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2958 #if !HAVE_THREADS
2959     fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
2960 #endif
2961     return 0;
2962 }
2963
2964 static int opt_show_mode(const char *opt, const char *arg)
2965 {
2966     show_mode = !strcmp(arg, "video") ? SHOW_MODE_VIDEO :
2967                 !strcmp(arg, "waves") ? SHOW_MODE_WAVES :
2968                 !strcmp(arg, "rdft" ) ? SHOW_MODE_RDFT  :
2969                 parse_number_or_die(opt, arg, OPT_INT, 0, SHOW_MODE_NB-1);
2970     return 0;
2971 }
2972
2973 static const OptionDef options[] = {
2974 #include "cmdutils_common_opts.h"
2975     { "x", HAS_ARG | OPT_FUNC2, {(void*)opt_width}, "force displayed width", "width" },
2976     { "y", HAS_ARG | OPT_FUNC2, {(void*)opt_height}, "force displayed height", "height" },
2977     { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
2978     { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
2979     { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
2980     { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
2981     { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_AUDIO]}, "select desired audio stream", "stream_number" },
2982     { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_VIDEO]}, "select desired video stream", "stream_number" },
2983     { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_SUBTITLE]}, "select desired subtitle stream", "stream_number" },
2984     { "ss", HAS_ARG | OPT_FUNC2, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
2985     { "t", HAS_ARG | OPT_FUNC2, {(void*)&opt_duration}, "play \"duration\" seconds of audio/video", "duration" },
2986     { "bytes", OPT_INT | HAS_ARG, {(void*)&seek_by_bytes}, "seek by bytes 0=off 1=on -1=auto", "val" },
2987     { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
2988     { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
2989     { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
2990     { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
2991     { "debug", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_debug}, "print specific debug info", "" },
2992     { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
2993     { "vismv", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_vismv}, "visualize motion vectors", "" },
2994     { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
2995     { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
2996     { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts 0=off 1=on -1=auto", ""},
2997     { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
2998     { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
2999     { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
3000     { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
3001     { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo",  "algo" },
3002     { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_recognition}, "set error detection threshold (0-4)",  "threshold" },
3003     { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options",  "bit_mask" },
3004     { "sync", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
3005     { "threads", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
3006     { "autoexit", OPT_BOOL | OPT_EXPERT, {(void*)&autoexit}, "exit at the end", "" },
3007     { "exitonkeydown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_keydown}, "exit on key down", "" },
3008     { "exitonmousedown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_mousedown}, "exit on mouse down", "" },
3009     { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&loop}, "set number of times the playback shall be looped", "loop count" },
3010     { "framedrop", OPT_BOOL | OPT_EXPERT, {(void*)&framedrop}, "drop frames when cpu is too slow", "" },
3011     { "window_title", OPT_STRING | HAS_ARG, {(void*)&window_title}, "set window title", "window title" },
3012 #if CONFIG_AVFILTER
3013     { "vf", OPT_STRING | HAS_ARG, {(void*)&vfilters}, "video filters", "filter list" },
3014 #endif
3015     { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, {(void*)&rdftspeed}, "rdft speed", "msecs" },
3016     { "showmode", HAS_ARG | OPT_FUNC2, {(void*)opt_show_mode}, "select show mode (0 = video, 1 = waves, 2 = RDFT)", "mode" },
3017     { "default", OPT_FUNC2 | HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
3018     { "i", OPT_DUMMY, {NULL}, "ffmpeg compatibility dummy option", ""},
3019     { NULL, },
3020 };
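/* Illustrative command line for the table above (hypothetical file name):
 *     ffplay -ss 30 -t 10 -sync audio -loop 2 input.mkv
 * seeks to 30 s, plays 10 s with the audio clock as master, and plays that
 * range twice; with CONFIG_AVFILTER, adding -vf "scale=640:360" would insert
 * a scale filter between ffplay_input and ffsink. */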
3021
3022 static void show_usage(void)
3023 {
3024     printf("Simple media player\n");
3025     printf("usage: ffplay [options] input_file\n");
3026     printf("\n");
3027 }
3028
3029 static void show_help(void)
3030 {
3031     av_log_set_callback(log_callback_help);
3032     show_usage();
3033     show_help_options(options, "Main options:\n",
3034                       OPT_EXPERT, 0);
3035     show_help_options(options, "\nAdvanced options:\n",
3036                       OPT_EXPERT, OPT_EXPERT);
3037     printf("\n");
3038     av_opt_show2(avcodec_opts[0], NULL,
3039                  AV_OPT_FLAG_DECODING_PARAM, 0);
3040     printf("\n");
3041     av_opt_show2(avformat_opts, NULL,
3042                  AV_OPT_FLAG_DECODING_PARAM, 0);
3043 #if !CONFIG_AVFILTER
3044     printf("\n");
3045     av_opt_show2(sws_opts, NULL,
3046                  AV_OPT_FLAG_ENCODING_PARAM, 0);
3047 #endif
3048     printf("\nWhile playing:\n"
3049            "q, ESC              quit\n"
3050            "f                   toggle full screen\n"
3051            "p, SPC              pause\n"
3052            "a                   cycle audio channel\n"
3053            "v                   cycle video channel\n"
3054            "t                   cycle subtitle channel\n"
3055            "w                   cycle show mode (video/waves/RDFT)\n"
3056            "s                   activate frame-step mode\n"
3057            "left/right          seek backward/forward 10 seconds\n"
3058            "down/up             seek backward/forward 1 minute\n"
3059            "mouse click         seek to percentage in file corresponding to fraction of width\n"
3060            );
3061 }
3062
3063 static void opt_input_file(const char *filename)
3064 {
3065     if (input_filename) {
3066         fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
3067                 filename, input_filename);
3068         exit(1);
3069     }
3070     if (!strcmp(filename, "-"))
3071         filename = "pipe:";
3072     input_filename = filename;
3073 }
3074
3075 /* program entry point */
3076 int main(int argc, char **argv)
3077 {
3078     int flags;
3079
3080     av_log_set_flags(AV_LOG_SKIP_REPEATED);
3081
3082     /* register all codecs, demux and protocols */
3083     avcodec_register_all();
3084 #if CONFIG_AVDEVICE
3085     avdevice_register_all();
3086 #endif
3087 #if CONFIG_AVFILTER
3088     avfilter_register_all();
3089 #endif
3090     av_register_all();
3091
3092     init_opts();
3093
3094     show_banner();
3095
3096     parse_options(argc, argv, options, opt_input_file);
3097
3098     if (!input_filename) {
3099         show_usage();
3100         fprintf(stderr, "An input file must be specified\n");
3101         fprintf(stderr, "Use -h to get full help or, even better, run 'man ffplay'\n");
3102         exit(1);
3103     }
3104
3105     if (display_disable) {
3106         video_disable = 1;
3107     }
3108     flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3109 #if !defined(__MINGW32__) && !defined(__APPLE__)
3110     flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
3111 #endif
3112     if (SDL_Init (flags)) {
3113         fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
3114         exit(1);
3115     }
3116
3117     if (!display_disable) {
3118 #if HAVE_SDL_VIDEO_SIZE
3119         const SDL_VideoInfo *vi = SDL_GetVideoInfo();
3120         fs_screen_width = vi->current_w;
3121         fs_screen_height = vi->current_h;
3122 #endif
3123     }
3124
3125     SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
3126     SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3127     SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3128
3129     av_init_packet(&flush_pkt);
3130     flush_pkt.data= "FLUSH";
3131
3132     cur_stream = stream_open(input_filename, file_iformat);
3133
3134     event_loop();
3135
3136     /* never returns */
3137
3138     return 0;
3139 }