ffplay: only request 4 or 6 channels from SDL, if SDL version is at least 1.2.8
1 /*
2  * Copyright (c) 2003 Fabrice Bellard
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20
21 /**
22  * @file
23  * simple media player based on the FFmpeg libraries
24  */
25
26 #include "config.h"
27 #include <inttypes.h>
28 #include <math.h>
29 #include <limits.h>
30 #include <signal.h>
31 #include "libavutil/avstring.h"
32 #include "libavutil/colorspace.h"
33 #include "libavutil/mathematics.h"
34 #include "libavutil/pixdesc.h"
35 #include "libavutil/imgutils.h"
36 #include "libavutil/dict.h"
37 #include "libavutil/parseutils.h"
38 #include "libavutil/samplefmt.h"
39 #include "libavutil/avassert.h"
40 #include "libavformat/avformat.h"
41 #include "libavdevice/avdevice.h"
42 #include "libswscale/swscale.h"
43 #include "libavutil/opt.h"
44 #include "libavcodec/avfft.h"
45 #include "libswresample/swresample.h"
46
47 #if CONFIG_AVFILTER
48 # include "libavfilter/avcodec.h"
49 # include "libavfilter/avfilter.h"
50 # include "libavfilter/avfiltergraph.h"
51 # include "libavfilter/buffersink.h"
52 #endif
53
54 #include <SDL.h>
55 #include <SDL_thread.h>
56
57 #include "cmdutils.h"
58
59 #include <unistd.h>
60 #include <assert.h>
61
62 const char program_name[] = "ffplay";
63 const int program_birth_year = 2003;
64
65 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
66 #define MIN_FRAMES 5
67
68 /* SDL audio buffer size, in samples. Should be small to have precise
69    A/V sync as SDL does not have hardware buffer fullness info. */
70 #define SDL_AUDIO_BUFFER_SIZE 1024
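/* For reference: at a typical 44.1 kHz output rate this corresponds to
   roughly 1024 / 44100 ≈ 23 ms of audio per SDL callback, which is also
   the granularity at which audio_callback_time below gets refreshed. */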
71
72 /* no AV sync correction is done if below the AV sync threshold */
73 #define AV_SYNC_THRESHOLD 0.01
75 /* no AV correction is done if the error is too big */
75 #define AV_NOSYNC_THRESHOLD 10.0
76
77 /* maximum audio speed change to get correct sync */
78 #define SAMPLE_CORRECTION_PERCENT_MAX 10
79
80 /* we use about AUDIO_DIFF_AVG_NB A-V differences to compute the average */
81 #define AUDIO_DIFF_AVG_NB   20
82
83 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
84 #define SAMPLE_ARRAY_SIZE (2 * 65536)
85
86 static int sws_flags = SWS_BICUBIC;
87
88 typedef struct PacketQueue {
89     AVPacketList *first_pkt, *last_pkt;
90     int nb_packets;
91     int size;
92     int abort_request;
93     SDL_mutex *mutex;
94     SDL_cond *cond;
95 } PacketQueue;
96
97 #define VIDEO_PICTURE_QUEUE_SIZE 2
98 #define SUBPICTURE_QUEUE_SIZE 4
99
100 typedef struct VideoPicture {
101     double pts;                                  ///< presentation time stamp for this picture
102     int64_t pos;                                 ///< byte position in file
103     int skip;
104     SDL_Overlay *bmp;
105     int width, height; /* source width & height */
106     AVRational sample_aspect_ratio;
107     int allocated;
108     int reallocate;
109     enum PixelFormat pix_fmt;
110
111 #if CONFIG_AVFILTER
112     AVFilterBufferRef *picref;
113 #endif
114 } VideoPicture;
115
116 typedef struct SubPicture {
117     double pts; /* presentation time stamp for this picture */
118     AVSubtitle sub;
119 } SubPicture;
120
121 enum {
122     AV_SYNC_AUDIO_MASTER, /* default choice */
123     AV_SYNC_VIDEO_MASTER,
124     AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
125 };
126
127 typedef struct VideoState {
128     SDL_Thread *read_tid;
129     SDL_Thread *video_tid;
130     SDL_Thread *refresh_tid;
131     AVInputFormat *iformat;
132     int no_background;
133     int abort_request;
134     int force_refresh;
135     int paused;
136     int last_paused;
137     int seek_req;
138     int seek_flags;
139     int64_t seek_pos;
140     int64_t seek_rel;
141     int read_pause_return;
142     AVFormatContext *ic;
143
144     int audio_stream;
145
146     int av_sync_type;
147     double external_clock; /* external clock base */
148     int64_t external_clock_time;
149
150     double audio_clock;
151     double audio_diff_cum; /* used for AV difference average computation */
152     double audio_diff_avg_coef;
153     double audio_diff_threshold;
154     int audio_diff_avg_count;
155     AVStream *audio_st;
156     PacketQueue audioq;
157     int audio_hw_buf_size;
158     DECLARE_ALIGNED(16,uint8_t,audio_buf2)[AVCODEC_MAX_AUDIO_FRAME_SIZE * 4];
159     uint8_t silence_buf[SDL_AUDIO_BUFFER_SIZE];
160     uint8_t *audio_buf;
161     uint8_t *audio_buf1;
162     unsigned int audio_buf_size; /* in bytes */
163     int audio_buf_index; /* in bytes */
164     int audio_write_buf_size;
165     AVPacket audio_pkt_temp;
166     AVPacket audio_pkt;
167     enum AVSampleFormat audio_src_fmt;
168     enum AVSampleFormat audio_tgt_fmt;
169     int audio_src_channels;
170     int audio_tgt_channels;
171     int64_t audio_src_channel_layout;
172     int64_t audio_tgt_channel_layout;
173     int audio_src_freq;
174     int audio_tgt_freq;
175     struct SwrContext *swr_ctx;
176     double audio_current_pts;
177     double audio_current_pts_drift;
178     int frame_drops_early;
179     int frame_drops_late;
180     AVFrame *frame;
181
182     enum ShowMode {
183         SHOW_MODE_NONE = -1, SHOW_MODE_VIDEO = 0, SHOW_MODE_WAVES, SHOW_MODE_RDFT, SHOW_MODE_NB
184     } show_mode;
185     int16_t sample_array[SAMPLE_ARRAY_SIZE];
186     int sample_array_index;
187     int last_i_start;
188     RDFTContext *rdft;
189     int rdft_bits;
190     FFTSample *rdft_data;
191     int xpos;
192
193     SDL_Thread *subtitle_tid;
194     int subtitle_stream;
195     int subtitle_stream_changed;
196     AVStream *subtitle_st;
197     PacketQueue subtitleq;
198     SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
199     int subpq_size, subpq_rindex, subpq_windex;
200     SDL_mutex *subpq_mutex;
201     SDL_cond *subpq_cond;
202
203     double frame_timer;
204     double frame_last_pts;
205     double frame_last_duration;
206     double frame_last_dropped_pts;
207     double frame_last_returned_time;
208     double frame_last_filter_delay;
209     int64_t frame_last_dropped_pos;
210     double video_clock;                          ///< pts of last decoded frame / predicted pts of next decoded frame
211     int video_stream;
212     AVStream *video_st;
213     PacketQueue videoq;
214     double video_current_pts;                    ///< current displayed pts (different from video_clock if frame fifos are used)
215     double video_current_pts_drift;              ///< video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
216     int64_t video_current_pos;                   ///< current displayed file pos
217     VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
218     int pictq_size, pictq_rindex, pictq_windex;
219     SDL_mutex *pictq_mutex;
220     SDL_cond *pictq_cond;
221 #if !CONFIG_AVFILTER
222     struct SwsContext *img_convert_ctx;
223 #endif
224
225     char filename[1024];
226     int width, height, xleft, ytop;
227     int step;
228
229 #if CONFIG_AVFILTER
230     AVFilterContext *out_video_filter;          ///< the last filter in the video chain
231 #endif
232
233     int refresh;
234     int last_video_stream, last_audio_stream, last_subtitle_stream;
235 } VideoState;
236
237 typedef struct AllocEventProps {
238     VideoState *is;
239     AVFrame *frame;
240 } AllocEventProps;
241
242 static int opt_help(const char *opt, const char *arg);
243
244 /* options specified by the user */
245 static AVInputFormat *file_iformat;
246 static const char *input_filename;
247 static const char *window_title;
248 static int fs_screen_width;
249 static int fs_screen_height;
250 static int screen_width  = 0;
251 static int screen_height = 0;
252 static int audio_disable;
253 static int video_disable;
254 static int wanted_stream[AVMEDIA_TYPE_NB] = {
255     [AVMEDIA_TYPE_AUDIO]    = -1,
256     [AVMEDIA_TYPE_VIDEO]    = -1,
257     [AVMEDIA_TYPE_SUBTITLE] = -1,
258 };
259 static int seek_by_bytes = -1;
260 static int display_disable;
261 static int show_status = 1;
262 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
263 static int64_t start_time = AV_NOPTS_VALUE;
264 static int64_t duration = AV_NOPTS_VALUE;
265 static int workaround_bugs = 1;
266 static int fast = 0;
267 static int genpts = 0;
268 static int lowres = 0;
269 static int idct = FF_IDCT_AUTO;
270 static enum AVDiscard skip_frame       = AVDISCARD_DEFAULT;
271 static enum AVDiscard skip_idct        = AVDISCARD_DEFAULT;
272 static enum AVDiscard skip_loop_filter = AVDISCARD_DEFAULT;
273 static int error_concealment = 3;
274 static int decoder_reorder_pts = -1;
275 static int autoexit;
276 static int exit_on_keydown;
277 static int exit_on_mousedown;
278 static int loop = 1;
279 static int framedrop = -1;
280 static enum ShowMode show_mode = SHOW_MODE_NONE;
281 static const char *audio_codec_name;
282 static const char *subtitle_codec_name;
283 static const char *video_codec_name;
284 static int rdftspeed = 20;
285 #if CONFIG_AVFILTER
286 static char *vfilters = NULL;
287 #endif
288
289 /* current context */
290 static int is_full_screen;
291 static int64_t audio_callback_time;
292
293 static AVPacket flush_pkt;
294
295 #define FF_ALLOC_EVENT   (SDL_USEREVENT)
296 #define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
297 #define FF_QUIT_EVENT    (SDL_USEREVENT + 2)
298
299 static SDL_Surface *screen;
300
301 void av_noreturn exit_program(int ret)
302 {
303     exit(ret);
304 }
305
306 static int packet_queue_put_private(PacketQueue *q, AVPacket *pkt)
307 {
308     AVPacketList *pkt1;
309
310     if (q->abort_request)
311        return -1;
312
313     pkt1 = av_malloc(sizeof(AVPacketList));
314     if (!pkt1)
315         return -1;
316     pkt1->pkt = *pkt;
317     pkt1->next = NULL;
318
319     if (!q->last_pkt)
320         q->first_pkt = pkt1;
321     else
322         q->last_pkt->next = pkt1;
323     q->last_pkt = pkt1;
324     q->nb_packets++;
325     q->size += pkt1->pkt.size + sizeof(*pkt1);
326     /* XXX: should duplicate packet data in DV case */
327     SDL_CondSignal(q->cond);
328     return 0;
329 }
330
331 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
332 {
333     int ret;
334
335     /* duplicate the packet */
336     if (pkt != &flush_pkt && av_dup_packet(pkt) < 0)
337         return -1;
338
339     SDL_LockMutex(q->mutex);
340     ret = packet_queue_put_private(q, pkt);
341     SDL_UnlockMutex(q->mutex);
342
343     if (pkt != &flush_pkt && ret < 0)
344         av_free_packet(pkt);
345
346     return ret;
347 }
348
349 /* packet queue handling */
350 static void packet_queue_init(PacketQueue *q)
351 {
352     memset(q, 0, sizeof(PacketQueue));
353     q->mutex = SDL_CreateMutex();
354     q->cond = SDL_CreateCond();
355     q->abort_request = 1;
356 }
357
358 static void packet_queue_flush(PacketQueue *q)
359 {
360     AVPacketList *pkt, *pkt1;
361
362     SDL_LockMutex(q->mutex);
363     for (pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
364         pkt1 = pkt->next;
365         av_free_packet(&pkt->pkt);
366         av_freep(&pkt);
367     }
368     q->last_pkt = NULL;
369     q->first_pkt = NULL;
370     q->nb_packets = 0;
371     q->size = 0;
372     SDL_UnlockMutex(q->mutex);
373 }
374
375 static void packet_queue_destroy(PacketQueue *q)
376 {
377     packet_queue_flush(q);
378     SDL_DestroyMutex(q->mutex);
379     SDL_DestroyCond(q->cond);
380 }
381
382 static void packet_queue_abort(PacketQueue *q)
383 {
384     SDL_LockMutex(q->mutex);
385
386     q->abort_request = 1;
387
388     SDL_CondSignal(q->cond);
389
390     SDL_UnlockMutex(q->mutex);
391 }
392
393 static void packet_queue_start(PacketQueue *q)
394 {
395     SDL_LockMutex(q->mutex);
396     q->abort_request = 0;
397     packet_queue_put_private(q, &flush_pkt);
398     SDL_UnlockMutex(q->mutex);
399 }
400
401 /* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
402 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
403 {
404     AVPacketList *pkt1;
405     int ret;
406
407     SDL_LockMutex(q->mutex);
408
409     for (;;) {
410         if (q->abort_request) {
411             ret = -1;
412             break;
413         }
414
415         pkt1 = q->first_pkt;
416         if (pkt1) {
417             q->first_pkt = pkt1->next;
418             if (!q->first_pkt)
419                 q->last_pkt = NULL;
420             q->nb_packets--;
421             q->size -= pkt1->pkt.size + sizeof(*pkt1);
422             *pkt = pkt1->pkt;
423             av_free(pkt1);
424             ret = 1;
425             break;
426         } else if (!block) {
427             ret = 0;
428             break;
429         } else {
430             SDL_CondWait(q->cond, q->mutex);
431         }
432     }
433     SDL_UnlockMutex(q->mutex);
434     return ret;
435 }
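/* Illustrative use of the queue API above (q, pkt and avctx are just
 * placeholders for the real fields used elsewhere in this file):
 *
 *     packet_queue_start(&q);                 // clears abort flag, inserts flush_pkt
 *     packet_queue_put(&q, &pkt);             // read thread: producer side
 *
 *     if (packet_queue_get(&q, &pkt, 1) < 0)  // decoder thread: blocking consumer
 *         return -1;                          // queue was aborted
 *     if (pkt.data == flush_pkt.data)
 *         avcodec_flush_buffers(avctx);       // flush marker seen after a seek
 */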
436
437 static inline void fill_rectangle(SDL_Surface *screen,
438                                   int x, int y, int w, int h, int color)
439 {
440     SDL_Rect rect;
441     rect.x = x;
442     rect.y = y;
443     rect.w = w;
444     rect.h = h;
445     SDL_FillRect(screen, &rect, color);
446 }
447
448 #define ALPHA_BLEND(a, oldp, newp, s)\
449 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
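/* ALPHA_BLEND mixes newp into oldp with alpha a in fixed point, roughly
 * (oldp * (255 - a) + newp * a) / 255; the shift s accounts for chroma
 * values that were accumulated over 2^s source pixels before blending.
 * For example, ALPHA_BLEND(255, x, y, 0) == y and ALPHA_BLEND(0, x, y, 0) == x. */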
450
451 #define RGBA_IN(r, g, b, a, s)\
452 {\
453     unsigned int v = ((const uint32_t *)(s))[0];\
454     a = (v >> 24) & 0xff;\
455     r = (v >> 16) & 0xff;\
456     g = (v >> 8) & 0xff;\
457     b = v & 0xff;\
458 }
459
460 #define YUVA_IN(y, u, v, a, s, pal)\
461 {\
462     unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
463     a = (val >> 24) & 0xff;\
464     y = (val >> 16) & 0xff;\
465     u = (val >> 8) & 0xff;\
466     v = val & 0xff;\
467 }
468
469 #define YUVA_OUT(d, y, u, v, a)\
470 {\
471     ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
472 }
473
474
475 #define BPP 1
476
477 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
478 {
479     int wrap, wrap3, width2, skip2;
480     int y, u, v, a, u1, v1, a1, w, h;
481     uint8_t *lum, *cb, *cr;
482     const uint8_t *p;
483     const uint32_t *pal;
484     int dstx, dsty, dstw, dsth;
485
486     dstw = av_clip(rect->w, 0, imgw);
487     dsth = av_clip(rect->h, 0, imgh);
488     dstx = av_clip(rect->x, 0, imgw - dstw);
489     dsty = av_clip(rect->y, 0, imgh - dsth);
490     lum = dst->data[0] + dsty * dst->linesize[0];
491     cb  = dst->data[1] + (dsty >> 1) * dst->linesize[1];
492     cr  = dst->data[2] + (dsty >> 1) * dst->linesize[2];
493
494     width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
495     skip2 = dstx >> 1;
496     wrap = dst->linesize[0];
497     wrap3 = rect->pict.linesize[0];
498     p = rect->pict.data[0];
499     pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */
500
501     if (dsty & 1) {
502         lum += dstx;
503         cb += skip2;
504         cr += skip2;
505
506         if (dstx & 1) {
507             YUVA_IN(y, u, v, a, p, pal);
508             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
509             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
510             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
511             cb++;
512             cr++;
513             lum++;
514             p += BPP;
515         }
516         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
517             YUVA_IN(y, u, v, a, p, pal);
518             u1 = u;
519             v1 = v;
520             a1 = a;
521             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
522
523             YUVA_IN(y, u, v, a, p + BPP, pal);
524             u1 += u;
525             v1 += v;
526             a1 += a;
527             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
528             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
529             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
530             cb++;
531             cr++;
532             p += 2 * BPP;
533             lum += 2;
534         }
535         if (w) {
536             YUVA_IN(y, u, v, a, p, pal);
537             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
538             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
539             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
540             p++;
541             lum++;
542         }
543         p += wrap3 - dstw * BPP;
544         lum += wrap - dstw - dstx;
545         cb += dst->linesize[1] - width2 - skip2;
546         cr += dst->linesize[2] - width2 - skip2;
547     }
548     for (h = dsth - (dsty & 1); h >= 2; h -= 2) {
549         lum += dstx;
550         cb += skip2;
551         cr += skip2;
552
553         if (dstx & 1) {
554             YUVA_IN(y, u, v, a, p, pal);
555             u1 = u;
556             v1 = v;
557             a1 = a;
558             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
559             p += wrap3;
560             lum += wrap;
561             YUVA_IN(y, u, v, a, p, pal);
562             u1 += u;
563             v1 += v;
564             a1 += a;
565             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
566             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
567             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
568             cb++;
569             cr++;
570             p += -wrap3 + BPP;
571             lum += -wrap + 1;
572         }
573         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
574             YUVA_IN(y, u, v, a, p, pal);
575             u1 = u;
576             v1 = v;
577             a1 = a;
578             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
579
580             YUVA_IN(y, u, v, a, p + BPP, pal);
581             u1 += u;
582             v1 += v;
583             a1 += a;
584             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
585             p += wrap3;
586             lum += wrap;
587
588             YUVA_IN(y, u, v, a, p, pal);
589             u1 += u;
590             v1 += v;
591             a1 += a;
592             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
593
594             YUVA_IN(y, u, v, a, p + BPP, pal);
595             u1 += u;
596             v1 += v;
597             a1 += a;
598             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
599
600             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
601             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
602
603             cb++;
604             cr++;
605             p += -wrap3 + 2 * BPP;
606             lum += -wrap + 2;
607         }
608         if (w) {
609             YUVA_IN(y, u, v, a, p, pal);
610             u1 = u;
611             v1 = v;
612             a1 = a;
613             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
614             p += wrap3;
615             lum += wrap;
616             YUVA_IN(y, u, v, a, p, pal);
617             u1 += u;
618             v1 += v;
619             a1 += a;
620             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
621             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
622             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
623             cb++;
624             cr++;
625             p += -wrap3 + BPP;
626             lum += -wrap + 1;
627         }
628         p += wrap3 + (wrap3 - dstw * BPP);
629         lum += wrap + (wrap - dstw - dstx);
630         cb += dst->linesize[1] - width2 - skip2;
631         cr += dst->linesize[2] - width2 - skip2;
632     }
633     /* handle odd height */
634     if (h) {
635         lum += dstx;
636         cb += skip2;
637         cr += skip2;
638
639         if (dstx & 1) {
640             YUVA_IN(y, u, v, a, p, pal);
641             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
642             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
643             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
644             cb++;
645             cr++;
646             lum++;
647             p += BPP;
648         }
649         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
650             YUVA_IN(y, u, v, a, p, pal);
651             u1 = u;
652             v1 = v;
653             a1 = a;
654             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
655
656             YUVA_IN(y, u, v, a, p + BPP, pal);
657             u1 += u;
658             v1 += v;
659             a1 += a;
660             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
661             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
662             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
663             cb++;
664             cr++;
665             p += 2 * BPP;
666             lum += 2;
667         }
668         if (w) {
669             YUVA_IN(y, u, v, a, p, pal);
670             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
671             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
672             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
673         }
674     }
675 }
676
677 static void free_subpicture(SubPicture *sp)
678 {
679     avsubtitle_free(&sp->sub);
680 }
681
682 static void video_image_display(VideoState *is)
683 {
684     VideoPicture *vp;
685     SubPicture *sp;
686     AVPicture pict;
687     float aspect_ratio;
688     int width, height, x, y;
689     SDL_Rect rect;
690     int i;
691
692     vp = &is->pictq[is->pictq_rindex];
693     if (vp->bmp) {
694         if (vp->sample_aspect_ratio.num == 0)
695             aspect_ratio = 0;
696         else
697             aspect_ratio = av_q2d(vp->sample_aspect_ratio);
698
699         if (aspect_ratio <= 0.0)
700             aspect_ratio = 1.0;
701         aspect_ratio *= (float)vp->width / (float)vp->height;
702
703         if (is->subtitle_st) {
704             if (is->subpq_size > 0) {
705                 sp = &is->subpq[is->subpq_rindex];
706
707                 if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) {
708                     SDL_LockYUVOverlay (vp->bmp);
709
710                     pict.data[0] = vp->bmp->pixels[0];
711                     pict.data[1] = vp->bmp->pixels[2];
712                     pict.data[2] = vp->bmp->pixels[1];
713
714                     pict.linesize[0] = vp->bmp->pitches[0];
715                     pict.linesize[1] = vp->bmp->pitches[2];
716                     pict.linesize[2] = vp->bmp->pitches[1];
717
718                     for (i = 0; i < sp->sub.num_rects; i++)
719                         blend_subrect(&pict, sp->sub.rects[i],
720                                       vp->bmp->w, vp->bmp->h);
721
722                     SDL_UnlockYUVOverlay (vp->bmp);
723                 }
724             }
725         }
726
727
728         /* XXX: we assume the screen has a 1:1 pixel aspect ratio */
729         height = is->height;
730         width = ((int)rint(height * aspect_ratio)) & ~1;
731         if (width > is->width) {
732             width = is->width;
733             height = ((int)rint(width / aspect_ratio)) & ~1;
734         }
735         x = (is->width - width) / 2;
736         y = (is->height - height) / 2;
737         is->no_background = 0;
738         rect.x = is->xleft + x;
739         rect.y = is->ytop  + y;
740         rect.w = FFMAX(width,  1);
741         rect.h = FFMAX(height, 1);
742         SDL_DisplayYUVOverlay(vp->bmp, &rect);
743     }
744 }
745
746 static inline int compute_mod(int a, int b)
747 {
748     return a < 0 ? a%b + b : a%b;
749 }
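/* compute_mod() is a mathematical modulo that always returns a value in
 * [0, b), e.g. compute_mod(-3, SAMPLE_ARRAY_SIZE) == SAMPLE_ARRAY_SIZE - 3,
 * which is what the circular sample_array indexing below relies on. */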
750
751 static void video_audio_display(VideoState *s)
752 {
753     int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
754     int ch, channels, h, h2, bgcolor, fgcolor;
755     int64_t time_diff; /* in microseconds, based on av_gettime() */
756     int rdft_bits, nb_freq;
757
758     for (rdft_bits = 1; (1 << rdft_bits) < 2 * s->height; rdft_bits++)
759         ;
760     nb_freq = 1 << (rdft_bits - 1);
761
762     /* compute display index: center on the currently output samples */
763     channels = s->audio_tgt_channels;
764     nb_display_channels = channels;
765     if (!s->paused) {
766         int data_used= s->show_mode == SHOW_MODE_WAVES ? s->width : (2*nb_freq);
767         n = 2 * channels;
768         delay = s->audio_write_buf_size;
769         delay /= n;
770
771         /* to be more precise, we take into account the time spent since
772            the last buffer computation */
773         if (audio_callback_time) {
774             time_diff = av_gettime() - audio_callback_time;
775             delay -= (time_diff * s->audio_tgt_freq) / 1000000;
776         }
777
778         delay += 2 * data_used;
779         if (delay < data_used)
780             delay = data_used;
781
782         i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
783         if (s->show_mode == SHOW_MODE_WAVES) {
784             h = INT_MIN;
785             for (i = 0; i < 1000; i += channels) {
786                 int idx = (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
787                 int a = s->sample_array[idx];
788                 int b = s->sample_array[(idx + 4 * channels) % SAMPLE_ARRAY_SIZE];
789                 int c = s->sample_array[(idx + 5 * channels) % SAMPLE_ARRAY_SIZE];
790                 int d = s->sample_array[(idx + 9 * channels) % SAMPLE_ARRAY_SIZE];
791                 int score = a - d;
792                 if (h < score && (b ^ c) < 0) {
793                     h = score;
794                     i_start = idx;
795                 }
796             }
797         }
798
799         s->last_i_start = i_start;
800     } else {
801         i_start = s->last_i_start;
802     }
803
804     bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
805     if (s->show_mode == SHOW_MODE_WAVES) {
806         fill_rectangle(screen,
807                        s->xleft, s->ytop, s->width, s->height,
808                        bgcolor);
809
810         fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
811
812         /* total height for one channel */
813         h = s->height / nb_display_channels;
814         /* graph height / 2 */
815         h2 = (h * 9) / 20;
816         for (ch = 0; ch < nb_display_channels; ch++) {
817             i = i_start + ch;
818             y1 = s->ytop + ch * h + (h / 2); /* position of center line */
819             for (x = 0; x < s->width; x++) {
820                 y = (s->sample_array[i] * h2) >> 15;
821                 if (y < 0) {
822                     y = -y;
823                     ys = y1 - y;
824                 } else {
825                     ys = y1;
826                 }
827                 fill_rectangle(screen,
828                                s->xleft + x, ys, 1, y,
829                                fgcolor);
830                 i += channels;
831                 if (i >= SAMPLE_ARRAY_SIZE)
832                     i -= SAMPLE_ARRAY_SIZE;
833             }
834         }
835
836         fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
837
838         for (ch = 1; ch < nb_display_channels; ch++) {
839             y = s->ytop + ch * h;
840             fill_rectangle(screen,
841                            s->xleft, y, s->width, 1,
842                            fgcolor);
843         }
844         SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
845     } else {
846         nb_display_channels= FFMIN(nb_display_channels, 2);
847         if (rdft_bits != s->rdft_bits) {
848             av_rdft_end(s->rdft);
849             av_free(s->rdft_data);
850             s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
851             s->rdft_bits = rdft_bits;
852             s->rdft_data = av_malloc(4 * nb_freq * sizeof(*s->rdft_data));
853         }
854         {
855             FFTSample *data[2];
856             for (ch = 0; ch < nb_display_channels; ch++) {
857                 data[ch] = s->rdft_data + 2 * nb_freq * ch;
858                 i = i_start + ch;
859                 for (x = 0; x < 2 * nb_freq; x++) {
860                     double w = (x-nb_freq) * (1.0 / nb_freq);
861                     data[ch][x] = s->sample_array[i] * (1.0 - w * w);
862                     i += channels;
863                     if (i >= SAMPLE_ARRAY_SIZE)
864                         i -= SAMPLE_ARRAY_SIZE;
865                 }
866                 av_rdft_calc(s->rdft, data[ch]);
867             }
868             // Least efficient way to do this; we could of course access the data directly, but it's more than fast enough
869             for (y = 0; y < s->height; y++) {
870                 double w = 1 / sqrt(nb_freq);
871                 int a = sqrt(w * sqrt(data[0][2 * y + 0] * data[0][2 * y + 0] + data[0][2 * y + 1] * data[0][2 * y + 1]));
872                 int b = (nb_display_channels == 2 ) ? sqrt(w * sqrt(data[1][2 * y + 0] * data[1][2 * y + 0]
873                        + data[1][2 * y + 1] * data[1][2 * y + 1])) : a;
874                 a = FFMIN(a, 255);
875                 b = FFMIN(b, 255);
876                 fgcolor = SDL_MapRGB(screen->format, a, b, (a + b) / 2);
877
878                 fill_rectangle(screen,
879                             s->xpos, s->height-y, 1, 1,
880                             fgcolor);
881             }
882         }
883         SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
884         if (!s->paused)
885             s->xpos++;
886         if (s->xpos >= s->width)
887             s->xpos= s->xleft;
888     }
889 }
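/* Note on the RDFT mode above: each channel's samples are weighted by
 * (1.0 - w * w) with w in [-1, 1), i.e. a Welch (parabolic) window, before
 * the real FFT; the magnitudes are then compressed with sqrt() and clamped
 * to 0..255 before being mapped to a colour of the scrolling spectrogram. */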
890
891 static void stream_close(VideoState *is)
892 {
893     VideoPicture *vp;
894     int i;
895     /* XXX: use a special url_shutdown call to abort parse cleanly */
896     is->abort_request = 1;
897     SDL_WaitThread(is->read_tid, NULL);
898     SDL_WaitThread(is->refresh_tid, NULL);
899     packet_queue_destroy(&is->videoq);
900     packet_queue_destroy(&is->audioq);
901     packet_queue_destroy(&is->subtitleq);
902
903     /* free all pictures */
904     for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
905         vp = &is->pictq[i];
906 #if CONFIG_AVFILTER
907         if (vp->picref) {
908             avfilter_unref_buffer(vp->picref);
909             vp->picref = NULL;
910         }
911 #endif
912         if (vp->bmp) {
913             SDL_FreeYUVOverlay(vp->bmp);
914             vp->bmp = NULL;
915         }
916     }
917     SDL_DestroyMutex(is->pictq_mutex);
918     SDL_DestroyCond(is->pictq_cond);
919     SDL_DestroyMutex(is->subpq_mutex);
920     SDL_DestroyCond(is->subpq_cond);
921 #if !CONFIG_AVFILTER
922     if (is->img_convert_ctx)
923         sws_freeContext(is->img_convert_ctx);
924 #endif
925     av_free(is);
926 }
927
928 static void do_exit(VideoState *is)
929 {
930     if (is) {
931         stream_close(is);
932     }
933     av_lockmgr_register(NULL);
934     uninit_opts();
935 #if CONFIG_AVFILTER
936     avfilter_uninit();
937 #endif
938     avformat_network_deinit();
939     if (show_status)
940         printf("\n");
941     SDL_Quit();
942     av_log(NULL, AV_LOG_QUIET, "%s", "");
943     exit(0);
944 }
945
946 static void sigterm_handler(int sig)
947 {
948     exit(123);
949 }
950
951 static int video_open(VideoState *is, int force_set_video_mode)
952 {
953     int flags = SDL_HWSURFACE | SDL_ASYNCBLIT | SDL_HWACCEL;
954     int w,h;
955     VideoPicture *vp = &is->pictq[is->pictq_rindex];
956
957     if (is_full_screen) flags |= SDL_FULLSCREEN;
958     else                flags |= SDL_RESIZABLE;
959
960     if (is_full_screen && fs_screen_width) {
961         w = fs_screen_width;
962         h = fs_screen_height;
963     } else if (!is_full_screen && screen_width) {
964         w = screen_width;
965         h = screen_height;
966     } else if (vp->width) {
967         w = vp->width;
968         h = vp->height;
969     } else {
970         w = 640;
971         h = 480;
972     }
973     if (screen && is->width == screen->w && screen->w == w
974        && is->height== screen->h && screen->h == h && !force_set_video_mode)
975         return 0;
976     screen = SDL_SetVideoMode(w, h, 0, flags);
977     if (!screen) {
978         fprintf(stderr, "SDL: could not set video mode - exiting\n");
979         do_exit(is);
980     }
981     if (!window_title)
982         window_title = input_filename;
983     SDL_WM_SetCaption(window_title, window_title);
984
985     is->width  = screen->w;
986     is->height = screen->h;
987
988     return 0;
989 }
990
991 /* display the current picture, if any */
992 static void video_display(VideoState *is)
993 {
994     if (!screen)
995         video_open(is, 0);
996     if (is->audio_st && is->show_mode != SHOW_MODE_VIDEO)
997         video_audio_display(is);
998     else if (is->video_st)
999         video_image_display(is);
1000 }
1001
1002 static int refresh_thread(void *opaque)
1003 {
1004     VideoState *is= opaque;
1005     while (!is->abort_request) {
1006         SDL_Event event;
1007         event.type = FF_REFRESH_EVENT;
1008         event.user.data1 = opaque;
1009         if (!is->refresh && (!is->paused || is->force_refresh)) {
1010             is->refresh = 1;
1011             SDL_PushEvent(&event);
1012         }
1013         // FIXME: ideally we should wait for the correct time, but SDL's event passing is so slow that it would be pointless
1014         usleep(is->audio_st && is->show_mode != SHOW_MODE_VIDEO ? rdftspeed*1000 : 5000);
1015     }
1016     return 0;
1017 }
1018
1019 /* get the current audio clock value */
1020 static double get_audio_clock(VideoState *is)
1021 {
1022     if (is->paused) {
1023         return is->audio_current_pts;
1024     } else {
1025         return is->audio_current_pts_drift + av_gettime() / 1000000.0;
1026     }
1027 }
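/* Both the audio and the video clock are stored as a drift value,
 * i.e. pts - system_time_at_last_update, so the current clock can be
 * recovered at any instant as drift + av_gettime() / 1000000.0 and keeps
 * advancing in real time between updates (see get_video_clock and
 * update_video_pts). */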
1028
1029 /* get the current video clock value */
1030 static double get_video_clock(VideoState *is)
1031 {
1032     if (is->paused) {
1033         return is->video_current_pts;
1034     } else {
1035         return is->video_current_pts_drift + av_gettime() / 1000000.0;
1036     }
1037 }
1038
1039 /* get the current external clock value */
1040 static double get_external_clock(VideoState *is)
1041 {
1042     int64_t ti;
1043     ti = av_gettime();
1044     return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
1045 }
1046
1047 /* get the current master clock value */
1048 static double get_master_clock(VideoState *is)
1049 {
1050     double val;
1051
1052     if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1053         if (is->video_st)
1054             val = get_video_clock(is);
1055         else
1056             val = get_audio_clock(is);
1057     } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1058         if (is->audio_st)
1059             val = get_audio_clock(is);
1060         else
1061             val = get_video_clock(is);
1062     } else {
1063         val = get_external_clock(is);
1064     }
1065     return val;
1066 }
1067
1068 /* seek in the stream */
1069 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1070 {
1071     if (!is->seek_req) {
1072         is->seek_pos = pos;
1073         is->seek_rel = rel;
1074         is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1075         if (seek_by_bytes)
1076             is->seek_flags |= AVSEEK_FLAG_BYTE;
1077         is->seek_req = 1;
1078     }
1079 }
1080
1081 /* pause or resume the video */
1082 static void stream_toggle_pause(VideoState *is)
1083 {
1084     if (is->paused) {
1085         is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
1086         if (is->read_pause_return != AVERROR(ENOSYS)) {
1087             is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
1088         }
1089         is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
1090     }
1091     is->paused = !is->paused;
1092 }
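/* Note: when resuming, frame_timer is advanced by
 * now + video_current_pts_drift - video_current_pts, which is simply the
 * wall-clock time elapsed since video_current_pts was last updated
 * (roughly the time spent paused), so playback resumes from the pause
 * point instead of trying to catch up. */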
1093
1094 static double compute_target_delay(double delay, VideoState *is)
1095 {
1096     double sync_threshold, diff;
1097
1098     /* update delay to follow master synchronisation source */
1099     if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1100          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1101         /* if video is slave, we try to correct big delays by
1102            duplicating or deleting a frame */
1103         diff = get_video_clock(is) - get_master_clock(is);
1104
1105         /* skip or repeat frame. We take into account the
1106            delay to compute the threshold. I still don't know
1107            if it is the best guess */
1108         sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1109         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1110             if (diff <= -sync_threshold)
1111                 delay = 0;
1112             else if (diff >= sync_threshold)
1113                 delay = 2 * delay;
1114         }
1115     }
1116
1117     av_dlog(NULL, "video: delay=%0.3f A-V=%f\n",
1118             delay, -diff);
1119
1120     return delay;
1121 }
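/* Illustrative example: with a nominal frame delay of 40 ms the sync
 * threshold is FFMAX(0.01, 0.04) = 0.04 s.  If the video clock is 0.1 s
 * behind the master clock (diff = -0.1), then diff <= -0.04 and the delay
 * becomes 0, so the frame is shown immediately; if the video clock is
 * 0.1 s ahead, the delay is doubled to 0.08 s so that audio can catch up. */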
1122
1123 static void pictq_next_picture(VideoState *is) {
1124     /* update queue size and signal for next picture */
1125     if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1126         is->pictq_rindex = 0;
1127
1128     SDL_LockMutex(is->pictq_mutex);
1129     is->pictq_size--;
1130     SDL_CondSignal(is->pictq_cond);
1131     SDL_UnlockMutex(is->pictq_mutex);
1132 }
1133
1134 static void update_video_pts(VideoState *is, double pts, int64_t pos) {
1135     double time = av_gettime() / 1000000.0;
1136     /* update current video pts */
1137     is->video_current_pts = pts;
1138     is->video_current_pts_drift = is->video_current_pts - time;
1139     is->video_current_pos = pos;
1140     is->frame_last_pts = pts;
1141 }
1142
1143 /* called to display each frame */
1144 static void video_refresh(void *opaque)
1145 {
1146     VideoState *is = opaque;
1147     VideoPicture *vp;
1148     double time;
1149
1150     SubPicture *sp, *sp2;
1151
1152     if (is->video_st) {
1153 retry:
1154         if (is->pictq_size == 0) {
1155             SDL_LockMutex(is->pictq_mutex);
1156             if (is->frame_last_dropped_pts != AV_NOPTS_VALUE && is->frame_last_dropped_pts > is->frame_last_pts) {
1157                 update_video_pts(is, is->frame_last_dropped_pts, is->frame_last_dropped_pos);
1158                 is->frame_last_dropped_pts = AV_NOPTS_VALUE;
1159             }
1160             SDL_UnlockMutex(is->pictq_mutex);
1161             // nothing to do, no picture to display in the queue
1162         } else {
1163             double last_duration, duration, delay;
1164             /* dequeue the picture */
1165             vp = &is->pictq[is->pictq_rindex];
1166
1167             if (vp->skip) {
1168                 pictq_next_picture(is);
1169                 goto retry;
1170             }
1171
1172             if (is->paused)
1173                 goto display;
1174
1175             /* compute nominal last_duration */
1176             last_duration = vp->pts - is->frame_last_pts;
1177             if (last_duration > 0 && last_duration < 10.0) {
1178                 /* if duration of the last frame was sane, update last_duration in video state */
1179                 is->frame_last_duration = last_duration;
1180             }
1181             delay = compute_target_delay(is->frame_last_duration, is);
1182
1183             time= av_gettime()/1000000.0;
1184             if (time < is->frame_timer + delay)
1185                 return;
1186
1187             if (delay > 0)
1188                 is->frame_timer += delay * FFMAX(1, floor((time-is->frame_timer) / delay));
1189
1190             SDL_LockMutex(is->pictq_mutex);
1191             update_video_pts(is, vp->pts, vp->pos);
1192             SDL_UnlockMutex(is->pictq_mutex);
1193
1194             if (is->pictq_size > 1) {
1195                 VideoPicture *nextvp = &is->pictq[(is->pictq_rindex + 1) % VIDEO_PICTURE_QUEUE_SIZE];
1196                 duration = nextvp->pts - vp->pts;
1197                 if((framedrop>0 || (framedrop && is->audio_st)) && time > is->frame_timer + duration){
1198                     is->frame_drops_late++;
1199                     pictq_next_picture(is);
1200                     goto retry;
1201                 }
1202             }
1203
1204             if (is->subtitle_st) {
1205                 if (is->subtitle_stream_changed) {
1206                     SDL_LockMutex(is->subpq_mutex);
1207
1208                     while (is->subpq_size) {
1209                         free_subpicture(&is->subpq[is->subpq_rindex]);
1210
1211                         /* update queue size and signal for next picture */
1212                         if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1213                             is->subpq_rindex = 0;
1214
1215                         is->subpq_size--;
1216                     }
1217                     is->subtitle_stream_changed = 0;
1218
1219                     SDL_CondSignal(is->subpq_cond);
1220                     SDL_UnlockMutex(is->subpq_mutex);
1221                 } else {
1222                     if (is->subpq_size > 0) {
1223                         sp = &is->subpq[is->subpq_rindex];
1224
1225                         if (is->subpq_size > 1)
1226                             sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1227                         else
1228                             sp2 = NULL;
1229
1230                         if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1231                                 || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1232                         {
1233                             free_subpicture(sp);
1234
1235                             /* update queue size and signal for next picture */
1236                             if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1237                                 is->subpq_rindex = 0;
1238
1239                             SDL_LockMutex(is->subpq_mutex);
1240                             is->subpq_size--;
1241                             SDL_CondSignal(is->subpq_cond);
1242                             SDL_UnlockMutex(is->subpq_mutex);
1243                         }
1244                     }
1245                 }
1246             }
1247
1248 display:
1249             /* display picture */
1250             if (!display_disable)
1251                 video_display(is);
1252
1253             if (!is->paused)
1254                 pictq_next_picture(is);
1255         }
1256     } else if (is->audio_st) {
1257         /* draw the next audio frame */
1258
1259         /* if there is only an audio stream, display the audio bars (better
1260            than nothing, just to test the implementation) */
1261
1262         /* display picture */
1263         if (!display_disable)
1264             video_display(is);
1265     }
1266     is->force_refresh = 0;
1267     if (show_status) {
1268         static int64_t last_time;
1269         int64_t cur_time;
1270         int aqsize, vqsize, sqsize;
1271         double av_diff;
1272
1273         cur_time = av_gettime();
1274         if (!last_time || (cur_time - last_time) >= 30000) {
1275             aqsize = 0;
1276             vqsize = 0;
1277             sqsize = 0;
1278             if (is->audio_st)
1279                 aqsize = is->audioq.size;
1280             if (is->video_st)
1281                 vqsize = is->videoq.size;
1282             if (is->subtitle_st)
1283                 sqsize = is->subtitleq.size;
1284             av_diff = 0;
1285             if (is->audio_st && is->video_st)
1286                 av_diff = get_audio_clock(is) - get_video_clock(is);
1287             printf("%7.2f A-V:%7.3f fd=%4d aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
1288                    get_master_clock(is),
1289                    av_diff,
1290                    is->frame_drops_early + is->frame_drops_late,
1291                    aqsize / 1024,
1292                    vqsize / 1024,
1293                    sqsize,
1294                    is->video_st ? is->video_st->codec->pts_correction_num_faulty_dts : 0,
1295                    is->video_st ? is->video_st->codec->pts_correction_num_faulty_pts : 0);
1296             fflush(stdout);
1297             last_time = cur_time;
1298         }
1299     }
1300 }
1301
1302 /* allocate a picture (this needs to be done in the main thread to avoid
1303    potential locking problems) */
1304 static void alloc_picture(AllocEventProps *event_props)
1305 {
1306     VideoState *is = event_props->is;
1307     AVFrame *frame = event_props->frame;
1308     VideoPicture *vp;
1309
1310     vp = &is->pictq[is->pictq_windex];
1311
1312     if (vp->bmp)
1313         SDL_FreeYUVOverlay(vp->bmp);
1314
1315 #if CONFIG_AVFILTER
1316     if (vp->picref)
1317         avfilter_unref_buffer(vp->picref);
1318     vp->picref = NULL;
1319 #endif
1320
1321     vp->width   = frame->width;
1322     vp->height  = frame->height;
1323     vp->pix_fmt = frame->format;
1324
1325     video_open(event_props->is, 0);
1326
1327     vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
1328                                    SDL_YV12_OVERLAY,
1329                                    screen);
1330     if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
1331         /* SDL allocates a buffer smaller than requested if the video
1332          * overlay hardware is unable to support the requested size. */
1333         fprintf(stderr, "Error: the video system does not support an image\n"
1334                         "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n"
1335                         "to reduce the image size.\n", vp->width, vp->height );
1336         do_exit(is);
1337     }
1338
1339     SDL_LockMutex(is->pictq_mutex);
1340     vp->allocated = 1;
1341     SDL_CondSignal(is->pictq_cond);
1342     SDL_UnlockMutex(is->pictq_mutex);
1343 }
1344
1345 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
1346 {
1347     VideoPicture *vp;
1348     double frame_delay, pts = pts1;
1349
1350     /* compute the exact PTS for the picture if it is omitted in the stream
1351      * pts1 is the dts of the pkt / pts of the frame */
1352     if (pts != 0) {
1353         /* update video clock with pts, if present */
1354         is->video_clock = pts;
1355     } else {
1356         pts = is->video_clock;
1357     }
1358     /* update video clock for next frame */
1359     frame_delay = av_q2d(is->video_st->codec->time_base);
1360     /* for MPEG2, the frame can be repeated, so we update the
1361        clock accordingly */
1362     frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1363     is->video_clock += frame_delay;
1364
1365 #if defined(DEBUG_SYNC) && 0
1366     printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
1367            av_get_picture_type_char(src_frame->pict_type), pts, pts1);
1368 #endif
1369
1370     /* wait until we have space to put a new picture */
1371     SDL_LockMutex(is->pictq_mutex);
1372
1373     while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1374            !is->videoq.abort_request) {
1375         SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1376     }
1377     SDL_UnlockMutex(is->pictq_mutex);
1378
1379     if (is->videoq.abort_request)
1380         return -1;
1381
1382     vp = &is->pictq[is->pictq_windex];
1383
1384     /* alloc or resize hardware picture buffer */
1385     if (!vp->bmp || vp->reallocate ||
1386         vp->width  != src_frame->width ||
1387         vp->height != src_frame->height) {
1388         SDL_Event event;
1389         AllocEventProps event_props;
1390
1391         event_props.frame = src_frame;
1392         event_props.is = is;
1393
1394         vp->allocated  = 0;
1395         vp->reallocate = 0;
1396
1397         /* the allocation must be done in the main thread to avoid
1398            locking problems. We wait in this block for the event to complete,
1399            so we can pass a pointer to event_props to it. */
1400         event.type = FF_ALLOC_EVENT;
1401         event.user.data1 = &event_props;
1402         SDL_PushEvent(&event);
1403
1404         /* wait until the picture is allocated */
1405         SDL_LockMutex(is->pictq_mutex);
1406         while (!vp->allocated && !is->videoq.abort_request) {
1407             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1408         }
1409         /* if the queue is aborted, we have to pop the pending ALLOC event or wait for the allocation to complete */
1410         if (is->videoq.abort_request && SDL_PeepEvents(&event, 1, SDL_GETEVENT, SDL_EVENTMASK(FF_ALLOC_EVENT)) != 1) {
1411             while (!vp->allocated) {
1412                 SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1413             }
1414         }
1415         SDL_UnlockMutex(is->pictq_mutex);
1416
1417         if (is->videoq.abort_request)
1418             return -1;
1419     }
1420
1421     /* if the frame is not skipped, then display it */
1422     if (vp->bmp) {
1423         AVPicture pict = { { 0 } };
1424 #if CONFIG_AVFILTER
1425         if (vp->picref)
1426             avfilter_unref_buffer(vp->picref);
1427         vp->picref = src_frame->opaque;
1428 #endif
1429
1430         /* get a pointer to the bitmap */
1431         SDL_LockYUVOverlay (vp->bmp);
1432
1433         pict.data[0] = vp->bmp->pixels[0];
1434         pict.data[1] = vp->bmp->pixels[2];
1435         pict.data[2] = vp->bmp->pixels[1];
1436
1437         pict.linesize[0] = vp->bmp->pitches[0];
1438         pict.linesize[1] = vp->bmp->pitches[2];
1439         pict.linesize[2] = vp->bmp->pitches[1];
1440
1441 #if CONFIG_AVFILTER
1442         // FIXME use direct rendering
1443         av_picture_copy(&pict, (AVPicture *)src_frame,
1444                         vp->pix_fmt, vp->width, vp->height);
1445         vp->sample_aspect_ratio = vp->picref->video->sample_aspect_ratio;
1446 #else
1447         sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
1448         is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
1449             vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
1450             PIX_FMT_YUV420P, sws_flags, NULL, NULL, NULL);
1451         if (is->img_convert_ctx == NULL) {
1452             fprintf(stderr, "Cannot initialize the conversion context\n");
1453             exit(1);
1454         }
1455         sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1456                   0, vp->height, pict.data, pict.linesize);
1457         vp->sample_aspect_ratio = av_guess_sample_aspect_ratio(is->ic, is->video_st, src_frame);
1458 #endif
1459         /* update the bitmap content */
1460         SDL_UnlockYUVOverlay(vp->bmp);
1461
1462         vp->pts = pts;
1463         vp->pos = pos;
1464         vp->skip = 0;
1465
1466         /* now we can update the picture count */
1467         if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1468             is->pictq_windex = 0;
1469         SDL_LockMutex(is->pictq_mutex);
1470         is->pictq_size++;
1471         SDL_UnlockMutex(is->pictq_mutex);
1472     }
1473     return 0;
1474 }
1475
1476 static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
1477 {
1478     int got_picture, i;
1479
1480     if (packet_queue_get(&is->videoq, pkt, 1) < 0)
1481         return -1;
1482
1483     if (pkt->data == flush_pkt.data) {
1484         avcodec_flush_buffers(is->video_st->codec);
1485
1486         SDL_LockMutex(is->pictq_mutex);
1487         // Make sure there are no long delay timers (ideally we should just flush the queue, but that's harder)
1488         for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
1489             is->pictq[i].skip = 1;
1490         }
1491         while (is->pictq_size && !is->videoq.abort_request) {
1492             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1493         }
1494         is->video_current_pos = -1;
1495         is->frame_last_pts = AV_NOPTS_VALUE;
1496         is->frame_last_duration = 0;
1497         is->frame_timer = (double)av_gettime() / 1000000.0;
1498         is->frame_last_dropped_pts = AV_NOPTS_VALUE;
1499         SDL_UnlockMutex(is->pictq_mutex);
1500
1501         return 0;
1502     }
1503
1504     avcodec_decode_video2(is->video_st->codec, frame, &got_picture, pkt);
1505
1506     if (got_picture) {
1507         int ret = 1;
1508
1509         if (decoder_reorder_pts == -1) {
1510             *pts = av_frame_get_best_effort_timestamp(frame);
1511         } else if (decoder_reorder_pts) {
1512             *pts = frame->pkt_pts;
1513         } else {
1514             *pts = frame->pkt_dts;
1515         }
1516
1517         if (*pts == AV_NOPTS_VALUE) {
1518             *pts = 0;
1519         }
1520
1521         if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) || is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK) &&
1522              (framedrop>0 || (framedrop && is->audio_st))) {
1523             SDL_LockMutex(is->pictq_mutex);
1524             if (is->frame_last_pts != AV_NOPTS_VALUE && *pts) {
1525                 double clockdiff = get_video_clock(is) - get_master_clock(is);
1526                 double dpts = av_q2d(is->video_st->time_base) * *pts;
1527                 double ptsdiff = dpts - is->frame_last_pts;
1528                 if (fabs(clockdiff) < AV_NOSYNC_THRESHOLD &&
1529                      ptsdiff > 0 && ptsdiff < AV_NOSYNC_THRESHOLD &&
1530                      clockdiff + ptsdiff - is->frame_last_filter_delay < 0) {
1531                     is->frame_last_dropped_pos = pkt->pos;
1532                     is->frame_last_dropped_pts = dpts;
1533                     is->frame_drops_early++;
1534                     ret = 0;
1535                 }
1536             }
1537             SDL_UnlockMutex(is->pictq_mutex);
1538         }
1539
1540         if (ret)
1541             is->frame_last_returned_time = av_gettime() / 1000000.0;
1542
1543         return ret;
1544     }
1545     return 0;
1546 }
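/* Roughly speaking, a decoded frame is dropped here ("early", before it is
 * queued for display) when the video clock lags the master clock by more than
 * the frame's pts advance over the previous frame minus the time the last
 * frame spent in the filter chain, i.e. when it would be displayed late
 * anyway.  Frames dropped after queueing are counted separately in
 * frame_drops_late (see video_refresh). */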
1547
1548 #if CONFIG_AVFILTER
1549 typedef struct {
1550     VideoState *is;
1551     AVFrame *frame;
1552     int use_dr1;
1553 } FilterPriv;
1554
1555 static int input_get_buffer(AVCodecContext *codec, AVFrame *pic)
1556 {
1557     AVFilterContext *ctx = codec->opaque;
1558     AVFilterBufferRef  *ref;
1559     int perms = AV_PERM_WRITE;
1560     int i, w, h, stride[AV_NUM_DATA_POINTERS];
1561     unsigned edge;
1562     int pixel_size;
1563
1564     av_assert0(codec->flags & CODEC_FLAG_EMU_EDGE);
1565
1566     if (codec->codec->capabilities & CODEC_CAP_NEG_LINESIZES)
1567         perms |= AV_PERM_NEG_LINESIZES;
1568
1569     if (pic->buffer_hints & FF_BUFFER_HINTS_VALID) {
1570         if (pic->buffer_hints & FF_BUFFER_HINTS_READABLE) perms |= AV_PERM_READ;
1571         if (pic->buffer_hints & FF_BUFFER_HINTS_PRESERVE) perms |= AV_PERM_PRESERVE;
1572         if (pic->buffer_hints & FF_BUFFER_HINTS_REUSABLE) perms |= AV_PERM_REUSE2;
1573     }
1574     if (pic->reference) perms |= AV_PERM_READ | AV_PERM_PRESERVE;
1575
1576     w = codec->width;
1577     h = codec->height;
1578
1579     if(av_image_check_size(w, h, 0, codec) || codec->pix_fmt<0)
1580         return -1;
1581
1582     avcodec_align_dimensions2(codec, &w, &h, stride);
1583     edge = codec->flags & CODEC_FLAG_EMU_EDGE ? 0 : avcodec_get_edge_width();
1584     w += edge << 1;
1585     h += edge << 1;
1586     if (codec->pix_fmt != ctx->outputs[0]->format) {
1587         av_log(codec, AV_LOG_ERROR, "Pixel format mismatch %d %d\n", codec->pix_fmt, ctx->outputs[0]->format);
1588         return -1;
1589     }
1590     if (!(ref = avfilter_get_video_buffer(ctx->outputs[0], perms, w, h)))
1591         return -1;
1592
1593     pixel_size = av_pix_fmt_descriptors[ref->format].comp[0].step_minus1 + 1;
1594     ref->video->w = codec->width;
1595     ref->video->h = codec->height;
1596     for (i = 0; i < 4; i ++) {
1597         unsigned hshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_w : 0;
1598         unsigned vshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_h : 0;
1599
1600         pic->base[i]     = ref->data[i];
1601         if (ref->data[i]) {
1602             ref->data[i]    += ((edge * pixel_size) >> hshift) + ((edge * ref->linesize[i]) >> vshift);
1603         }
1604         pic->data[i]     = ref->data[i];
1605         pic->linesize[i] = ref->linesize[i];
1606     }
1607     pic->opaque = ref;
1608     pic->type   = FF_BUFFER_TYPE_USER;
1609     pic->reordered_opaque = codec->reordered_opaque;
1610     pic->width               = codec->width;
1611     pic->height              = codec->height;
1612     pic->format              = codec->pix_fmt;
1613     pic->sample_aspect_ratio = codec->sample_aspect_ratio;
1614     if (codec->pkt) pic->pkt_pts = codec->pkt->pts;
1615     else            pic->pkt_pts = AV_NOPTS_VALUE;
1616     return 0;
1617 }
1618
1619 static void input_release_buffer(AVCodecContext *codec, AVFrame *pic)
1620 {
1621     memset(pic->data, 0, sizeof(pic->data));
1622     avfilter_unref_buffer(pic->opaque);
1623 }
1624
1625 static int input_reget_buffer(AVCodecContext *codec, AVFrame *pic)
1626 {
1627     AVFilterBufferRef *ref = pic->opaque;
1628
1629     if (pic->data[0] == NULL) {
1630         pic->buffer_hints |= FF_BUFFER_HINTS_READABLE;
1631         return codec->get_buffer(codec, pic);
1632     }
1633
1634     if ((codec->width != ref->video->w) || (codec->height != ref->video->h) ||
1635         (codec->pix_fmt != ref->format)) {
1636         av_log(codec, AV_LOG_ERROR, "Picture properties changed.\n");
1637         return -1;
1638     }
1639
1640     pic->reordered_opaque = codec->reordered_opaque;
1641     if (codec->pkt) pic->pkt_pts = codec->pkt->pts;
1642     else            pic->pkt_pts = AV_NOPTS_VALUE;
1643     return 0;
1644 }
1645
1646 static int input_init(AVFilterContext *ctx, const char *args, void *opaque)
1647 {
1648     FilterPriv *priv = ctx->priv;
1649     AVCodecContext *codec;
1650     if (!opaque) return -1;
1651
1652     priv->is = opaque;
1653     codec    = priv->is->video_st->codec;
1654     codec->opaque = ctx;
1655     if (codec->codec->capabilities & CODEC_CAP_DR1) {
1656         av_assert0(codec->flags & CODEC_FLAG_EMU_EDGE);
1657         priv->use_dr1 = 1;
1658         codec->get_buffer     = input_get_buffer;
1659         codec->release_buffer = input_release_buffer;
1660         codec->reget_buffer   = input_reget_buffer;
1661         codec->thread_safe_callbacks = 1;
1662     }
1663
1664     priv->frame = avcodec_alloc_frame();
1665
1666     return 0;
1667 }
1668
1669 static void input_uninit(AVFilterContext *ctx)
1670 {
1671     FilterPriv *priv = ctx->priv;
1672     av_free(priv->frame);
1673 }
1674
1675 static int input_request_frame(AVFilterLink *link)
1676 {
1677     FilterPriv *priv = link->src->priv;
1678     AVFilterBufferRef *picref;
1679     int64_t pts = 0;
1680     AVPacket pkt;
1681     int ret;
1682
1683     while (!(ret = get_video_frame(priv->is, priv->frame, &pts, &pkt)))
1684         av_free_packet(&pkt);
1685     if (ret < 0)
1686         return -1;
1687
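         /* with direct rendering (use_dr1) the decoded frame already sits in a
          * buffer obtained from the filter chain, so adding a reference to it is
          * enough; otherwise allocate a filter buffer and copy the frame into it */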
1688     if (priv->use_dr1 && priv->frame->opaque) {
1689         picref = avfilter_ref_buffer(priv->frame->opaque, ~0);
1690     } else {
1691         picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, priv->frame->width, priv->frame->height);
1692         av_image_copy(picref->data, picref->linesize,
1693                       (const uint8_t **)(void **)priv->frame->data, priv->frame->linesize,
1694                       picref->format, priv->frame->width, priv->frame->height);
1695     }
1696     av_free_packet(&pkt);
1697
1698     avfilter_copy_frame_props(picref, priv->frame);
1699     picref->video->sample_aspect_ratio = av_guess_sample_aspect_ratio(priv->is->ic, priv->is->video_st, priv->frame);
1700     picref->pts = pts;
1701
1702     avfilter_start_frame(link, picref);
1703     avfilter_draw_slice(link, 0, picref->video->h, 1);
1704     avfilter_end_frame(link);
1705
1706     return 0;
1707 }
1708
1709 static int input_query_formats(AVFilterContext *ctx)
1710 {
1711     FilterPriv *priv = ctx->priv;
1712     enum PixelFormat pix_fmts[] = {
1713         priv->is->video_st->codec->pix_fmt, PIX_FMT_NONE
1714     };
1715
1716     avfilter_set_common_pixel_formats(ctx, avfilter_make_format_list(pix_fmts));
1717     return 0;
1718 }
1719
1720 static int input_config_props(AVFilterLink *link)
1721 {
1722     FilterPriv *priv  = link->src->priv;
1723     AVStream *s = priv->is->video_st;
1724
1725     link->w = s->codec->width;
1726     link->h = s->codec->height;
1727     link->sample_aspect_ratio = s->sample_aspect_ratio.num ?
1728         s->sample_aspect_ratio : s->codec->sample_aspect_ratio;
1729     link->time_base = s->time_base;
1730
1731     return 0;
1732 }
1733
1734 static AVFilter input_filter =
1735 {
1736     .name      = "ffplay_input",
1737
1738     .priv_size = sizeof(FilterPriv),
1739
1740     .init      = input_init,
1741     .uninit    = input_uninit,
1742
1743     .query_formats = input_query_formats,
1744
1745     .inputs    = (AVFilterPad[]) {{ .name = NULL }},
1746     .outputs   = (AVFilterPad[]) {{ .name = "default",
1747                                     .type = AVMEDIA_TYPE_VIDEO,
1748                                     .request_frame = input_request_frame,
1749                                     .config_props  = input_config_props, },
1750                                   { .name = NULL }},
1751 };
1752
1753 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters)
1754 {
1755     static const enum PixelFormat pix_fmts[] = { PIX_FMT_YUV420P, PIX_FMT_NONE };
1756     char sws_flags_str[128];
1757     int ret;
1758     AVBufferSinkParams *buffersink_params = av_buffersink_params_alloc();
1759     AVFilterContext *filt_src = NULL, *filt_out = NULL, *filt_format;
1760     snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%d", sws_flags);
1761     graph->scale_sws_opts = av_strdup(sws_flags_str);
1762
1763     if ((ret = avfilter_graph_create_filter(&filt_src, &input_filter, "src",
1764                                             NULL, is, graph)) < 0)
1765         return ret;
1766
1767 #if FF_API_OLD_VSINK_API
1768     ret = avfilter_graph_create_filter(&filt_out,
1769                                        avfilter_get_by_name("buffersink"),
1770                                        "out", NULL, pix_fmts, graph);
1771 #else
1772     buffersink_params->pixel_fmts = pix_fmts;
1773     ret = avfilter_graph_create_filter(&filt_out,
1774                                        avfilter_get_by_name("buffersink"),
1775                                        "out", NULL, buffersink_params, graph);
1776 #endif
1777     av_freep(&buffersink_params);
1778     if (ret < 0)
1779         return ret;
1780
1781     if ((ret = avfilter_graph_create_filter(&filt_format,
1782                                             avfilter_get_by_name("format"),
1783                                             "format", "yuv420p", NULL, graph)) < 0)
1784         return ret;
1785     if ((ret = avfilter_link(filt_format, 0, filt_out, 0)) < 0)
1786         return ret;
1787
1788
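         /* the resulting chain is ffplay_input -> [optional user filters] ->
          * format=yuv420p -> buffersink; without a user filter string the source
          * is linked directly to the format filter */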
1789     if (vfilters) {
1790         AVFilterInOut *outputs = avfilter_inout_alloc();
1791         AVFilterInOut *inputs  = avfilter_inout_alloc();
1792
1793         outputs->name    = av_strdup("in");
1794         outputs->filter_ctx = filt_src;
1795         outputs->pad_idx = 0;
1796         outputs->next    = NULL;
1797
1798         inputs->name    = av_strdup("out");
1799         inputs->filter_ctx = filt_format;
1800         inputs->pad_idx = 0;
1801         inputs->next    = NULL;
1802
1803         if ((ret = avfilter_graph_parse(graph, vfilters, &inputs, &outputs, NULL)) < 0)
1804             return ret;
1805     } else {
1806         if ((ret = avfilter_link(filt_src, 0, filt_format, 0)) < 0)
1807             return ret;
1808     }
1809
1810     if ((ret = avfilter_graph_config(graph, NULL)) < 0)
1811         return ret;
1812
1813     is->out_video_filter = filt_out;
1814
1815     return ret;
1816 }
1817
1818 #endif  /* CONFIG_AVFILTER */
1819
1820 static int video_thread(void *arg)
1821 {
1822     VideoState *is = arg;
1823     AVFrame *frame = avcodec_alloc_frame();
1824     int64_t pts_int = AV_NOPTS_VALUE, pos = -1;
1825     double pts;
1826     int ret;
1827
1828 #if CONFIG_AVFILTER
1829     AVFilterGraph *graph = avfilter_graph_alloc();
1830     AVFilterContext *filt_out = NULL;
1831     int last_w = is->video_st->codec->width;
1832     int last_h = is->video_st->codec->height;
1833
1834     if ((ret = configure_video_filters(graph, is, vfilters)) < 0) {
1835         SDL_Event event;
1836         event.type = FF_QUIT_EVENT;
1837         event.user.data1 = is;
1838         SDL_PushEvent(&event);
1839         goto the_end;
1840     }
1841     filt_out = is->out_video_filter;
1842 #endif
1843
1844     for (;;) {
1845 #if !CONFIG_AVFILTER
1846         AVPacket pkt;
1847 #else
1848         AVFilterBufferRef *picref;
1849         AVRational tb = filt_out->inputs[0]->time_base;
1850 #endif
1851         while (is->paused && !is->videoq.abort_request)
1852             SDL_Delay(10);
1853 #if CONFIG_AVFILTER
1854         if (   last_w != is->video_st->codec->width
1855             || last_h != is->video_st->codec->height) {
1856             av_log(NULL, AV_LOG_INFO, "Frame changed from size:%dx%d to size:%dx%d\n",
1857                    last_w, last_h, is->video_st->codec->width, is->video_st->codec->height);
1858             avfilter_graph_free(&graph);
1859             graph = avfilter_graph_alloc();
1860             if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
1861                 goto the_end;
1862             filt_out = is->out_video_filter;
1863             last_w = is->video_st->codec->width;
1864             last_h = is->video_st->codec->height;
1865         }
1866         ret = av_buffersink_get_buffer_ref(filt_out, &picref, 0);
1867         if (picref) {
1868             avfilter_fill_frame_from_video_buffer_ref(frame, picref);
1869             pts_int = picref->pts;
1870             tb      = filt_out->inputs[0]->time_base;
1871             pos     = picref->pos;
1872             frame->opaque = picref;
1873
1874             ret = 1;
1875         }
1876
1877         if (ret >= 0 && av_cmp_q(tb, is->video_st->time_base)) {
1878             av_unused int64_t pts1 = pts_int;
1879             pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
1880             av_dlog(NULL, "video_thread(): "
1881                     "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
1882                     tb.num, tb.den, pts1,
1883                     is->video_st->time_base.num, is->video_st->time_base.den, pts_int);
1884         }
1885 #else
1886         ret = get_video_frame(is, frame, &pts_int, &pkt);
1887         pos = pkt.pos;
1888         av_free_packet(&pkt);
1889         if (ret == 0)
1890             continue;
1891 #endif
1892
1893         if (ret < 0)
1894             goto the_end;
1895
1896         is->frame_last_filter_delay = av_gettime() / 1000000.0 - is->frame_last_returned_time;
1897         if (fabs(is->frame_last_filter_delay) > AV_NOSYNC_THRESHOLD / 10.0)
1898             is->frame_last_filter_delay = 0;
1899
1900 #if CONFIG_AVFILTER
1901         if (!picref)
1902             continue;
1903 #endif
1904
1905         pts = pts_int * av_q2d(is->video_st->time_base);
1906
1907         ret = queue_picture(is, frame, pts, pos);
1908
1909         if (ret < 0)
1910             goto the_end;
1911
1912         if (is->step)
1913             stream_toggle_pause(is);
1914     }
1915  the_end:
1916     avcodec_flush_buffers(is->video_st->codec);
1917 #if CONFIG_AVFILTER
1918     av_freep(&vfilters);
1919     avfilter_graph_free(&graph);
1920 #endif
1921     av_free(frame);
1922     return 0;
1923 }
1924
1925 static int subtitle_thread(void *arg)
1926 {
1927     VideoState *is = arg;
1928     SubPicture *sp;
1929     AVPacket pkt1, *pkt = &pkt1;
1930     int got_subtitle;
1931     double pts;
1932     int i, j;
1933     int r, g, b, y, u, v, a;
1934
1935     for (;;) {
1936         while (is->paused && !is->subtitleq.abort_request) {
1937             SDL_Delay(10);
1938         }
1939         if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
1940             break;
1941
1942         if (pkt->data == flush_pkt.data) {
1943             avcodec_flush_buffers(is->subtitle_st->codec);
1944             continue;
1945         }
1946         SDL_LockMutex(is->subpq_mutex);
1947         while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
1948                !is->subtitleq.abort_request) {
1949             SDL_CondWait(is->subpq_cond, is->subpq_mutex);
1950         }
1951         SDL_UnlockMutex(is->subpq_mutex);
1952
1953         if (is->subtitleq.abort_request)
1954             return 0;
1955
1956         sp = &is->subpq[is->subpq_windex];
1957
1958         /* NOTE: pts is the PTS of the _first_ subtitle beginning in
1959            this packet, if any */
1960         pts = 0;
1961         if (pkt->pts != AV_NOPTS_VALUE)
1962             pts = av_q2d(is->subtitle_st->time_base) * pkt->pts;
1963
1964         avcodec_decode_subtitle2(is->subtitle_st->codec, &sp->sub,
1965                                  &got_subtitle, pkt);
1966
1967         if (got_subtitle && sp->sub.format == 0) {
1968             sp->pts = pts;
1969
1970             for (i = 0; i < sp->sub.num_rects; i++)
1971             {
1972                 for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
1973                 {
1974                     RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
1975                     y = RGB_TO_Y_CCIR(r, g, b);
1976                     u = RGB_TO_U_CCIR(r, g, b, 0);
1977                     v = RGB_TO_V_CCIR(r, g, b, 0);
1978                     YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
1979                 }
1980             }
1981
1982             /* now we can update the picture count */
1983             if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
1984                 is->subpq_windex = 0;
1985             SDL_LockMutex(is->subpq_mutex);
1986             is->subpq_size++;
1987             SDL_UnlockMutex(is->subpq_mutex);
1988         }
1989         av_free_packet(pkt);
1990     }
1991     return 0;
1992 }
1993
1994 /* copy samples for viewing in the audio display (wave/RDFT) window */
1995 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1996 {
1997     int size, len;
1998
1999     size = samples_size / sizeof(short);
2000     while (size > 0) {
2001         len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
2002         if (len > size)
2003             len = size;
2004         memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
2005         samples += len;
2006         is->sample_array_index += len;
2007         if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
2008             is->sample_array_index = 0;
2009         size -= len;
2010     }
2011 }
2012
2013 /* return the wanted number of samples to get better sync if sync_type is video
2014  * or external master clock */
2015 static int synchronize_audio(VideoState *is, int nb_samples)
2016 {
2017     int wanted_nb_samples = nb_samples;
2018
2019     /* if not master, then we try to remove or add samples to correct the clock */
2020     if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
2021          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
2022         double diff, avg_diff;
2023         int min_nb_samples, max_nb_samples;
2024
2025         diff = get_audio_clock(is) - get_master_clock(is);
2026
2027         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
2028             is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
2029             if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
2030                 /* not enough measurements to have a correct estimate */
2031                 is->audio_diff_avg_count++;
2032             } else {
2033                 /* estimate the A-V difference */
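                     /* audio_diff_cum is the geometric sum
                      * diff_n + c * diff_(n-1) + c^2 * diff_(n-2) + ..., whose weights
                      * add up to 1/(1 - c), so scaling by (1 - c) yields a weighted
                      * average of the recent A-V differences */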
2034                 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
2035
2036                 if (fabs(avg_diff) >= is->audio_diff_threshold) {
2037                     wanted_nb_samples = nb_samples + (int)(diff * is->audio_src_freq);
2038                     min_nb_samples = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX) / 100));
2039                     max_nb_samples = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX) / 100));
2040                     wanted_nb_samples = FFMIN(FFMAX(wanted_nb_samples, min_nb_samples), max_nb_samples);
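                         /* e.g. with 1024 samples at 48 kHz and diff = +10 ms this asks
                          * for 1024 + 480 = 1504 samples, clamped to 1126 by the +-10%
                          * SAMPLE_CORRECTION_PERCENT_MAX bound */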
2041                 }
2042                 av_dlog(NULL, "diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
2043                         diff, avg_diff, wanted_nb_samples - nb_samples,
2044                         is->audio_clock, is->video_clock, is->audio_diff_threshold);
2045             }
2046         } else {
2047             /* the difference is too big: it may be caused by initial PTS
2048                errors, so reset the A-V filter */
2049             is->audio_diff_avg_count = 0;
2050             is->audio_diff_cum       = 0;
2051         }
2052     }
2053
2054     return wanted_nb_samples;
2055 }
2056
2057 /* decode one audio frame and return its uncompressed size */
2058 static int audio_decode_frame(VideoState *is, double *pts_ptr)
2059 {
2060     AVPacket *pkt_temp = &is->audio_pkt_temp;
2061     AVPacket *pkt = &is->audio_pkt;
2062     AVCodecContext *dec = is->audio_st->codec;
2063     int len1, len2, data_size, resampled_data_size;
2064     int64_t dec_channel_layout;
2065     int got_frame;
2066     double pts;
2067     int new_packet = 0;
2068     int flush_complete = 0;
2069     int wanted_nb_samples;
2070
2071     for (;;) {
2072         /* NOTE: the audio packet can contain several frames */
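             /* a NULL data pointer together with a freshly read packet is the
              * empty packet queued at EOF: keep calling the decoder so codecs
              * with CODEC_CAP_DELAY can return their buffered frames */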
2073         while (pkt_temp->size > 0 || (!pkt_temp->data && new_packet)) {
2074             if (!is->frame) {
2075                 if (!(is->frame = avcodec_alloc_frame()))
2076                     return AVERROR(ENOMEM);
2077             } else
2078                 avcodec_get_frame_defaults(is->frame);
2079
2080             if (is->paused)
2081                 return -1;
2082
2083             if (flush_complete)
2084                 break;
2085             new_packet = 0;
2086             len1 = avcodec_decode_audio4(dec, is->frame, &got_frame, pkt_temp);
2087             if (len1 < 0) {
2088                 /* if error, we skip the frame */
2089                 pkt_temp->size = 0;
2090                 break;
2091             }
2092
2093             pkt_temp->data += len1;
2094             pkt_temp->size -= len1;
2095
2096             if (!got_frame) {
2097                 /* stop sending empty packets if the decoder is finished */
2098                 if (!pkt_temp->data && dec->codec->capabilities & CODEC_CAP_DELAY)
2099                     flush_complete = 1;
2100                 continue;
2101             }
2102             data_size = av_samples_get_buffer_size(NULL, dec->channels,
2103                                                    is->frame->nb_samples,
2104                                                    dec->sample_fmt, 1);
2105
2106             dec_channel_layout = (dec->channel_layout && dec->channels == av_get_channel_layout_nb_channels(dec->channel_layout)) ? dec->channel_layout : av_get_default_channel_layout(dec->channels);
2107             wanted_nb_samples = synchronize_audio(is, is->frame->nb_samples);
2108
2109             if (dec->sample_fmt != is->audio_src_fmt ||
2110                 dec_channel_layout != is->audio_src_channel_layout ||
2111                 dec->sample_rate != is->audio_src_freq ||
2112                 (wanted_nb_samples != is->frame->nb_samples && !is->swr_ctx)) {
2113                 if (is->swr_ctx)
2114                     swr_free(&is->swr_ctx);
2115                 is->swr_ctx = swr_alloc_set_opts(NULL,
2116                                                  is->audio_tgt_channel_layout, is->audio_tgt_fmt, is->audio_tgt_freq,
2117                                                  dec_channel_layout,           dec->sample_fmt,   dec->sample_rate,
2118                                                  0, NULL);
2119                 if (!is->swr_ctx || swr_init(is->swr_ctx) < 0) {
2120                     fprintf(stderr, "Cannot create sample rate converter for conversion of %d Hz %s %d channels to %d Hz %s %d channels!\n",
2121                         dec->sample_rate,
2122                         av_get_sample_fmt_name(dec->sample_fmt),
2123                         dec->channels,
2124                         is->audio_tgt_freq,
2125                         av_get_sample_fmt_name(is->audio_tgt_fmt),
2126                         is->audio_tgt_channels);
2127                     break;
2128                 }
2129                 is->audio_src_channel_layout = dec_channel_layout;
2130                 is->audio_src_channels = dec->channels;
2131                 is->audio_src_freq = dec->sample_rate;
2132                 is->audio_src_fmt = dec->sample_fmt;
2133             }
2134
2135             resampled_data_size = data_size;
2136             if (is->swr_ctx) {
2137                 const uint8_t *in[] = { is->frame->data[0] };
2138                 uint8_t *out[] = {is->audio_buf2};
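                     /* if synchronize_audio() asked for a different sample count,
                      * let the resampler absorb the difference smoothly over this
                      * frame, e.g. turning 1024 decoded samples into roughly 1074
                      * output samples */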
2139                 if (wanted_nb_samples != is->frame->nb_samples) {
2140                     if (swr_set_compensation(is->swr_ctx, (wanted_nb_samples - is->frame->nb_samples) * is->audio_tgt_freq / dec->sample_rate,
2141                                                 wanted_nb_samples * is->audio_tgt_freq / dec->sample_rate) < 0) {
2142                         fprintf(stderr, "swr_set_compensation() failed\n");
2143                         break;
2144                     }
2145                 }
2146                 len2 = swr_convert(is->swr_ctx, out, sizeof(is->audio_buf2) / is->audio_tgt_channels / av_get_bytes_per_sample(is->audio_tgt_fmt),
2147                                                 in, is->frame->nb_samples);
2148                 if (len2 < 0) {
2149                     fprintf(stderr, "swr_convert() failed\n");
2150                     break;
2151                 }
2152                 if (len2 == sizeof(is->audio_buf2) / is->audio_tgt_channels / av_get_bytes_per_sample(is->audio_tgt_fmt)) {
2153                     fprintf(stderr, "warning: audio buffer is probably too small\n");
2154                     swr_init(is->swr_ctx);
2155                 }
2156                 is->audio_buf = is->audio_buf2;
2157                 resampled_data_size = len2 * is->audio_tgt_channels * av_get_bytes_per_sample(is->audio_tgt_fmt);
2158             } else {
2159                 is->audio_buf = is->frame->data[0];
2160             }
2161
2162             /* derive the frame pts from the running audio clock, then advance the clock by this frame's duration */
2163             pts = is->audio_clock;
2164             *pts_ptr = pts;
2165             is->audio_clock += (double)data_size /
2166                 (dec->channels * dec->sample_rate * av_get_bytes_per_sample(dec->sample_fmt));
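                 /* e.g. a 4608 byte packed S16 stereo frame at 44100 Hz advances
                  * the clock by 4608 / (2 * 2 * 44100) ~= 26 ms */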
2167 #ifdef DEBUG
2168             {
2169                 static double last_clock;
2170                 printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
2171                        is->audio_clock - last_clock,
2172                        is->audio_clock, pts);
2173                 last_clock = is->audio_clock;
2174             }
2175 #endif
2176             return resampled_data_size;
2177         }
2178
2179         /* free the current packet */
2180         if (pkt->data)
2181             av_free_packet(pkt);
2182         memset(pkt_temp, 0, sizeof(*pkt_temp));
2183
2184         if (is->paused || is->audioq.abort_request) {
2185             return -1;
2186         }
2187
2188         /* read next packet */
2189         if ((new_packet = packet_queue_get(&is->audioq, pkt, 1)) < 0)
2190             return -1;
2191
2192         if (pkt->data == flush_pkt.data) {
2193             avcodec_flush_buffers(dec);
2194             flush_complete = 0;
2195         }
2196
2197         *pkt_temp = *pkt;
2198
2199         /* update the audio clock with the packet pts, if available */
2200         if (pkt->pts != AV_NOPTS_VALUE) {
2201             is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
2202         }
2203     }
2204 }
2205
2206 /* prepare a new audio buffer */
2207 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2208 {
2209     VideoState *is = opaque;
2210     int audio_size, len1;
2211     int bytes_per_sec;
2212     int frame_size = av_samples_get_buffer_size(NULL, is->audio_tgt_channels, 1, is->audio_tgt_fmt, 1);
2213     double pts;
2214
2215     audio_callback_time = av_gettime();
2216
2217     while (len > 0) {
2218         if (is->audio_buf_index >= is->audio_buf_size) {
2219             audio_size = audio_decode_frame(is, &pts);
2220             if (audio_size < 0) {
2221                 /* if error, just output silence */
2222                 is->audio_buf      = is->silence_buf;
2223                 is->audio_buf_size = sizeof(is->silence_buf) / frame_size * frame_size;
2224             } else {
2225                 if (is->show_mode != SHOW_MODE_VIDEO)
2226                     update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2227                 is->audio_buf_size = audio_size;
2228             }
2229             is->audio_buf_index = 0;
2230         }
2231         len1 = is->audio_buf_size - is->audio_buf_index;
2232         if (len1 > len)
2233             len1 = len;
2234         memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2235         len -= len1;
2236         stream += len1;
2237         is->audio_buf_index += len1;
2238     }
2239     bytes_per_sec = is->audio_tgt_freq * is->audio_tgt_channels * av_get_bytes_per_sample(is->audio_tgt_fmt);
2240     is->audio_write_buf_size = is->audio_buf_size - is->audio_buf_index;
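         /* audio_clock refers to the end of the data decoded above; subtracting
          * the bytes still queued in SDL and the unwritten part of audio_buf
          * gives the pts of the sample being heard now, and the drift lets
          * get_audio_clock() extrapolate it between callbacks */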
2241     /* Let's assume the audio driver that is used by SDL has two periods. */
2242     is->audio_current_pts = is->audio_clock - (double)(2 * is->audio_hw_buf_size + is->audio_write_buf_size) / bytes_per_sec;
2243     is->audio_current_pts_drift = is->audio_current_pts - audio_callback_time / 1000000.0;
2244 }
2245
2246 /* open a given stream. Return 0 if OK */
2247 static int stream_component_open(VideoState *is, int stream_index)
2248 {
2249     AVFormatContext *ic = is->ic;
2250     AVCodecContext *avctx;
2251     AVCodec *codec;
2252     SDL_AudioSpec wanted_spec, spec;
2253     AVDictionary *opts;
2254     AVDictionaryEntry *t = NULL;
2255     int64_t wanted_channel_layout = 0;
2256     int wanted_nb_channels;
2257     const char *env;
2258
2259     if (stream_index < 0 || stream_index >= ic->nb_streams)
2260         return -1;
2261     avctx = ic->streams[stream_index]->codec;
2262
2263     codec = avcodec_find_decoder(avctx->codec_id);
2264     opts = filter_codec_opts(codec_opts, codec, ic, ic->streams[stream_index]);
2265
2266     switch(avctx->codec_type){
2267         case AVMEDIA_TYPE_AUDIO   : is->last_audio_stream    = stream_index; if(audio_codec_name   ) codec= avcodec_find_decoder_by_name(   audio_codec_name); break;
2268         case AVMEDIA_TYPE_SUBTITLE: is->last_subtitle_stream = stream_index; if(subtitle_codec_name) codec= avcodec_find_decoder_by_name(subtitle_codec_name); break;
2269         case AVMEDIA_TYPE_VIDEO   : is->last_video_stream    = stream_index; if(video_codec_name   ) codec= avcodec_find_decoder_by_name(   video_codec_name); break;
2270     }
2271     if (!codec)
2272         return -1;
2273
2274     avctx->workaround_bugs   = workaround_bugs;
2275     avctx->lowres            = lowres;
2276     if (avctx->lowres > codec->max_lowres) {
2277         av_log(avctx, AV_LOG_WARNING, "The maximum value for lowres supported by the decoder is %d\n",
2278                codec->max_lowres);
2279         avctx->lowres = codec->max_lowres;
2280     }
2281     avctx->idct_algo         = idct;
2282     avctx->skip_frame        = skip_frame;
2283     avctx->skip_idct         = skip_idct;
2284     avctx->skip_loop_filter  = skip_loop_filter;
2285     avctx->error_concealment = error_concealment;
2286
2287     if (avctx->lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
2288     if (fast)   avctx->flags2 |= CODEC_FLAG2_FAST;
2289     if (codec->capabilities & CODEC_CAP_DR1)
2290         avctx->flags |= CODEC_FLAG_EMU_EDGE;
2291
2292     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2293         memset(&is->audio_pkt_temp, 0, sizeof(is->audio_pkt_temp));
2294         env = SDL_getenv("SDL_AUDIO_CHANNELS");
2295         if (env)
2296             wanted_channel_layout = av_get_default_channel_layout(SDL_atoi(env));
2297         if (!wanted_channel_layout) {
2298             wanted_channel_layout = (avctx->channel_layout && avctx->channels == av_get_channel_layout_nb_channels(avctx->channel_layout)) ? avctx->channel_layout : av_get_default_channel_layout(avctx->channels);
2299             wanted_channel_layout &= ~AV_CH_LAYOUT_STEREO_DOWNMIX;
2300             wanted_nb_channels = av_get_channel_layout_nb_channels(wanted_channel_layout);
2301             /* SDL only supports 1, 2, 4 or 6 channels at the moment, so we have to make sure not to request anything else. */
2302             while (wanted_nb_channels > 0 && (wanted_nb_channels == 3 || wanted_nb_channels == 5 || wanted_nb_channels > (SDL_VERSION_ATLEAST(1, 2, 8) ? 6 : 2))) {
2303                 wanted_nb_channels--;
2304                 wanted_channel_layout = av_get_default_channel_layout(wanted_nb_channels);
2305             }
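                 /* e.g. a 5.1 source on SDL < 1.2.8 steps 6 -> 5 -> 4 -> 3 -> 2 and
                  * ends up requesting stereo, while SDL >= 1.2.8 keeps all 6 channels;
                  * 3, 5 and more than 6 channels are always reduced to the next
                  * lower supported count */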
2306         }
2307         wanted_spec.channels = av_get_channel_layout_nb_channels(wanted_channel_layout);
2308         wanted_spec.freq = avctx->sample_rate;
2309         if (wanted_spec.freq <= 0 || wanted_spec.channels <= 0) {
2310             fprintf(stderr, "Invalid sample rate or channel count!\n");
2311             return -1;
2312         }
2313     }
2314
2315     if (!av_dict_get(opts, "threads", NULL, 0))
2316         av_dict_set(&opts, "threads", "auto", 0);
2317     if (!codec ||
2318         avcodec_open2(avctx, codec, &opts) < 0)
2319         return -1;
2320     if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2321         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2322         return AVERROR_OPTION_NOT_FOUND;
2323     }
2324
2325     /* prepare audio output */
2326     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2327         wanted_spec.format = AUDIO_S16SYS;
2328         wanted_spec.silence = 0;
2329         wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
2330         wanted_spec.callback = sdl_audio_callback;
2331         wanted_spec.userdata = is;
2332         if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2333             fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
2334             return -1;
2335         }
2336         is->audio_hw_buf_size = spec.size;
2337         if (spec.format != AUDIO_S16SYS) {
2338             fprintf(stderr, "SDL advised audio format %d is not supported!\n", spec.format);
2339             return -1;
2340         }
2341         if (spec.channels != wanted_spec.channels) {
2342             wanted_channel_layout = av_get_default_channel_layout(spec.channels);
2343             if (!wanted_channel_layout) {
2344                 fprintf(stderr, "SDL advised channel count %d is not supported!\n", spec.channels);
2345                 return -1;
2346             }
2347         }
2348         is->audio_src_fmt = is->audio_tgt_fmt = AV_SAMPLE_FMT_S16;
2349         is->audio_src_freq = is->audio_tgt_freq = spec.freq;
2350         is->audio_src_channel_layout = is->audio_tgt_channel_layout = wanted_channel_layout;
2351         is->audio_src_channels = is->audio_tgt_channels = spec.channels;
2352     }
2353
2354     ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2355     switch (avctx->codec_type) {
2356     case AVMEDIA_TYPE_AUDIO:
2357         is->audio_stream = stream_index;
2358         is->audio_st = ic->streams[stream_index];
2359         is->audio_buf_size  = 0;
2360         is->audio_buf_index = 0;
2361
2362         /* init averaging filter */
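             /* the coefficient is 0.01^(1/AUDIO_DIFF_AVG_NB) ~= 0.794 for NB = 20,
              * so a measurement's weight decays to 1% after AUDIO_DIFF_AVG_NB updates */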
2363         is->audio_diff_avg_coef  = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2364         is->audio_diff_avg_count = 0;
2365         /* since we do not have a precise enough audio FIFO fullness measure,
2366            we correct audio sync only if the error is larger than this threshold */
2367         is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / wanted_spec.freq;
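             /* e.g. 2 * 1024 samples at 44100 Hz is roughly 46 ms */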
2368
2369         memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
2370         packet_queue_start(&is->audioq);
2371         SDL_PauseAudio(0);
2372         break;
2373     case AVMEDIA_TYPE_VIDEO:
2374         is->video_stream = stream_index;
2375         is->video_st = ic->streams[stream_index];
2376
2377         packet_queue_start(&is->videoq);
2378         is->video_tid = SDL_CreateThread(video_thread, is);
2379         break;
2380     case AVMEDIA_TYPE_SUBTITLE:
2381         is->subtitle_stream = stream_index;
2382         is->subtitle_st = ic->streams[stream_index];
2383         packet_queue_start(&is->subtitleq);
2384
2385         is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
2386         break;
2387     default:
2388         break;
2389     }
2390     return 0;
2391 }
2392
2393 static void stream_component_close(VideoState *is, int stream_index)
2394 {
2395     AVFormatContext *ic = is->ic;
2396     AVCodecContext *avctx;
2397
2398     if (stream_index < 0 || stream_index >= ic->nb_streams)
2399         return;
2400     avctx = ic->streams[stream_index]->codec;
2401
2402     switch (avctx->codec_type) {
2403     case AVMEDIA_TYPE_AUDIO:
2404         packet_queue_abort(&is->audioq);
2405
2406         SDL_CloseAudio();
2407
2408         packet_queue_flush(&is->audioq);
2409         av_free_packet(&is->audio_pkt);
2410         if (is->swr_ctx)
2411             swr_free(&is->swr_ctx);
2412         av_freep(&is->audio_buf1);
2413         is->audio_buf = NULL;
2414         av_freep(&is->frame);
2415
2416         if (is->rdft) {
2417             av_rdft_end(is->rdft);
2418             av_freep(&is->rdft_data);
2419             is->rdft = NULL;
2420             is->rdft_bits = 0;
2421         }
2422         break;
2423     case AVMEDIA_TYPE_VIDEO:
2424         packet_queue_abort(&is->videoq);
2425
2426         /* note: we also signal this mutex to make sure we unblock the
2427            video thread in all cases */
2428         SDL_LockMutex(is->pictq_mutex);
2429         SDL_CondSignal(is->pictq_cond);
2430         SDL_UnlockMutex(is->pictq_mutex);
2431
2432         SDL_WaitThread(is->video_tid, NULL);
2433
2434         packet_queue_flush(&is->videoq);
2435         break;
2436     case AVMEDIA_TYPE_SUBTITLE:
2437         packet_queue_abort(&is->subtitleq);
2438
2439         /* note: we also signal this mutex to make sure we unblock the
2440            subtitle thread in all cases */
2441         SDL_LockMutex(is->subpq_mutex);
2442         is->subtitle_stream_changed = 1;
2443
2444         SDL_CondSignal(is->subpq_cond);
2445         SDL_UnlockMutex(is->subpq_mutex);
2446
2447         SDL_WaitThread(is->subtitle_tid, NULL);
2448
2449         packet_queue_flush(&is->subtitleq);
2450         break;
2451     default:
2452         break;
2453     }
2454
2455     ic->streams[stream_index]->discard = AVDISCARD_ALL;
2456     avcodec_close(avctx);
2457     switch (avctx->codec_type) {
2458     case AVMEDIA_TYPE_AUDIO:
2459         is->audio_st = NULL;
2460         is->audio_stream = -1;
2461         break;
2462     case AVMEDIA_TYPE_VIDEO:
2463         is->video_st = NULL;
2464         is->video_stream = -1;
2465         break;
2466     case AVMEDIA_TYPE_SUBTITLE:
2467         is->subtitle_st = NULL;
2468         is->subtitle_stream = -1;
2469         break;
2470     default:
2471         break;
2472     }
2473 }
2474
2475 static int decode_interrupt_cb(void *ctx)
2476 {
2477     VideoState *is = ctx;
2478     return is->abort_request;
2479 }
2480
2481 /* this thread gets the stream from the disk or the network */
2482 static int read_thread(void *arg)
2483 {
2484     VideoState *is = arg;
2485     AVFormatContext *ic = NULL;
2486     int err, i, ret;
2487     int st_index[AVMEDIA_TYPE_NB];
2488     AVPacket pkt1, *pkt = &pkt1;
2489     int eof = 0;
2490     int pkt_in_play_range = 0;
2491     AVDictionaryEntry *t;
2492     AVDictionary **opts;
2493     int orig_nb_streams;
2494
2495     memset(st_index, -1, sizeof(st_index));
2496     is->last_video_stream = is->video_stream = -1;
2497     is->last_audio_stream = is->audio_stream = -1;
2498     is->last_subtitle_stream = is->subtitle_stream = -1;
2499
2500     ic = avformat_alloc_context();
2501     ic->interrupt_callback.callback = decode_interrupt_cb;
2502     ic->interrupt_callback.opaque = is;
2503     err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
2504     if (err < 0) {
2505         print_error(is->filename, err);
2506         ret = -1;
2507         goto fail;
2508     }
2509     if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2510         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2511         ret = AVERROR_OPTION_NOT_FOUND;
2512         goto fail;
2513     }
2514     is->ic = ic;
2515
2516     if (genpts)
2517         ic->flags |= AVFMT_FLAG_GENPTS;
2518
2519     opts = setup_find_stream_info_opts(ic, codec_opts);
2520     orig_nb_streams = ic->nb_streams;
2521
2522     err = avformat_find_stream_info(ic, opts);
2523     if (err < 0) {
2524         fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
2525         ret = -1;
2526         goto fail;
2527     }
2528     for (i = 0; i < orig_nb_streams; i++)
2529         av_dict_free(&opts[i]);
2530     av_freep(&opts);
2531
2532     if (ic->pb)
2533         ic->pb->eof_reached = 0; // FIXME hack, ffplay maybe should not use url_feof() to test for the end
2534
2535     if (seek_by_bytes < 0)
2536         seek_by_bytes = !!(ic->iformat->flags & AVFMT_TS_DISCONT);
2537
2538     /* if seeking was requested, execute it */
2539     if (start_time != AV_NOPTS_VALUE) {
2540         int64_t timestamp;
2541
2542         timestamp = start_time;
2543         /* add the stream start time */
2544         if (ic->start_time != AV_NOPTS_VALUE)
2545             timestamp += ic->start_time;
2546         ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2547         if (ret < 0) {
2548             fprintf(stderr, "%s: could not seek to position %0.3f\n",
2549                     is->filename, (double)timestamp / AV_TIME_BASE);
2550         }
2551     }
2552
2553     for (i = 0; i < ic->nb_streams; i++)
2554         ic->streams[i]->discard = AVDISCARD_ALL;
2555     if (!video_disable)
2556         st_index[AVMEDIA_TYPE_VIDEO] =
2557             av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2558                                 wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2559     if (!audio_disable)
2560         st_index[AVMEDIA_TYPE_AUDIO] =
2561             av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2562                                 wanted_stream[AVMEDIA_TYPE_AUDIO],
2563                                 st_index[AVMEDIA_TYPE_VIDEO],
2564                                 NULL, 0);
2565     if (!video_disable)
2566         st_index[AVMEDIA_TYPE_SUBTITLE] =
2567             av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2568                                 wanted_stream[AVMEDIA_TYPE_SUBTITLE],
2569                                 (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2570                                  st_index[AVMEDIA_TYPE_AUDIO] :
2571                                  st_index[AVMEDIA_TYPE_VIDEO]),
2572                                 NULL, 0);
2573     if (show_status) {
2574         av_dump_format(ic, 0, is->filename, 0);
2575     }
2576
2577     is->show_mode = show_mode;
2578
2579     /* open the streams */
2580     if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2581         stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2582     }
2583
2584     ret = -1;
2585     if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2586         ret = stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2587     }
2588     is->refresh_tid = SDL_CreateThread(refresh_thread, is);
2589     if (is->show_mode == SHOW_MODE_NONE)
2590         is->show_mode = ret >= 0 ? SHOW_MODE_VIDEO : SHOW_MODE_RDFT;
2591
2592     if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2593         stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2594     }
2595
2596     if (is->video_stream < 0 && is->audio_stream < 0) {
2597         fprintf(stderr, "%s: could not open codecs\n", is->filename);
2598         ret = -1;
2599         goto fail;
2600     }
2601
2602     for (;;) {
2603         if (is->abort_request)
2604             break;
2605         if (is->paused != is->last_paused) {
2606             is->last_paused = is->paused;
2607             if (is->paused)
2608                 is->read_pause_return = av_read_pause(ic);
2609             else
2610                 av_read_play(ic);
2611         }
2612 #if CONFIG_RTSP_DEMUXER || CONFIG_MMSH_PROTOCOL
2613         if (is->paused &&
2614                 (!strcmp(ic->iformat->name, "rtsp") ||
2615                  (ic->pb && !strncmp(input_filename, "mmsh:", 5)))) {
2616             /* wait 10 ms to avoid trying to get another packet */
2617             /* XXX: horrible */
2618             SDL_Delay(10);
2619             continue;
2620         }
2621 #endif
2622         if (is->seek_req) {
2623             int64_t seek_target = is->seek_pos;
2624             int64_t seek_min    = is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2625             int64_t seek_max    = is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2626 // FIXME the +-2 is due to rounding not being done in the correct direction in the
2627 //      generation of the seek_pos/seek_rel variables
2628
2629             ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2630             if (ret < 0) {
2631                 fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
2632             } else {
2633                 if (is->audio_stream >= 0) {
2634                     packet_queue_flush(&is->audioq);
2635                     packet_queue_put(&is->audioq, &flush_pkt);
2636                 }
2637                 if (is->subtitle_stream >= 0) {
2638                     packet_queue_flush(&is->subtitleq);
2639                     packet_queue_put(&is->subtitleq, &flush_pkt);
2640                 }
2641                 if (is->video_stream >= 0) {
2642                     packet_queue_flush(&is->videoq);
2643                     packet_queue_put(&is->videoq, &flush_pkt);
2644                 }
2645             }
2646             is->seek_req = 0;
2647             eof = 0;
2648         }
2649
2650         /* if the queues are full, no need to read more */
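             /* "full" means more than MAX_QUEUE_SIZE bytes in total, or every open,
              * non-aborted stream already holding more than MIN_FRAMES packets */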
2651         if (   is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2652             || (   (is->audioq   .nb_packets > MIN_FRAMES || is->audio_stream < 0 || is->audioq.abort_request)
2653                 && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream < 0 || is->videoq.abort_request)
2654                 && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream < 0 || is->subtitleq.abort_request))) {
2655             /* wait 10 ms */
2656             SDL_Delay(10);
2657             continue;
2658         }
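             /* at EOF, queue empty packets so decoders with CODEC_CAP_DELAY can
              * flush their remaining frames; once every queue has drained, either
              * restart (-loop) or quit (-autoexit) */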
2659         if (eof) {
2660             if (is->video_stream >= 0) {
2661                 av_init_packet(pkt);
2662                 pkt->data = NULL;
2663                 pkt->size = 0;
2664                 pkt->stream_index = is->video_stream;
2665                 packet_queue_put(&is->videoq, pkt);
2666             }
2667             if (is->audio_stream >= 0 &&
2668                 is->audio_st->codec->codec->capabilities & CODEC_CAP_DELAY) {
2669                 av_init_packet(pkt);
2670                 pkt->data = NULL;
2671                 pkt->size = 0;
2672                 pkt->stream_index = is->audio_stream;
2673                 packet_queue_put(&is->audioq, pkt);
2674             }
2675             SDL_Delay(10);
2676             if (is->audioq.size + is->videoq.size + is->subtitleq.size == 0) {
2677                 if (loop != 1 && (!loop || --loop)) {
2678                     stream_seek(is, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
2679                 } else if (autoexit) {
2680                     ret = AVERROR_EOF;
2681                     goto fail;
2682                 }
2683             }
2684             eof = 0;
2685             continue;
2686         }
2687         ret = av_read_frame(ic, pkt);
2688         if (ret < 0) {
2689             if (ret == AVERROR_EOF || url_feof(ic->pb))
2690                 eof = 1;
2691             if (ic->pb && ic->pb->error)
2692                 break;
2693             SDL_Delay(100); /* wait for user event */
2694             continue;
2695         }
2696         /* check if packet is in play range specified by user, then queue, otherwise discard */
2697         pkt_in_play_range = duration == AV_NOPTS_VALUE ||
2698                 (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
2699                 av_q2d(ic->streams[pkt->stream_index]->time_base) -
2700                 (double)(start_time != AV_NOPTS_VALUE ? start_time : 0) / 1000000
2701                 <= ((double)duration / 1000000);
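             /* i.e. keep the packet if (pts - stream start) in seconds, minus the
              * requested start position, does not exceed the requested duration;
              * start_time and duration are in AV_TIME_BASE units, hence the
              * division by 1000000 */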
2702         if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
2703             packet_queue_put(&is->audioq, pkt);
2704         } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
2705             packet_queue_put(&is->videoq, pkt);
2706         } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
2707             packet_queue_put(&is->subtitleq, pkt);
2708         } else {
2709             av_free_packet(pkt);
2710         }
2711     }
2712     /* wait until the end */
2713     while (!is->abort_request) {
2714         SDL_Delay(100);
2715     }
2716
2717     ret = 0;
2718  fail:
2719     /* close each stream */
2720     if (is->audio_stream >= 0)
2721         stream_component_close(is, is->audio_stream);
2722     if (is->video_stream >= 0)
2723         stream_component_close(is, is->video_stream);
2724     if (is->subtitle_stream >= 0)
2725         stream_component_close(is, is->subtitle_stream);
2726     if (is->ic) {
2727         avformat_close_input(&is->ic);
2728     }
2729
2730     if (ret != 0) {
2731         SDL_Event event;
2732
2733         event.type = FF_QUIT_EVENT;
2734         event.user.data1 = is;
2735         SDL_PushEvent(&event);
2736     }
2737     return 0;
2738 }
2739
2740 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2741 {
2742     VideoState *is;
2743
2744     is = av_mallocz(sizeof(VideoState));
2745     if (!is)
2746         return NULL;
2747     av_strlcpy(is->filename, filename, sizeof(is->filename));
2748     is->iformat = iformat;
2749     is->ytop    = 0;
2750     is->xleft   = 0;
2751
2752     /* start video display */
2753     is->pictq_mutex = SDL_CreateMutex();
2754     is->pictq_cond  = SDL_CreateCond();
2755
2756     is->subpq_mutex = SDL_CreateMutex();
2757     is->subpq_cond  = SDL_CreateCond();
2758
2759     packet_queue_init(&is->videoq);
2760     packet_queue_init(&is->audioq);
2761     packet_queue_init(&is->subtitleq);
2762
2763     is->av_sync_type = av_sync_type;
2764     is->read_tid     = SDL_CreateThread(read_thread, is);
2765     if (!is->read_tid) {
2766         av_free(is);
2767         return NULL;
2768     }
2769     return is;
2770 }
2771
2772 static void stream_cycle_channel(VideoState *is, int codec_type)
2773 {
2774     AVFormatContext *ic = is->ic;
2775     int start_index, stream_index;
2776     int old_index;
2777     AVStream *st;
2778
2779     if (codec_type == AVMEDIA_TYPE_VIDEO) {
2780         start_index = is->last_video_stream;
2781         old_index = is->video_stream;
2782     } else if (codec_type == AVMEDIA_TYPE_AUDIO) {
2783         start_index = is->last_audio_stream;
2784         old_index = is->audio_stream;
2785     } else {
2786         start_index = is->last_subtitle_stream;
2787         old_index = is->subtitle_stream;
2788     }
2789     stream_index = start_index;
2790     for (;;) {
2791         if (++stream_index >= is->ic->nb_streams)
2792         {
2793             if (codec_type == AVMEDIA_TYPE_SUBTITLE)
2794             {
2795                 stream_index = -1;
2796                 is->last_subtitle_stream = -1;
2797                 goto the_end;
2798             }
2799             if (start_index == -1)
2800                 return;
2801             stream_index = 0;
2802         }
2803         if (stream_index == start_index)
2804             return;
2805         st = ic->streams[stream_index];
2806         if (st->codec->codec_type == codec_type) {
2807             /* check that parameters are OK */
2808             switch (codec_type) {
2809             case AVMEDIA_TYPE_AUDIO:
2810                 if (st->codec->sample_rate != 0 &&
2811                     st->codec->channels != 0)
2812                     goto the_end;
2813                 break;
2814             case AVMEDIA_TYPE_VIDEO:
2815             case AVMEDIA_TYPE_SUBTITLE:
2816                 goto the_end;
2817             default:
2818                 break;
2819             }
2820         }
2821     }
2822  the_end:
2823     stream_component_close(is, old_index);
2824     stream_component_open(is, stream_index);
2825 }
2826
2827
2828 static void toggle_full_screen(VideoState *is)
2829 {
2830     av_unused int i;
2831     is_full_screen = !is_full_screen;
2832 #if defined(__APPLE__) && SDL_VERSION_ATLEAST(1, 2, 14)
2833     /* OS X needs to reallocate the SDL overlays */
2834     for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
2835         is->pictq[i].reallocate = 1;
2836     }
2837 #endif
2838     video_open(is, 1);
2839 }
2840
2841 static void toggle_pause(VideoState *is)
2842 {
2843     stream_toggle_pause(is);
2844     is->step = 0;
2845 }
2846
2847 static void step_to_next_frame(VideoState *is)
2848 {
2849     /* if the stream is paused, unpause it, then step */
2850     if (is->paused)
2851         stream_toggle_pause(is);
2852     is->step = 1;
2853 }
2854
2855 static void toggle_audio_display(VideoState *is)
2856 {
2857     int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2858     is->show_mode = (is->show_mode + 1) % SHOW_MODE_NB;
2859     fill_rectangle(screen,
2860                 is->xleft, is->ytop, is->width, is->height,
2861                 bgcolor);
2862     SDL_UpdateRect(screen, is->xleft, is->ytop, is->width, is->height);
2863 }
2864
2865 /* handle an event sent by the GUI */
2866 static void event_loop(VideoState *cur_stream)
2867 {
2868     SDL_Event event;
2869     double incr, pos, frac;
2870
2871     for (;;) {
2872         double x;
2873         SDL_WaitEvent(&event);
2874         switch (event.type) {
2875         case SDL_KEYDOWN:
2876             if (exit_on_keydown) {
2877                 do_exit(cur_stream);
2878                 break;
2879             }
2880             switch (event.key.keysym.sym) {
2881             case SDLK_ESCAPE:
2882             case SDLK_q:
2883                 do_exit(cur_stream);
2884                 break;
2885             case SDLK_f:
2886                 toggle_full_screen(cur_stream);
2887                 cur_stream->force_refresh = 1;
2888                 break;
2889             case SDLK_p:
2890             case SDLK_SPACE:
2891                 toggle_pause(cur_stream);
2892                 break;
2893             case SDLK_s: // S: Step to next frame
2894                 step_to_next_frame(cur_stream);
2895                 break;
2896             case SDLK_a:
2897                 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
2898                 break;
2899             case SDLK_v:
2900                 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
2901                 break;
2902             case SDLK_t:
2903                 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
2904                 break;
2905             case SDLK_w:
2906                 toggle_audio_display(cur_stream);
2907                 cur_stream->force_refresh = 1;
2908                 break;
2909             case SDLK_PAGEUP:
2910                 incr = 600.0;
2911                 goto do_seek;
2912             case SDLK_PAGEDOWN:
2913                 incr = -600.0;
2914                 goto do_seek;
2915             case SDLK_LEFT:
2916                 incr = -10.0;
2917                 goto do_seek;
2918             case SDLK_RIGHT:
2919                 incr = 10.0;
2920                 goto do_seek;
2921             case SDLK_UP:
2922                 incr = 60.0;
2923                 goto do_seek;
2924             case SDLK_DOWN:
2925                 incr = -60.0;
2926             do_seek:
2927                 if (seek_by_bytes) {
2928                     if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos >= 0) {
2929                         pos = cur_stream->video_current_pos;
2930                     } else if (cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos >= 0) {
2931                         pos = cur_stream->audio_pkt.pos;
2932                     } else
2933                         pos = avio_tell(cur_stream->ic->pb);
2934                     if (cur_stream->ic->bit_rate)
2935                         incr *= cur_stream->ic->bit_rate / 8.0;
2936                     else
2937                         incr *= 180000.0;
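                         /* incr has been converted from seconds to bytes using the
                          * container byte rate, or a fallback of 180000 bytes/s
                          * (1.44 Mb/s) when the bit rate is unknown */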
2938                     pos += incr;
2939                     stream_seek(cur_stream, pos, incr, 1);
2940                 } else {
2941                     pos = get_master_clock(cur_stream);
2942                     pos += incr;
2943                     stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
2944                 }
2945                 break;
2946             default:
2947                 break;
2948             }
2949             break;
2950         case SDL_VIDEOEXPOSE:
2951             cur_stream->force_refresh = 1;
2952             break;
2953         case SDL_MOUSEBUTTONDOWN:
2954             if (exit_on_mousedown) {
2955                 do_exit(cur_stream);
2956                 break;
2957             }
2958         case SDL_MOUSEMOTION:
2959             if (event.type == SDL_MOUSEBUTTONDOWN) {
2960                 x = event.button.x;
2961             } else {
2962                 if (event.motion.state != SDL_PRESSED)
2963                     break;
2964                 x = event.motion.x;
2965             }
2966             if (seek_by_bytes || cur_stream->ic->duration <= 0) {
2967                 uint64_t size = avio_size(cur_stream->ic->pb);
2968                 stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
2969             } else {
2970                 int64_t ts;
2971                 int ns, hh, mm, ss;
2972                 int tns, thh, tmm, tss;
2973                 tns  = cur_stream->ic->duration / 1000000LL;
2974                 thh  = tns / 3600;
2975                 tmm  = (tns % 3600) / 60;
2976                 tss  = (tns % 60);
2977                 frac = x / cur_stream->width;
2978                 ns   = frac * tns;
2979                 hh   = ns / 3600;
2980                 mm   = (ns % 3600) / 60;
2981                 ss   = (ns % 60);
2982                 fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
2983                         hh, mm, ss, thh, tmm, tss);
2984                 ts = frac * cur_stream->ic->duration;
2985                 if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
2986                     ts += cur_stream->ic->start_time;
2987                 stream_seek(cur_stream, ts, 0, 0);
2988             }
2989             break;
2990         case SDL_VIDEORESIZE:
2991             screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
2992                                       SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
2993             screen_width  = cur_stream->width  = event.resize.w;
2994             screen_height = cur_stream->height = event.resize.h;
2995             cur_stream->force_refresh = 1;
2996             break;
2997         case SDL_QUIT:
2998         case FF_QUIT_EVENT:
2999             do_exit(cur_stream);
3000             break;
3001         case FF_ALLOC_EVENT:
3002             alloc_picture(event.user.data1);
3003             break;
3004         case FF_REFRESH_EVENT:
3005             video_refresh(event.user.data1);
3006             cur_stream->refresh = 0;
3007             break;
3008         default:
3009             break;
3010         }
3011     }
3012 }
3013
3014 static int opt_frame_size(const char *opt, const char *arg)
3015 {
3016     av_log(NULL, AV_LOG_WARNING, "Option -s is deprecated, use -video_size.\n");
3017     return opt_default("video_size", arg);
3018 }
3019
3020 static int opt_width(const char *opt, const char *arg)
3021 {
3022     screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
3023     return 0;
3024 }
3025
3026 static int opt_height(const char *opt, const char *arg)
3027 {
3028     screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
3029     return 0;
3030 }
3031
3032 static int opt_format(const char *opt, const char *arg)
3033 {
3034     file_iformat = av_find_input_format(arg);
3035     if (!file_iformat) {
3036         fprintf(stderr, "Unknown input format: %s\n", arg);
3037         return AVERROR(EINVAL);
3038     }
3039     return 0;
3040 }
3041
3042 static int opt_frame_pix_fmt(const char *opt, const char *arg)
3043 {
3044     av_log(NULL, AV_LOG_WARNING, "Option -pix_fmt is deprecated, use -pixel_format.\n");
3045     return opt_default("pixel_format", arg);
3046 }
3047
3048 static int opt_sync(const char *opt, const char *arg)
3049 {
3050     if (!strcmp(arg, "audio"))
3051         av_sync_type = AV_SYNC_AUDIO_MASTER;
3052     else if (!strcmp(arg, "video"))
3053         av_sync_type = AV_SYNC_VIDEO_MASTER;
3054     else if (!strcmp(arg, "ext"))
3055         av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
3056     else {
3057         fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
3058         exit(1);
3059     }
3060     return 0;
3061 }
3062
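     /* -ss and -t values go through parse_time_or_die(), which accepts either a
        plain number of seconds (e.g. "90") or a timestamp such as "00:01:30.5" */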
3063 static int opt_seek(const char *opt, const char *arg)
3064 {
3065     start_time = parse_time_or_die(opt, arg, 1);
3066     return 0;
3067 }
3068
3069 static int opt_duration(const char *opt, const char *arg)
3070 {
3071     duration = parse_time_or_die(opt, arg, 1);
3072     return 0;
3073 }
3074
3075 static int opt_show_mode(const char *opt, const char *arg)
3076 {
3077     show_mode = !strcmp(arg, "video") ? SHOW_MODE_VIDEO :
3078                 !strcmp(arg, "waves") ? SHOW_MODE_WAVES :
3079                 !strcmp(arg, "rdft" ) ? SHOW_MODE_RDFT  :
3080                 parse_number_or_die(opt, arg, OPT_INT, 0, SHOW_MODE_NB-1);
3081     return 0;
3082 }
3083
3084 static void opt_input_file(void *optctx, const char *filename)
3085 {
3086     if (input_filename) {
3087         fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
3088                 filename, input_filename);
3089         exit_program(1);
3090     }
3091     if (!strcmp(filename, "-"))
3092         filename = "pipe:";
3093     input_filename = filename;
3094 }
3095
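     /* Forced decoder selection for the "codec" option: the stream type is taken
        from the last character of the option name as typed on the command line
        (e.g. the trailing 'a' of "-codec:a"), which is assumed here to be the
        per-stream specifier handled by cmdutils. */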
3096 static int opt_codec(void *o, const char *opt, const char *arg)
3097 {
3098     switch(opt[strlen(opt)-1]){
3099     case 'a' :    audio_codec_name = arg; break;
3100     case 's' : subtitle_codec_name = arg; break;
3101     case 'v' :    video_codec_name = arg; break;
3102     }
3103     return 0;
3104 }
3105
3106 static int dummy;
3107
3108 static const OptionDef options[] = {
3109 #include "cmdutils_common_opts.h"
3110     { "x", HAS_ARG, { (void*)opt_width }, "force displayed width", "width" },
3111     { "y", HAS_ARG, { (void*)opt_height }, "force displayed height", "height" },
3112     { "s", HAS_ARG | OPT_VIDEO, { (void*)opt_frame_size }, "set frame size (WxH or abbreviation)", "size" },
3113     { "fs", OPT_BOOL, { (void*)&is_full_screen }, "force full screen" },
3114     { "an", OPT_BOOL, { (void*)&audio_disable }, "disable audio" },
3115     { "vn", OPT_BOOL, { (void*)&video_disable }, "disable video" },
3116     { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&wanted_stream[AVMEDIA_TYPE_AUDIO] }, "select desired audio stream", "stream_number" },
3117     { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&wanted_stream[AVMEDIA_TYPE_VIDEO] }, "select desired video stream", "stream_number" },
3118     { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&wanted_stream[AVMEDIA_TYPE_SUBTITLE] }, "select desired subtitle stream", "stream_number" },
3119     { "ss", HAS_ARG, { (void*)opt_seek }, "seek to a given position in seconds", "pos" },
3120     { "t", HAS_ARG, { (void*)opt_duration }, "play \"duration\" seconds of audio/video", "duration" },
3121     { "bytes", OPT_INT | HAS_ARG, { (void*)&seek_by_bytes }, "seek by bytes 0=off 1=on -1=auto", "val" },
3122     { "nodisp", OPT_BOOL, { (void*)&display_disable }, "disable graphical display" },
3123     { "f", HAS_ARG, { (void*)opt_format }, "force format", "fmt" },
3124     { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, { (void*)opt_frame_pix_fmt }, "set pixel format", "format" },
3125     { "stats", OPT_BOOL | OPT_EXPERT, { (void*)&show_status }, "show status", "" },
3126     { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&workaround_bugs }, "work around bugs", "" },
3127     { "fast", OPT_BOOL | OPT_EXPERT, { (void*)&fast }, "non-spec-compliant optimizations", "" },
3128     { "genpts", OPT_BOOL | OPT_EXPERT, { (void*)&genpts }, "generate pts", "" },
3129     { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&decoder_reorder_pts }, "let decoder reorder pts 0=off 1=on -1=auto", ""},
3130     { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&lowres }, "decode at reduced resolution (1=half, 2=quarter, ...)", "" },
3131     { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&skip_loop_filter }, "skip loop filtering for the selected frames", "" },
3132     { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&skip_frame }, "skip decoding of the selected frames", "" },
3133     { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&skip_idct }, "skip the IDCT for the selected frames", "" },
3134     { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&idct }, "set idct algo", "algo" },
3135     { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&error_concealment }, "set error concealment options", "bit_mask" },
3136     { "sync", HAS_ARG | OPT_EXPERT, { (void*)opt_sync }, "set audio-video sync type (type=audio/video/ext)", "type" },
3137     { "autoexit", OPT_BOOL | OPT_EXPERT, { (void*)&autoexit }, "exit at the end", "" },
3138     { "exitonkeydown", OPT_BOOL | OPT_EXPERT, { (void*)&exit_on_keydown }, "exit on key down", "" },
3139     { "exitonmousedown", OPT_BOOL | OPT_EXPERT, { (void*)&exit_on_mousedown }, "exit on mouse down", "" },
3140     { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&loop }, "set number of times the playback shall be looped", "loop count" },
3141     { "framedrop", OPT_BOOL | OPT_EXPERT, { (void*)&framedrop }, "drop frames when the CPU is too slow", "" },
3142     { "window_title", OPT_STRING | HAS_ARG, { (void*)&window_title }, "set window title", "window title" },
3143 #if CONFIG_AVFILTER
3144     { "vf", OPT_STRING | HAS_ARG, { (void*)&vfilters }, "video filters", "filter list" },
3145 #endif
3146     { "rdftspeed", OPT_INT | HAS_ARG | OPT_AUDIO | OPT_EXPERT, { (void*)&rdftspeed }, "rdft speed", "msecs" },
3147     { "showmode", HAS_ARG, { (void*)opt_show_mode }, "select show mode (0 = video, 1 = waves, 2 = RDFT)", "mode" },
3148     { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, { (void*)opt_default }, "generic catch-all option", "" },
3149     { "i", OPT_BOOL, { (void*)&dummy }, "read specified file", "input_file" },
3150     { "codec", HAS_ARG | OPT_FUNC2, { (void*)opt_codec }, "force decoder", "decoder" },
3151     { NULL, },
3152 };
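     /* Illustrative example (not from the source): a command line exercising a few
        of the options above might look like
            ffplay -fs -sync ext -ss 00:01:30 -t 20 -showmode waves input.mkv
        i.e. play 20 seconds of the hypothetical file "input.mkv" starting at 1:30,
        full screen, synced to the external clock, showing the audio waveform
        instead of the video frames. */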
3153
3154 static void show_usage(void)
3155 {
3156     av_log(NULL, AV_LOG_INFO, "Simple media player\n");
3157     av_log(NULL, AV_LOG_INFO, "usage: %s [options] input_file\n", program_name);
3158     av_log(NULL, AV_LOG_INFO, "\n");
3159 }
3160
3161 static int opt_help(const char *opt, const char *arg)
3162 {
3163     av_log_set_callback(log_callback_help);
3164     show_usage();
3165     show_help_options(options, "Main options:\n",
3166                       OPT_EXPERT, 0);
3167     show_help_options(options, "\nAdvanced options:\n",
3168                       OPT_EXPERT, OPT_EXPERT);
3169     printf("\n");
3170     show_help_children(avcodec_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3171     show_help_children(avformat_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3172 #if !CONFIG_AVFILTER
3173     show_help_children(sws_get_class(), AV_OPT_FLAG_ENCODING_PARAM);
3174 #endif
3175     printf("\nWhile playing:\n"
3176            "q, ESC              quit\n"
3177            "f                   toggle full screen\n"
3178            "p, SPC              pause\n"
3179            "a                   cycle audio channel\n"
3180            "v                   cycle video channel\n"
3181            "t                   cycle subtitle channel\n"
3182            "w                   show audio waves\n"
3183            "s                   activate frame-step mode\n"
3184            "left/right          seek backward/forward 10 seconds\n"
3185            "down/up             seek backward/forward 1 minute\n"
3186            "page down/page up   seek backward/forward 10 minutes\n"
3187            "mouse click         seek to percentage in file corresponding to fraction of width\n"
3188            );
3189     return 0;
3190 }
3191
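     /* libavcodec lock manager callback backed by SDL mutexes; it must return 0 on
        success and nonzero on failure, and is registered with av_lockmgr_register()
        in main() below */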
3192 static int lockmgr(void **mtx, enum AVLockOp op)
3193 {
3194     switch (op) {
3195     case AV_LOCK_CREATE:
3196         *mtx = SDL_CreateMutex();
3197         if (!*mtx)
3198             return 1;
3199         return 0;
3200     case AV_LOCK_OBTAIN:
3201         return !!SDL_LockMutex(*mtx);
3202     case AV_LOCK_RELEASE:
3203         return !!SDL_UnlockMutex(*mtx);
3204     case AV_LOCK_DESTROY:
3205         SDL_DestroyMutex(*mtx);
3206         return 0;
3207     }
3208     return 1;
3209 }
3210
3211 /* program entry point */
3212 int main(int argc, char **argv)
3213 {
3214     int flags;
3215     VideoState *is;
3216
3217     av_log_set_flags(AV_LOG_SKIP_REPEATED);
3218     parse_loglevel(argc, argv, options);
3219
3220     /* register all codecs, demuxers and protocols */
3221     avcodec_register_all();
3222 #if CONFIG_AVDEVICE
3223     avdevice_register_all();
3224 #endif
3225 #if CONFIG_AVFILTER
3226     avfilter_register_all();
3227 #endif
3228     av_register_all();
3229     avformat_network_init();
3230
3231     init_opts();
3232
3233     signal(SIGINT , sigterm_handler); /* Interrupt (ANSI).    */
3234     signal(SIGTERM, sigterm_handler); /* Termination (ANSI).  */
3235
3236     show_banner(argc, argv, options);
3237
3238     parse_options(NULL, argc, argv, options, opt_input_file);
3239
3240     if (!input_filename) {
3241         show_usage();
3242         fprintf(stderr, "An input file must be specified\n");
3243         fprintf(stderr, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
3244         exit(1);
3245     }
3246
3247     if (display_disable) {
3248         video_disable = 1;
3249     }
3250     flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3251     if (audio_disable)
3252         flags &= ~SDL_INIT_AUDIO;
3253 #if !defined(__MINGW32__) && !defined(__APPLE__)
3254     flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
3255 #endif
3256     if (SDL_Init(flags)) {
3257         fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
3258         fprintf(stderr, "(Did you set the DISPLAY variable?)\n");
3259         exit(1);
3260     }
3261
3262     if (!display_disable) {
3263 #if HAVE_SDL_VIDEO_SIZE
3264         const SDL_VideoInfo *vi = SDL_GetVideoInfo();
3265         fs_screen_width = vi->current_w;
3266         fs_screen_height = vi->current_h;
3267 #endif
3268     }
3269
3270     SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
3271     SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3272     SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3273
3274     if (av_lockmgr_register(lockmgr)) {
3275         fprintf(stderr, "Could not initialize lock manager!\n");
3276         do_exit(NULL);
3277     }
3278
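     /* the flush packet is a sentinel that gets queued on seeks so that the decoder
        threads know to flush their codec buffers */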
3279     av_init_packet(&flush_pkt);
3280     flush_pkt.data = (uint8_t *)(intptr_t)"FLUSH";
3281
3282     is = stream_open(input_filename, file_iformat);
3283     if (!is) {
3284         fprintf(stderr, "Failed to initialize VideoState!\n");
3285         do_exit(NULL);
3286     }
3287
3288     event_loop(is);
3289
3290     /* never reached: event_loop() exits the program via do_exit() */
3291
3292     return 0;
3293 }