ffplay.c  (from the ffmpeg.git repository)
1 /*
2  * Copyright (c) 2003 Fabrice Bellard
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20
21 /**
22  * @file
23  * simple media player based on the FFmpeg libraries
24  */
25
26 #include "config.h"
27 #include <inttypes.h>
28 #include <math.h>
29 #include <limits.h>
30 #include <signal.h>
31 #include "libavutil/avstring.h"
32 #include "libavutil/colorspace.h"
33 #include "libavutil/mathematics.h"
34 #include "libavutil/pixdesc.h"
35 #include "libavutil/imgutils.h"
36 #include "libavutil/dict.h"
37 #include "libavutil/parseutils.h"
38 #include "libavutil/samplefmt.h"
39 #include "libavutil/avassert.h"
40 #include "libavformat/avformat.h"
41 #include "libavdevice/avdevice.h"
42 #include "libswscale/swscale.h"
43 #include "libavutil/opt.h"
44 #include "libavcodec/avfft.h"
45 #include "libswresample/swresample.h"
46
47 #if CONFIG_AVFILTER
48 # include "libavfilter/avcodec.h"
49 # include "libavfilter/avfilter.h"
50 # include "libavfilter/avfiltergraph.h"
51 # include "libavfilter/buffersink.h"
52 #endif
53
54 #include <SDL.h>
55 #include <SDL_thread.h>
56
57 #include "cmdutils.h"
58
59 #include <unistd.h>
60 #include <assert.h>
61
62 const char program_name[] = "ffplay";
63 const int program_birth_year = 2003;
64
65 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
66 #define MIN_FRAMES 5
67
68 /* SDL audio buffer size, in samples. Should be small to have precise
69    A/V sync as SDL does not have hardware buffer fullness info. */
70 #define SDL_AUDIO_BUFFER_SIZE 1024
71
72 /* no AV sync correction is done if below the AV sync threshold */
73 #define AV_SYNC_THRESHOLD 0.01
74 /* no AV correction is done if too big error */
75 #define AV_NOSYNC_THRESHOLD 10.0
76
77 /* maximum audio speed change to get correct sync */
78 #define SAMPLE_CORRECTION_PERCENT_MAX 10
79
80 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
81 #define AUDIO_DIFF_AVG_NB   20
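/*
 * The two thresholds above are in seconds: compute_target_delay() below uses
 * AV_SYNC_THRESHOLD as the smallest error worth correcting and stops trying to
 * correct at all beyond AV_NOSYNC_THRESHOLD. SAMPLE_CORRECTION_PERCENT_MAX and
 * AUDIO_DIFF_AVG_NB bound the audio-side correction, which is implemented
 * further down in the file.
 */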
82
83 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
84 #define SAMPLE_ARRAY_SIZE (2 * 65536)
85
86 static int sws_flags = SWS_BICUBIC;
87
88 typedef struct PacketQueue {
89     AVPacketList *first_pkt, *last_pkt;
90     int nb_packets;
91     int size;
92     int abort_request;
93     SDL_mutex *mutex;
94     SDL_cond *cond;
95 } PacketQueue;
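/*
 * PacketQueue is the producer/consumer buffer between the demuxing (read)
 * thread and the decoders: packets are appended with packet_queue_put() and
 * taken with packet_queue_get(), which can block on the condition variable
 * until data arrives or abort_request is set. The size field counts buffered
 * bytes (payload plus list node) so callers can bound total buffering,
 * cf. MAX_QUEUE_SIZE above.
 */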
96
97 #define VIDEO_PICTURE_QUEUE_SIZE 2
98 #define SUBPICTURE_QUEUE_SIZE 4
99
100 typedef struct VideoPicture {
101     double pts;                                  ///< presentation time stamp for this picture
102     int64_t pos;                                 ///< byte position in file
103     int skip;
104     SDL_Overlay *bmp;
105     int width, height; /* source height & width */
106     AVRational sample_aspect_ratio;
107     int allocated;
108     int reallocate;
109
110 #if CONFIG_AVFILTER
111     AVFilterBufferRef *picref;
112 #endif
113 } VideoPicture;
114
115 typedef struct SubPicture {
116     double pts; /* presentation time stamp for this picture */
117     AVSubtitle sub;
118 } SubPicture;
119
120 enum {
121     AV_SYNC_AUDIO_MASTER, /* default choice */
122     AV_SYNC_VIDEO_MASTER,
123     AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
124 };
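/*
 * The sync type selects the master clock that the other streams are slaved to;
 * by default audio is the master and video frames are scheduled against it
 * (see get_master_clock() and compute_target_delay() below). When the
 * preferred stream is missing, get_master_clock() falls back to another clock.
 */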
125
126 typedef struct VideoState {
127     SDL_Thread *read_tid;
128     SDL_Thread *video_tid;
129     SDL_Thread *refresh_tid;
130     AVInputFormat *iformat;
131     int no_background;
132     int abort_request;
133     int force_refresh;
134     int paused;
135     int last_paused;
136     int seek_req;
137     int seek_flags;
138     int64_t seek_pos;
139     int64_t seek_rel;
140     int read_pause_return;
141     AVFormatContext *ic;
142
143     int audio_stream;
144
145     int av_sync_type;
146     double external_clock; /* external clock base */
147     int64_t external_clock_time;
148
149     double audio_clock;
150     double audio_diff_cum; /* used for AV difference average computation */
151     double audio_diff_avg_coef;
152     double audio_diff_threshold;
153     int audio_diff_avg_count;
154     AVStream *audio_st;
155     PacketQueue audioq;
156     int audio_hw_buf_size;
157     DECLARE_ALIGNED(16,uint8_t,audio_buf2)[AVCODEC_MAX_AUDIO_FRAME_SIZE * 4];
158     uint8_t silence_buf[SDL_AUDIO_BUFFER_SIZE];
159     uint8_t *audio_buf;
160     uint8_t *audio_buf1;
161     unsigned int audio_buf_size; /* in bytes */
162     int audio_buf_index; /* in bytes */
163     int audio_write_buf_size;
164     AVPacket audio_pkt_temp;
165     AVPacket audio_pkt;
166     enum AVSampleFormat audio_src_fmt;
167     enum AVSampleFormat audio_tgt_fmt;
168     int audio_src_channels;
169     int audio_tgt_channels;
170     int64_t audio_src_channel_layout;
171     int64_t audio_tgt_channel_layout;
172     int audio_src_freq;
173     int audio_tgt_freq;
174     struct SwrContext *swr_ctx;
175     double audio_current_pts;
176     double audio_current_pts_drift;
177     int frame_drops_early;
178     int frame_drops_late;
179     AVFrame *frame;
180
181     enum ShowMode {
182         SHOW_MODE_NONE = -1, SHOW_MODE_VIDEO = 0, SHOW_MODE_WAVES, SHOW_MODE_RDFT, SHOW_MODE_NB
183     } show_mode;
184     int16_t sample_array[SAMPLE_ARRAY_SIZE];
185     int sample_array_index;
186     int last_i_start;
187     RDFTContext *rdft;
188     int rdft_bits;
189     FFTSample *rdft_data;
190     int xpos;
191
192     SDL_Thread *subtitle_tid;
193     int subtitle_stream;
194     int subtitle_stream_changed;
195     AVStream *subtitle_st;
196     PacketQueue subtitleq;
197     SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
198     int subpq_size, subpq_rindex, subpq_windex;
199     SDL_mutex *subpq_mutex;
200     SDL_cond *subpq_cond;
201
202     double frame_timer;
203     double frame_last_pts;
204     double frame_last_duration;
205     double frame_last_dropped_pts;
206     double frame_last_returned_time;
207     double frame_last_filter_delay;
208     int64_t frame_last_dropped_pos;
209     double video_clock;                          ///< pts of last decoded frame / predicted pts of next decoded frame
210     int video_stream;
211     AVStream *video_st;
212     PacketQueue videoq;
213     double video_current_pts;                    ///< current displayed pts (different from video_clock if frame fifos are used)
214     double video_current_pts_drift;              ///< video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
215     int64_t video_current_pos;                   ///< current displayed file pos
216     VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
217     int pictq_size, pictq_rindex, pictq_windex;
218     SDL_mutex *pictq_mutex;
219     SDL_cond *pictq_cond;
220 #if !CONFIG_AVFILTER
221     struct SwsContext *img_convert_ctx;
222 #endif
223
224     char filename[1024];
225     int width, height, xleft, ytop;
226     int step;
227
228 #if CONFIG_AVFILTER
229     AVFilterContext *out_video_filter;          ///< the last filter in the video chain
230 #endif
231
232     int refresh;
233     int last_video_stream, last_audio_stream, last_subtitle_stream;
234 } VideoState;
235
236 typedef struct AllocEventProps {
237     VideoState *is;
238     AVFrame *frame;
239 } AllocEventProps;
240
241 static int opt_help(const char *opt, const char *arg);
242
243 /* options specified by the user */
244 static AVInputFormat *file_iformat;
245 static const char *input_filename;
246 static const char *window_title;
247 static int fs_screen_width;
248 static int fs_screen_height;
249 static int screen_width  = 0;
250 static int screen_height = 0;
251 static int audio_disable;
252 static int video_disable;
253 static int wanted_stream[AVMEDIA_TYPE_NB] = {
254     [AVMEDIA_TYPE_AUDIO]    = -1,
255     [AVMEDIA_TYPE_VIDEO]    = -1,
256     [AVMEDIA_TYPE_SUBTITLE] = -1,
257 };
258 static int seek_by_bytes = -1;
259 static int display_disable;
260 static int show_status = 1;
261 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
262 static int64_t start_time = AV_NOPTS_VALUE;
263 static int64_t duration = AV_NOPTS_VALUE;
264 static int workaround_bugs = 1;
265 static int fast = 0;
266 static int genpts = 0;
267 static int lowres = 0;
268 static int idct = FF_IDCT_AUTO;
269 static enum AVDiscard skip_frame       = AVDISCARD_DEFAULT;
270 static enum AVDiscard skip_idct        = AVDISCARD_DEFAULT;
271 static enum AVDiscard skip_loop_filter = AVDISCARD_DEFAULT;
272 static int error_concealment = 3;
273 static int decoder_reorder_pts = -1;
274 static int autoexit;
275 static int exit_on_keydown;
276 static int exit_on_mousedown;
277 static int loop = 1;
278 static int framedrop = -1;
279 static enum ShowMode show_mode = SHOW_MODE_NONE;
280 static const char *audio_codec_name;
281 static const char *subtitle_codec_name;
282 static const char *video_codec_name;
283 static int rdftspeed = 20;
284 #if CONFIG_AVFILTER
285 static char *vfilters = NULL;
286 #endif
287
288 /* current context */
289 static int is_full_screen;
290 static int64_t audio_callback_time;
291
292 static AVPacket flush_pkt;
293
294 #define FF_ALLOC_EVENT   (SDL_USEREVENT)
295 #define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
296 #define FF_QUIT_EVENT    (SDL_USEREVENT + 2)
297
298 static SDL_Surface *screen;
299
300 void av_noreturn exit_program(int ret)
301 {
302     exit(ret);
303 }
304
305 static int packet_queue_put_private(PacketQueue *q, AVPacket *pkt)
306 {
307     AVPacketList *pkt1;
308
309     if (q->abort_request)
310        return -1;
311
312     pkt1 = av_malloc(sizeof(AVPacketList));
313     if (!pkt1)
314         return -1;
315     pkt1->pkt = *pkt;
316     pkt1->next = NULL;
317
318     if (!q->last_pkt)
319         q->first_pkt = pkt1;
320     else
321         q->last_pkt->next = pkt1;
322     q->last_pkt = pkt1;
323     q->nb_packets++;
324     q->size += pkt1->pkt.size + sizeof(*pkt1);
325     /* XXX: should duplicate packet data in DV case */
326     SDL_CondSignal(q->cond);
327     return 0;
328 }
329
330 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
331 {
332     int ret;
333
334     /* duplicate the packet */
335     if (pkt != &flush_pkt && av_dup_packet(pkt) < 0)
336         return -1;
337
338     SDL_LockMutex(q->mutex);
339     ret = packet_queue_put_private(q, pkt);
340     SDL_UnlockMutex(q->mutex);
341
342     if (pkt != &flush_pkt && ret < 0)
343         av_free_packet(pkt);
344
345     return ret;
346 }
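/*
 * packet_queue_put() first makes the packet own its data (av_dup_packet) so
 * the demuxer may reuse its internal buffer; the special flush_pkt is pushed
 * as-is, which lets the decoder threads recognise it by its data pointer and
 * flush their codec state (see get_video_frame() below).
 */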
347
348 /* packet queue handling */
349 static void packet_queue_init(PacketQueue *q)
350 {
351     memset(q, 0, sizeof(PacketQueue));
352     q->mutex = SDL_CreateMutex();
353     q->cond = SDL_CreateCond();
354     q->abort_request = 1;
355 }
356
357 static void packet_queue_flush(PacketQueue *q)
358 {
359     AVPacketList *pkt, *pkt1;
360
361     SDL_LockMutex(q->mutex);
362     for (pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
363         pkt1 = pkt->next;
364         av_free_packet(&pkt->pkt);
365         av_freep(&pkt);
366     }
367     q->last_pkt = NULL;
368     q->first_pkt = NULL;
369     q->nb_packets = 0;
370     q->size = 0;
371     SDL_UnlockMutex(q->mutex);
372 }
373
374 static void packet_queue_destroy(PacketQueue *q)
375 {
376     packet_queue_flush(q);
377     SDL_DestroyMutex(q->mutex);
378     SDL_DestroyCond(q->cond);
379 }
380
381 static void packet_queue_abort(PacketQueue *q)
382 {
383     SDL_LockMutex(q->mutex);
384
385     q->abort_request = 1;
386
387     SDL_CondSignal(q->cond);
388
389     SDL_UnlockMutex(q->mutex);
390 }
391
392 static void packet_queue_start(PacketQueue *q)
393 {
394     SDL_LockMutex(q->mutex);
395     q->abort_request = 0;
396     packet_queue_put_private(q, &flush_pkt);
397     SDL_UnlockMutex(q->mutex);
398 }
399
400 /* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
401 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
402 {
403     AVPacketList *pkt1;
404     int ret;
405
406     SDL_LockMutex(q->mutex);
407
408     for (;;) {
409         if (q->abort_request) {
410             ret = -1;
411             break;
412         }
413
414         pkt1 = q->first_pkt;
415         if (pkt1) {
416             q->first_pkt = pkt1->next;
417             if (!q->first_pkt)
418                 q->last_pkt = NULL;
419             q->nb_packets--;
420             q->size -= pkt1->pkt.size + sizeof(*pkt1);
421             *pkt = pkt1->pkt;
422             av_free(pkt1);
423             ret = 1;
424             break;
425         } else if (!block) {
426             ret = 0;
427             break;
428         } else {
429             SDL_CondWait(q->cond, q->mutex);
430         }
431     }
432     SDL_UnlockMutex(q->mutex);
433     return ret;
434 }
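/*
 * A typical consumer loop around packet_queue_get() (a minimal sketch, not
 * lifted verbatim from this file):
 *
 *     AVPacket pkt;
 *     for (;;) {
 *         if (packet_queue_get(&is->videoq, &pkt, 1) < 0)
 *             break;                  // the queue was aborted, stop decoding
 *         // ... decode pkt here ...
 *         av_free_packet(&pkt);       // release the duplicated packet data
 *     }
 */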
435
436 static inline void fill_rectangle(SDL_Surface *screen,
437                                   int x, int y, int w, int h, int color)
438 {
439     SDL_Rect rect;
440     rect.x = x;
441     rect.y = y;
442     rect.w = w;
443     rect.h = h;
444     SDL_FillRect(screen, &rect, color);
445 }
446
447 #define ALPHA_BLEND(a, oldp, newp, s)\
448 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
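/*
 * ALPHA_BLEND mixes newp into oldp with alpha a (0..255); the shift s accounts
 * for newp being an accumulated sum of 2^s subsampled chroma values. With
 * a == 0 it returns oldp unchanged, with a == 255 it returns newp >> s, i.e.
 * the average of the accumulated samples.
 */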
449
450 #define RGBA_IN(r, g, b, a, s)\
451 {\
452     unsigned int v = ((const uint32_t *)(s))[0];\
453     a = (v >> 24) & 0xff;\
454     r = (v >> 16) & 0xff;\
455     g = (v >> 8) & 0xff;\
456     b = v & 0xff;\
457 }
458
459 #define YUVA_IN(y, u, v, a, s, pal)\
460 {\
461     unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
462     a = (val >> 24) & 0xff;\
463     y = (val >> 16) & 0xff;\
464     u = (val >> 8) & 0xff;\
465     v = val & 0xff;\
466 }
467
468 #define YUVA_OUT(d, y, u, v, a)\
469 {\
470     ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
471 }
472
473
474 #define BPP 1
475
476 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
477 {
478     int wrap, wrap3, width2, skip2;
479     int y, u, v, a, u1, v1, a1, w, h;
480     uint8_t *lum, *cb, *cr;
481     const uint8_t *p;
482     const uint32_t *pal;
483     int dstx, dsty, dstw, dsth;
484
485     dstw = av_clip(rect->w, 0, imgw);
486     dsth = av_clip(rect->h, 0, imgh);
487     dstx = av_clip(rect->x, 0, imgw - dstw);
488     dsty = av_clip(rect->y, 0, imgh - dsth);
489     lum = dst->data[0] + dsty * dst->linesize[0];
490     cb  = dst->data[1] + (dsty >> 1) * dst->linesize[1];
491     cr  = dst->data[2] + (dsty >> 1) * dst->linesize[2];
492
493     width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
494     skip2 = dstx >> 1;
495     wrap = dst->linesize[0];
496     wrap3 = rect->pict.linesize[0];
497     p = rect->pict.data[0];
498     pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */
499
500     if (dsty & 1) {
501         lum += dstx;
502         cb += skip2;
503         cr += skip2;
504
505         if (dstx & 1) {
506             YUVA_IN(y, u, v, a, p, pal);
507             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
508             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
509             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
510             cb++;
511             cr++;
512             lum++;
513             p += BPP;
514         }
515         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
516             YUVA_IN(y, u, v, a, p, pal);
517             u1 = u;
518             v1 = v;
519             a1 = a;
520             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
521
522             YUVA_IN(y, u, v, a, p + BPP, pal);
523             u1 += u;
524             v1 += v;
525             a1 += a;
526             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
527             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
528             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
529             cb++;
530             cr++;
531             p += 2 * BPP;
532             lum += 2;
533         }
534         if (w) {
535             YUVA_IN(y, u, v, a, p, pal);
536             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
537             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
538             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
539             p++;
540             lum++;
541         }
542         p += wrap3 - dstw * BPP;
543         lum += wrap - dstw - dstx;
544         cb += dst->linesize[1] - width2 - skip2;
545         cr += dst->linesize[2] - width2 - skip2;
546     }
547     for (h = dsth - (dsty & 1); h >= 2; h -= 2) {
548         lum += dstx;
549         cb += skip2;
550         cr += skip2;
551
552         if (dstx & 1) {
553             YUVA_IN(y, u, v, a, p, pal);
554             u1 = u;
555             v1 = v;
556             a1 = a;
557             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
558             p += wrap3;
559             lum += wrap;
560             YUVA_IN(y, u, v, a, p, pal);
561             u1 += u;
562             v1 += v;
563             a1 += a;
564             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
565             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
566             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
567             cb++;
568             cr++;
569             p += -wrap3 + BPP;
570             lum += -wrap + 1;
571         }
572         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
573             YUVA_IN(y, u, v, a, p, pal);
574             u1 = u;
575             v1 = v;
576             a1 = a;
577             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
578
579             YUVA_IN(y, u, v, a, p + BPP, pal);
580             u1 += u;
581             v1 += v;
582             a1 += a;
583             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
584             p += wrap3;
585             lum += wrap;
586
587             YUVA_IN(y, u, v, a, p, pal);
588             u1 += u;
589             v1 += v;
590             a1 += a;
591             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
592
593             YUVA_IN(y, u, v, a, p + BPP, pal);
594             u1 += u;
595             v1 += v;
596             a1 += a;
597             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
598
599             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
600             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
601
602             cb++;
603             cr++;
604             p += -wrap3 + 2 * BPP;
605             lum += -wrap + 2;
606         }
607         if (w) {
608             YUVA_IN(y, u, v, a, p, pal);
609             u1 = u;
610             v1 = v;
611             a1 = a;
612             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
613             p += wrap3;
614             lum += wrap;
615             YUVA_IN(y, u, v, a, p, pal);
616             u1 += u;
617             v1 += v;
618             a1 += a;
619             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
620             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
621             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
622             cb++;
623             cr++;
624             p += -wrap3 + BPP;
625             lum += -wrap + 1;
626         }
627         p += wrap3 + (wrap3 - dstw * BPP);
628         lum += wrap + (wrap - dstw - dstx);
629         cb += dst->linesize[1] - width2 - skip2;
630         cr += dst->linesize[2] - width2 - skip2;
631     }
632     /* handle odd height */
633     if (h) {
634         lum += dstx;
635         cb += skip2;
636         cr += skip2;
637
638         if (dstx & 1) {
639             YUVA_IN(y, u, v, a, p, pal);
640             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
641             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
642             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
643             cb++;
644             cr++;
645             lum++;
646             p += BPP;
647         }
648         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
649             YUVA_IN(y, u, v, a, p, pal);
650             u1 = u;
651             v1 = v;
652             a1 = a;
653             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
654
655             YUVA_IN(y, u, v, a, p + BPP, pal);
656             u1 += u;
657             v1 += v;
658             a1 += a;
659             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
660             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
661             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
662             cb++;
663             cr++;
664             p += 2 * BPP;
665             lum += 2;
666         }
667         if (w) {
668             YUVA_IN(y, u, v, a, p, pal);
669             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
670             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
671             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
672         }
673     }
674 }
675
676 static void free_subpicture(SubPicture *sp)
677 {
678     avsubtitle_free(&sp->sub);
679 }
680
681 static void video_image_display(VideoState *is)
682 {
683     VideoPicture *vp;
684     SubPicture *sp;
685     AVPicture pict;
686     float aspect_ratio;
687     int width, height, x, y;
688     SDL_Rect rect;
689     int i;
690
691     vp = &is->pictq[is->pictq_rindex];
692     if (vp->bmp) {
693         if (vp->sample_aspect_ratio.num == 0)
694             aspect_ratio = 0;
695         else
696             aspect_ratio = av_q2d(vp->sample_aspect_ratio);
697
698         if (aspect_ratio <= 0.0)
699             aspect_ratio = 1.0;
700         aspect_ratio *= (float)vp->width / (float)vp->height;
701
702         if (is->subtitle_st) {
703             if (is->subpq_size > 0) {
704                 sp = &is->subpq[is->subpq_rindex];
705
706                 if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) {
707                     SDL_LockYUVOverlay (vp->bmp);
708
709                     pict.data[0] = vp->bmp->pixels[0];
710                     pict.data[1] = vp->bmp->pixels[2];
711                     pict.data[2] = vp->bmp->pixels[1];
712
713                     pict.linesize[0] = vp->bmp->pitches[0];
714                     pict.linesize[1] = vp->bmp->pitches[2];
715                     pict.linesize[2] = vp->bmp->pitches[1];
716
717                     for (i = 0; i < sp->sub.num_rects; i++)
718                         blend_subrect(&pict, sp->sub.rects[i],
719                                       vp->bmp->w, vp->bmp->h);
720
721                     SDL_UnlockYUVOverlay (vp->bmp);
722                 }
723             }
724         }
725
726
727         /* XXX: we assume the screen has a 1:1 pixel aspect ratio */
728         height = is->height;
729         width = ((int)rint(height * aspect_ratio)) & ~1;
730         if (width > is->width) {
731             width = is->width;
732             height = ((int)rint(width / aspect_ratio)) & ~1;
733         }
734         x = (is->width - width) / 2;
735         y = (is->height - height) / 2;
736         is->no_background = 0;
737         rect.x = is->xleft + x;
738         rect.y = is->ytop  + y;
739         rect.w = FFMAX(width,  1);
740         rect.h = FFMAX(height, 1);
741         SDL_DisplayYUVOverlay(vp->bmp, &rect);
742     }
743 }
744
745 static inline int compute_mod(int a, int b)
746 {
747     return a < 0 ? a%b + b : a%b;
748 }
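/*
 * Unlike the C '%' operator, compute_mod() always returns a value in [0, b)
 * even for negative a, e.g. compute_mod(-3, 10) == 7. It is used to wrap
 * indices into the circular sample_array.
 */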
749
750 static void video_audio_display(VideoState *s)
751 {
752     int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
753     int ch, channels, h, h2, bgcolor, fgcolor;
754     int64_t time_diff; /* in microseconds; must be wider than 16 bits to avoid overflow */
755     int rdft_bits, nb_freq;
756
757     for (rdft_bits = 1; (1 << rdft_bits) < 2 * s->height; rdft_bits++)
758         ;
759     nb_freq = 1 << (rdft_bits - 1);
760
761     /* compute display index: center on the currently output samples */
762     channels = s->audio_tgt_channels;
763     nb_display_channels = channels;
764     if (!s->paused) {
765         int data_used= s->show_mode == SHOW_MODE_WAVES ? s->width : (2*nb_freq);
766         n = 2 * channels;
767         delay = s->audio_write_buf_size;
768         delay /= n;
769
770         /* to be more precise, we take into account the time spent since
771            the last buffer computation */
772         if (audio_callback_time) {
773             time_diff = av_gettime() - audio_callback_time;
774             delay -= (time_diff * s->audio_tgt_freq) / 1000000;
775         }
776
777         delay += 2 * data_used;
778         if (delay < data_used)
779             delay = data_used;
780
781         i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
782         if (s->show_mode == SHOW_MODE_WAVES) {
783             h = INT_MIN;
784             for (i = 0; i < 1000; i += channels) {
785                 int idx = (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
786                 int a = s->sample_array[idx];
787                 int b = s->sample_array[(idx + 4 * channels) % SAMPLE_ARRAY_SIZE];
788                 int c = s->sample_array[(idx + 5 * channels) % SAMPLE_ARRAY_SIZE];
789                 int d = s->sample_array[(idx + 9 * channels) % SAMPLE_ARRAY_SIZE];
790                 int score = a - d;
791                 if (h < score && (b ^ c) < 0) {
792                     h = score;
793                     i_start = idx;
794                 }
795             }
796         }
797
798         s->last_i_start = i_start;
799     } else {
800         i_start = s->last_i_start;
801     }
802
803     bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
804     if (s->show_mode == SHOW_MODE_WAVES) {
805         fill_rectangle(screen,
806                        s->xleft, s->ytop, s->width, s->height,
807                        bgcolor);
808
809         fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
810
811         /* total height for one channel */
812         h = s->height / nb_display_channels;
813         /* graph height / 2 */
814         h2 = (h * 9) / 20;
815         for (ch = 0; ch < nb_display_channels; ch++) {
816             i = i_start + ch;
817             y1 = s->ytop + ch * h + (h / 2); /* position of center line */
818             for (x = 0; x < s->width; x++) {
819                 y = (s->sample_array[i] * h2) >> 15;
820                 if (y < 0) {
821                     y = -y;
822                     ys = y1 - y;
823                 } else {
824                     ys = y1;
825                 }
826                 fill_rectangle(screen,
827                                s->xleft + x, ys, 1, y,
828                                fgcolor);
829                 i += channels;
830                 if (i >= SAMPLE_ARRAY_SIZE)
831                     i -= SAMPLE_ARRAY_SIZE;
832             }
833         }
834
835         fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
836
837         for (ch = 1; ch < nb_display_channels; ch++) {
838             y = s->ytop + ch * h;
839             fill_rectangle(screen,
840                            s->xleft, y, s->width, 1,
841                            fgcolor);
842         }
843         SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
844     } else {
845         nb_display_channels= FFMIN(nb_display_channels, 2);
846         if (rdft_bits != s->rdft_bits) {
847             av_rdft_end(s->rdft);
848             av_free(s->rdft_data);
849             s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
850             s->rdft_bits = rdft_bits;
851             s->rdft_data = av_malloc(4 * nb_freq * sizeof(*s->rdft_data));
852         }
853         {
854             FFTSample *data[2];
855             for (ch = 0; ch < nb_display_channels; ch++) {
856                 data[ch] = s->rdft_data + 2 * nb_freq * ch;
857                 i = i_start + ch;
858                 for (x = 0; x < 2 * nb_freq; x++) {
859                     double w = (x-nb_freq) * (1.0 / nb_freq);
860                     data[ch][x] = s->sample_array[i] * (1.0 - w * w);
861                     i += channels;
862                     if (i >= SAMPLE_ARRAY_SIZE)
863                         i -= SAMPLE_ARRAY_SIZE;
864                 }
865                 av_rdft_calc(s->rdft, data[ch]);
866             }
867             // least efficient way to do this; we should of course access it directly, but it's more than fast enough
868             for (y = 0; y < s->height; y++) {
869                 double w = 1 / sqrt(nb_freq);
870                 int a = sqrt(w * sqrt(data[0][2 * y + 0] * data[0][2 * y + 0] + data[0][2 * y + 1] * data[0][2 * y + 1]));
871                 int b = (nb_display_channels == 2 ) ? sqrt(w * sqrt(data[1][2 * y + 0] * data[1][2 * y + 0]
872                        + data[1][2 * y + 1] * data[1][2 * y + 1])) : a;
873                 a = FFMIN(a, 255);
874                 b = FFMIN(b, 255);
875                 fgcolor = SDL_MapRGB(screen->format, a, b, (a + b) / 2);
876
877                 fill_rectangle(screen,
878                             s->xpos, s->height-y, 1, 1,
879                             fgcolor);
880             }
881         }
882         SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
883         if (!s->paused)
884             s->xpos++;
885         if (s->xpos >= s->width)
886             s->xpos= s->xleft;
887     }
888 }
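/*
 * In the spectrum (RDFT) branch above each sample is weighted by (1.0 - w*w)
 * with w spanning [-1, 1), i.e. a Welch (parabolic) window that tapers the
 * edges of the analysis block and reduces spectral leakage.
 */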
889
890 static void stream_close(VideoState *is)
891 {
892     VideoPicture *vp;
893     int i;
894     /* XXX: use a special url_shutdown call to abort parse cleanly */
895     is->abort_request = 1;
896     SDL_WaitThread(is->read_tid, NULL);
897     SDL_WaitThread(is->refresh_tid, NULL);
898     packet_queue_destroy(&is->videoq);
899     packet_queue_destroy(&is->audioq);
900     packet_queue_destroy(&is->subtitleq);
901
902     /* free all pictures */
903     for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
904         vp = &is->pictq[i];
905 #if CONFIG_AVFILTER
906         if (vp->picref) {
907             avfilter_unref_buffer(vp->picref);
908             vp->picref = NULL;
909         }
910 #endif
911         if (vp->bmp) {
912             SDL_FreeYUVOverlay(vp->bmp);
913             vp->bmp = NULL;
914         }
915     }
916     SDL_DestroyMutex(is->pictq_mutex);
917     SDL_DestroyCond(is->pictq_cond);
918     SDL_DestroyMutex(is->subpq_mutex);
919     SDL_DestroyCond(is->subpq_cond);
920 #if !CONFIG_AVFILTER
921     if (is->img_convert_ctx)
922         sws_freeContext(is->img_convert_ctx);
923 #endif
924     av_free(is);
925 }
926
927 static void do_exit(VideoState *is)
928 {
929     if (is) {
930         stream_close(is);
931     }
932     av_lockmgr_register(NULL);
933     uninit_opts();
934 #if CONFIG_AVFILTER
935     avfilter_uninit();
936 #endif
937     avformat_network_deinit();
938     if (show_status)
939         printf("\n");
940     SDL_Quit();
941     av_log(NULL, AV_LOG_QUIET, "%s", "");
942     exit(0);
943 }
944
945 static void sigterm_handler(int sig)
946 {
947     exit(123);
948 }
949
950 static int video_open(VideoState *is, int force_set_video_mode)
951 {
952     int flags = SDL_HWSURFACE | SDL_ASYNCBLIT | SDL_HWACCEL;
953     int w,h;
954     VideoPicture *vp = &is->pictq[is->pictq_rindex];
955
956     if (is_full_screen) flags |= SDL_FULLSCREEN;
957     else                flags |= SDL_RESIZABLE;
958
959     if (is_full_screen && fs_screen_width) {
960         w = fs_screen_width;
961         h = fs_screen_height;
962     } else if (!is_full_screen && screen_width) {
963         w = screen_width;
964         h = screen_height;
965     } else if (vp->width) {
966         w = vp->width;
967         h = vp->height;
968     } else {
969         w = 640;
970         h = 480;
971     }
972     if (screen && is->width == screen->w && screen->w == w
973        && is->height== screen->h && screen->h == h && !force_set_video_mode)
974         return 0;
975     screen = SDL_SetVideoMode(w, h, 0, flags);
976     if (!screen) {
977         fprintf(stderr, "SDL: could not set video mode - exiting\n");
978         do_exit(is);
979     }
980     if (!window_title)
981         window_title = input_filename;
982     SDL_WM_SetCaption(window_title, window_title);
983
984     is->width  = screen->w;
985     is->height = screen->h;
986
987     return 0;
988 }
989
990 /* display the current picture, if any */
991 static void video_display(VideoState *is)
992 {
993     if (!screen)
994         video_open(is, 0);
995     if (is->audio_st && is->show_mode != SHOW_MODE_VIDEO)
996         video_audio_display(is);
997     else if (is->video_st)
998         video_image_display(is);
999 }
1000
1001 static int refresh_thread(void *opaque)
1002 {
1003     VideoState *is= opaque;
1004     while (!is->abort_request) {
1005         SDL_Event event;
1006         event.type = FF_REFRESH_EVENT;
1007         event.user.data1 = opaque;
1008         if (!is->refresh && (!is->paused || is->force_refresh)) {
1009             is->refresh = 1;
1010             SDL_PushEvent(&event);
1011         }
1012         // FIXME: ideally we should wait the correct amount of time, but SDL's event passing is so slow that it would be pointless
1013         usleep(is->audio_st && is->show_mode != SHOW_MODE_VIDEO ? rdftspeed*1000 : 5000);
1014     }
1015     return 0;
1016 }
1017
1018 /* get the current audio clock value */
1019 static double get_audio_clock(VideoState *is)
1020 {
1021     if (is->paused) {
1022         return is->audio_current_pts;
1023     } else {
1024         return is->audio_current_pts_drift + av_gettime() / 1000000.0;
1025     }
1026 }
1027
1028 /* get the current video clock value */
1029 static double get_video_clock(VideoState *is)
1030 {
1031     if (is->paused) {
1032         return is->video_current_pts;
1033     } else {
1034         return is->video_current_pts_drift + av_gettime() / 1000000.0;
1035     }
1036 }
1037
1038 /* get the current external clock value */
1039 static double get_external_clock(VideoState *is)
1040 {
1041     int64_t ti;
1042     ti = av_gettime();
1043     return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
1044 }
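/*
 * The *_drift values let the clocks advance on their own between updates: a
 * drift is stored as "pts minus system time at the moment of update", so
 * adding the current av_gettime() back yields where the stream should be
 * right now. While paused, the last absolute pts is returned instead, which
 * freezes the clock.
 */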
1045
1046 /* get the current master clock value */
1047 static double get_master_clock(VideoState *is)
1048 {
1049     double val;
1050
1051     if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1052         if (is->video_st)
1053             val = get_video_clock(is);
1054         else
1055             val = get_audio_clock(is);
1056     } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1057         if (is->audio_st)
1058             val = get_audio_clock(is);
1059         else
1060             val = get_video_clock(is);
1061     } else {
1062         val = get_external_clock(is);
1063     }
1064     return val;
1065 }
1066
1067 /* seek in the stream */
1068 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1069 {
1070     if (!is->seek_req) {
1071         is->seek_pos = pos;
1072         is->seek_rel = rel;
1073         is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1074         if (seek_by_bytes)
1075             is->seek_flags |= AVSEEK_FLAG_BYTE;
1076         is->seek_req = 1;
1077     }
1078 }
1079
1080 /* pause or resume the video */
1081 static void stream_toggle_pause(VideoState *is)
1082 {
1083     if (is->paused) {
1084         is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
1085         if (is->read_pause_return != AVERROR(ENOSYS)) {
1086             is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
1087         }
1088         is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
1089     }
1090     is->paused = !is->paused;
1091 }
1092
1093 static double compute_target_delay(double delay, VideoState *is)
1094 {
1095     double sync_threshold, diff;
1096
1097     /* update delay to follow master synchronisation source */
1098     if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1099          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1100         /* if video is slave, we try to correct big delays by
1101            duplicating or deleting a frame */
1102         diff = get_video_clock(is) - get_master_clock(is);
1103
1104         /* skip or repeat frame. We take into account the
1105            delay to compute the threshold. I still don't know
1106            if it is the best guess */
1107         sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1108         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1109             if (diff <= -sync_threshold)
1110                 delay = 0;
1111             else if (diff >= sync_threshold)
1112                 delay = 2 * delay;
1113         }
1114     }
1115
1116     av_dlog(NULL, "video: delay=%0.3f A-V=%f\n",
1117             delay, -diff);
1118
1119     return delay;
1120 }
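/*
 * Example of the correction above: with a nominal frame delay of 40 ms the
 * sync threshold is max(0.01, 0.04) = 40 ms. If the video clock is more than
 * 40 ms behind the master the returned delay collapses to 0 (show the next
 * frame immediately to catch up); if it is more than 40 ms ahead the delay is
 * doubled, keeping the current picture on screen longer so the master can
 * catch up.
 */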
1121
1122 static void pictq_next_picture(VideoState *is) {
1123     /* update queue size and signal for next picture */
1124     if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1125         is->pictq_rindex = 0;
1126
1127     SDL_LockMutex(is->pictq_mutex);
1128     is->pictq_size--;
1129     SDL_CondSignal(is->pictq_cond);
1130     SDL_UnlockMutex(is->pictq_mutex);
1131 }
1132
1133 static void update_video_pts(VideoState *is, double pts, int64_t pos) {
1134     double time = av_gettime() / 1000000.0;
1135     /* update current video pts */
1136     is->video_current_pts = pts;
1137     is->video_current_pts_drift = is->video_current_pts - time;
1138     is->video_current_pos = pos;
1139     is->frame_last_pts = pts;
1140 }
1141
1142 /* called to display each frame */
1143 static void video_refresh(void *opaque)
1144 {
1145     VideoState *is = opaque;
1146     VideoPicture *vp;
1147     double time;
1148
1149     SubPicture *sp, *sp2;
1150
1151     if (is->video_st) {
1152 retry:
1153         if (is->pictq_size == 0) {
1154             SDL_LockMutex(is->pictq_mutex);
1155             if (is->frame_last_dropped_pts != AV_NOPTS_VALUE && is->frame_last_dropped_pts > is->frame_last_pts) {
1156                 update_video_pts(is, is->frame_last_dropped_pts, is->frame_last_dropped_pos);
1157                 is->frame_last_dropped_pts = AV_NOPTS_VALUE;
1158             }
1159             SDL_UnlockMutex(is->pictq_mutex);
1160             // nothing to do, no picture to display in the queue
1161         } else {
1162             double last_duration, duration, delay;
1163             /* dequeue the picture */
1164             vp = &is->pictq[is->pictq_rindex];
1165
1166             if (vp->skip) {
1167                 pictq_next_picture(is);
1168                 goto retry;
1169             }
1170
1171             if (is->paused)
1172                 goto display;
1173
1174             /* compute nominal last_duration */
1175             last_duration = vp->pts - is->frame_last_pts;
1176             if (last_duration > 0 && last_duration < 10.0) {
1177                 /* if duration of the last frame was sane, update last_duration in video state */
1178                 is->frame_last_duration = last_duration;
1179             }
1180             delay = compute_target_delay(is->frame_last_duration, is);
1181
1182             time= av_gettime()/1000000.0;
1183             if (time < is->frame_timer + delay)
1184                 return;
1185
1186             if (delay > 0)
1187                 is->frame_timer += delay * FFMAX(1, floor((time-is->frame_timer) / delay));
1188
1189             SDL_LockMutex(is->pictq_mutex);
1190             update_video_pts(is, vp->pts, vp->pos);
1191             SDL_UnlockMutex(is->pictq_mutex);
1192
1193             if (is->pictq_size > 1) {
1194                 VideoPicture *nextvp = &is->pictq[(is->pictq_rindex + 1) % VIDEO_PICTURE_QUEUE_SIZE];
1195                 duration = nextvp->pts - vp->pts;
1196                 if((framedrop>0 || (framedrop && is->audio_st)) && time > is->frame_timer + duration){
1197                     is->frame_drops_late++;
1198                     pictq_next_picture(is);
1199                     goto retry;
1200                 }
1201             }
1202
1203             if (is->subtitle_st) {
1204                 if (is->subtitle_stream_changed) {
1205                     SDL_LockMutex(is->subpq_mutex);
1206
1207                     while (is->subpq_size) {
1208                         free_subpicture(&is->subpq[is->subpq_rindex]);
1209
1210                         /* update queue size and signal for next picture */
1211                         if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1212                             is->subpq_rindex = 0;
1213
1214                         is->subpq_size--;
1215                     }
1216                     is->subtitle_stream_changed = 0;
1217
1218                     SDL_CondSignal(is->subpq_cond);
1219                     SDL_UnlockMutex(is->subpq_mutex);
1220                 } else {
1221                     if (is->subpq_size > 0) {
1222                         sp = &is->subpq[is->subpq_rindex];
1223
1224                         if (is->subpq_size > 1)
1225                             sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1226                         else
1227                             sp2 = NULL;
1228
1229                         if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1230                                 || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1231                         {
1232                             free_subpicture(sp);
1233
1234                             /* update queue size and signal for next picture */
1235                             if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1236                                 is->subpq_rindex = 0;
1237
1238                             SDL_LockMutex(is->subpq_mutex);
1239                             is->subpq_size--;
1240                             SDL_CondSignal(is->subpq_cond);
1241                             SDL_UnlockMutex(is->subpq_mutex);
1242                         }
1243                     }
1244                 }
1245             }
1246
1247 display:
1248             /* display picture */
1249             if (!display_disable)
1250                 video_display(is);
1251
1252             if (!is->paused)
1253                 pictq_next_picture(is);
1254         }
1255     } else if (is->audio_st) {
1256         /* draw the next audio frame */
1257
1258         /* if there is only an audio stream, then display the audio bars
1259            (better than nothing, just to test the implementation) */
1260
1261         /* display picture */
1262         if (!display_disable)
1263             video_display(is);
1264     }
1265     is->force_refresh = 0;
1266     if (show_status) {
1267         static int64_t last_time;
1268         int64_t cur_time;
1269         int aqsize, vqsize, sqsize;
1270         double av_diff;
1271
1272         cur_time = av_gettime();
1273         if (!last_time || (cur_time - last_time) >= 30000) {
1274             aqsize = 0;
1275             vqsize = 0;
1276             sqsize = 0;
1277             if (is->audio_st)
1278                 aqsize = is->audioq.size;
1279             if (is->video_st)
1280                 vqsize = is->videoq.size;
1281             if (is->subtitle_st)
1282                 sqsize = is->subtitleq.size;
1283             av_diff = 0;
1284             if (is->audio_st && is->video_st)
1285                 av_diff = get_audio_clock(is) - get_video_clock(is);
1286             printf("%7.2f A-V:%7.3f fd=%4d aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
1287                    get_master_clock(is),
1288                    av_diff,
1289                    is->frame_drops_early + is->frame_drops_late,
1290                    aqsize / 1024,
1291                    vqsize / 1024,
1292                    sqsize,
1293                    is->video_st ? is->video_st->codec->pts_correction_num_faulty_dts : 0,
1294                    is->video_st ? is->video_st->codec->pts_correction_num_faulty_pts : 0);
1295             fflush(stdout);
1296             last_time = cur_time;
1297         }
1298     }
1299 }
1300
1301 /* allocate a picture (this needs to be done in the main thread to avoid
1302    potential locking problems) */
1303 static void alloc_picture(AllocEventProps *event_props)
1304 {
1305     VideoState *is = event_props->is;
1306     AVFrame *frame = event_props->frame;
1307     VideoPicture *vp;
1308
1309     vp = &is->pictq[is->pictq_windex];
1310
1311     if (vp->bmp)
1312         SDL_FreeYUVOverlay(vp->bmp);
1313
1314 #if CONFIG_AVFILTER
1315     if (vp->picref)
1316         avfilter_unref_buffer(vp->picref);
1317     vp->picref = NULL;
1318 #endif
1319
1320     vp->width   = frame->width;
1321     vp->height  = frame->height;
1322
1323     video_open(event_props->is, 0);
1324
1325     vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
1326                                    SDL_YV12_OVERLAY,
1327                                    screen);
1328     if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
1329         /* SDL allocates a buffer smaller than requested if the video
1330          * overlay hardware is unable to support the requested size. */
1331         fprintf(stderr, "Error: the video system does not support an image\n"
1332                         "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n"
1333                         "to reduce the image size.\n", vp->width, vp->height );
1334         do_exit(is);
1335     }
1336
1337     SDL_LockMutex(is->pictq_mutex);
1338     vp->allocated = 1;
1339     SDL_CondSignal(is->pictq_cond);
1340     SDL_UnlockMutex(is->pictq_mutex);
1341 }
1342
1343 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
1344 {
1345     VideoPicture *vp;
1346     double frame_delay, pts = pts1;
1347
1348     /* compute the exact PTS for the picture if it is omitted in the stream
1349      * pts1 is the dts of the pkt / pts of the frame */
1350     if (pts != 0) {
1351         /* update video clock with pts, if present */
1352         is->video_clock = pts;
1353     } else {
1354         pts = is->video_clock;
1355     }
1356     /* update video clock for next frame */
1357     frame_delay = av_q2d(is->video_st->codec->time_base);
1358     /* for MPEG2, the frame can be repeated, so we update the
1359        clock accordingly */
1360     frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1361     is->video_clock += frame_delay;
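    /* Example, assuming a codec time_base of 1/25 (25 fps): frame_delay starts
       at 40 ms, and a frame with repeat_pict == 1 (e.g. soft telecine) advances
       the video clock by 40 + 20 = 60 ms, i.e. it is displayed for 1.5 frame
       periods. */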
1362
1363 #if defined(DEBUG_SYNC) && 0
1364     printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
1365            av_get_picture_type_char(src_frame->pict_type), pts, pts1);
1366 #endif
1367
1368     /* wait until we have space to put a new picture */
1369     SDL_LockMutex(is->pictq_mutex);
1370
1371     while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1372            !is->videoq.abort_request) {
1373         SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1374     }
1375     SDL_UnlockMutex(is->pictq_mutex);
1376
1377     if (is->videoq.abort_request)
1378         return -1;
1379
1380     vp = &is->pictq[is->pictq_windex];
1381
1382     /* alloc or resize hardware picture buffer */
1383     if (!vp->bmp || vp->reallocate ||
1384         vp->width  != src_frame->width ||
1385         vp->height != src_frame->height) {
1386         SDL_Event event;
1387         AllocEventProps event_props;
1388
1389         event_props.frame = src_frame;
1390         event_props.is = is;
1391
1392         vp->allocated  = 0;
1393         vp->reallocate = 0;
1394
1395         /* the allocation must be done in the main thread to avoid
1396            locking problems. We wait in this block for the event to complete,
1397            so we can pass a pointer to event_props to it. */
1398         event.type = FF_ALLOC_EVENT;
1399         event.user.data1 = &event_props;
1400         SDL_PushEvent(&event);
1401
1402         /* wait until the picture is allocated */
1403         SDL_LockMutex(is->pictq_mutex);
1404         while (!vp->allocated && !is->videoq.abort_request) {
1405             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1406         }
1407         /* if the queue is aborted, we have to pop the pending ALLOC event or wait for the allocation to complete */
1408         if (is->videoq.abort_request && SDL_PeepEvents(&event, 1, SDL_GETEVENT, SDL_EVENTMASK(FF_ALLOC_EVENT)) != 1) {
1409             while (!vp->allocated) {
1410                 SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1411             }
1412         }
1413         SDL_UnlockMutex(is->pictq_mutex);
1414
1415         if (is->videoq.abort_request)
1416             return -1;
1417     }
1418
1419     /* if the frame is not skipped, then display it */
1420     if (vp->bmp) {
1421         AVPicture pict = { { 0 } };
1422 #if CONFIG_AVFILTER
1423         if (vp->picref)
1424             avfilter_unref_buffer(vp->picref);
1425         vp->picref = src_frame->opaque;
1426 #endif
1427
1428         /* get a pointer on the bitmap */
1429         SDL_LockYUVOverlay (vp->bmp);
1430
1431         pict.data[0] = vp->bmp->pixels[0];
1432         pict.data[1] = vp->bmp->pixels[2];
1433         pict.data[2] = vp->bmp->pixels[1];
1434
1435         pict.linesize[0] = vp->bmp->pitches[0];
1436         pict.linesize[1] = vp->bmp->pitches[2];
1437         pict.linesize[2] = vp->bmp->pitches[1];
1438
1439 #if CONFIG_AVFILTER
1440         // FIXME use direct rendering
1441         av_picture_copy(&pict, (AVPicture *)src_frame,
1442                         src_frame->format, vp->width, vp->height);
1443         vp->sample_aspect_ratio = vp->picref->video->sample_aspect_ratio;
1444 #else
1445         sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
1446         is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
1447             vp->width, vp->height, src_frame->format, vp->width, vp->height,
1448             PIX_FMT_YUV420P, sws_flags, NULL, NULL, NULL);
1449         if (is->img_convert_ctx == NULL) {
1450             fprintf(stderr, "Cannot initialize the conversion context\n");
1451             exit(1);
1452         }
1453         sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1454                   0, vp->height, pict.data, pict.linesize);
1455         vp->sample_aspect_ratio = av_guess_sample_aspect_ratio(is->ic, is->video_st, src_frame);
1456 #endif
1457         /* update the bitmap content */
1458         SDL_UnlockYUVOverlay(vp->bmp);
1459
1460         vp->pts = pts;
1461         vp->pos = pos;
1462         vp->skip = 0;
1463
1464         /* now we can update the picture count */
1465         if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1466             is->pictq_windex = 0;
1467         SDL_LockMutex(is->pictq_mutex);
1468         is->pictq_size++;
1469         SDL_UnlockMutex(is->pictq_mutex);
1470     }
1471     return 0;
1472 }
1473
1474 static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
1475 {
1476     int got_picture, i;
1477
1478     if (packet_queue_get(&is->videoq, pkt, 1) < 0)
1479         return -1;
1480
1481     if (pkt->data == flush_pkt.data) {
1482         avcodec_flush_buffers(is->video_st->codec);
1483
1484         SDL_LockMutex(is->pictq_mutex);
1485         // Make sure there are no long-delay timers left (ideally we should just flush the queue, but that's harder)
1486         for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
1487             is->pictq[i].skip = 1;
1488         }
1489         while (is->pictq_size && !is->videoq.abort_request) {
1490             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1491         }
1492         is->video_current_pos = -1;
1493         is->frame_last_pts = AV_NOPTS_VALUE;
1494         is->frame_last_duration = 0;
1495         is->frame_timer = (double)av_gettime() / 1000000.0;
1496         is->frame_last_dropped_pts = AV_NOPTS_VALUE;
1497         SDL_UnlockMutex(is->pictq_mutex);
1498
1499         return 0;
1500     }
1501
1502     avcodec_decode_video2(is->video_st->codec, frame, &got_picture, pkt);
1503
1504     if (got_picture) {
1505         int ret = 1;
1506
1507         if (decoder_reorder_pts == -1) {
1508             *pts = av_frame_get_best_effort_timestamp(frame);
1509         } else if (decoder_reorder_pts) {
1510             *pts = frame->pkt_pts;
1511         } else {
1512             *pts = frame->pkt_dts;
1513         }
1514
1515         if (*pts == AV_NOPTS_VALUE) {
1516             *pts = 0;
1517         }
1518
1519         if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) || is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK) &&
1520              (framedrop>0 || (framedrop && is->audio_st))) {
1521             SDL_LockMutex(is->pictq_mutex);
1522             if (is->frame_last_pts != AV_NOPTS_VALUE && *pts) {
1523                 double clockdiff = get_video_clock(is) - get_master_clock(is);
1524                 double dpts = av_q2d(is->video_st->time_base) * *pts;
1525                 double ptsdiff = dpts - is->frame_last_pts;
1526                 if (fabs(clockdiff) < AV_NOSYNC_THRESHOLD &&
1527                      ptsdiff > 0 && ptsdiff < AV_NOSYNC_THRESHOLD &&
1528                      clockdiff + ptsdiff - is->frame_last_filter_delay < 0) {
1529                     is->frame_last_dropped_pos = pkt->pos;
1530                     is->frame_last_dropped_pts = dpts;
1531                     is->frame_drops_early++;
1532                     ret = 0;
1533                 }
1534             }
1535             SDL_UnlockMutex(is->pictq_mutex);
1536         }
1537
1538         if (ret)
1539             is->frame_last_returned_time = av_gettime() / 1000000.0;
1540
1541         return ret;
1542     }
1543     return 0;
1544 }
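/*
 * The early-drop test above predicts whether a freshly decoded frame can still
 * be presented on time: clockdiff is the video clock minus the master clock
 * (negative when video is behind), ptsdiff is the interval from the previous
 * frame to this one, and frame_last_filter_delay is how long the previous
 * frame spent in decoding/filtering. If their combination is still negative,
 * the frame would be late anyway, so it is dropped before ever reaching the
 * picture queue and counted in frame_drops_early.
 */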
1545
1546 #if CONFIG_AVFILTER
1547 typedef struct {
1548     VideoState *is;
1549     AVFrame *frame;
1550     int use_dr1;
1551 } FilterPriv;
1552
1553 static int input_get_buffer(AVCodecContext *codec, AVFrame *pic)
1554 {
1555     AVFilterContext *ctx = codec->opaque;
1556     AVFilterBufferRef  *ref;
1557     int perms = AV_PERM_WRITE;
1558     int i, w, h, stride[AV_NUM_DATA_POINTERS];
1559     unsigned edge;
1560     int pixel_size;
1561
1562     av_assert0(codec->flags & CODEC_FLAG_EMU_EDGE);
1563
1564     if (codec->codec->capabilities & CODEC_CAP_NEG_LINESIZES)
1565         perms |= AV_PERM_NEG_LINESIZES;
1566
1567     if (pic->buffer_hints & FF_BUFFER_HINTS_VALID) {
1568         if (pic->buffer_hints & FF_BUFFER_HINTS_READABLE) perms |= AV_PERM_READ;
1569         if (pic->buffer_hints & FF_BUFFER_HINTS_PRESERVE) perms |= AV_PERM_PRESERVE;
1570         if (pic->buffer_hints & FF_BUFFER_HINTS_REUSABLE) perms |= AV_PERM_REUSE2;
1571     }
1572     if (pic->reference) perms |= AV_PERM_READ | AV_PERM_PRESERVE;
1573
1574     w = codec->width;
1575     h = codec->height;
1576
1577     if(av_image_check_size(w, h, 0, codec) || codec->pix_fmt<0)
1578         return -1;
1579
1580     avcodec_align_dimensions2(codec, &w, &h, stride);
1581     edge = codec->flags & CODEC_FLAG_EMU_EDGE ? 0 : avcodec_get_edge_width();
1582     w += edge << 1;
1583     h += edge << 1;
1584     if (codec->pix_fmt != ctx->outputs[0]->format) {
1585         av_log(codec, AV_LOG_ERROR, "Pixel format mismatches %d %d\n", codec->pix_fmt, ctx->outputs[0]->format);
1586         return -1;
1587     }
1588     if (!(ref = avfilter_get_video_buffer(ctx->outputs[0], perms, w, h)))
1589         return -1;
1590
1591     pixel_size = av_pix_fmt_descriptors[ref->format].comp[0].step_minus1 + 1;
1592     ref->video->w = codec->width;
1593     ref->video->h = codec->height;
1594     for (i = 0; i < 4; i ++) {
1595         unsigned hshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_w : 0;
1596         unsigned vshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_h : 0;
1597
1598         pic->base[i]     = ref->data[i];
1599         if (ref->data[i]) {
1600             ref->data[i]    += ((edge * pixel_size) >> hshift) + ((edge * ref->linesize[i]) >> vshift);
1601         }
1602         pic->data[i]     = ref->data[i];
1603         pic->linesize[i] = ref->linesize[i];
1604     }
1605     pic->opaque = ref;
1606     pic->type   = FF_BUFFER_TYPE_USER;
1607     pic->reordered_opaque = codec->reordered_opaque;
1608     pic->width               = codec->width;
1609     pic->height              = codec->height;
1610     pic->format              = codec->pix_fmt;
1611     pic->sample_aspect_ratio = codec->sample_aspect_ratio;
1612     if (codec->pkt) pic->pkt_pts = codec->pkt->pts;
1613     else            pic->pkt_pts = AV_NOPTS_VALUE;
1614     return 0;
1615 }
1616
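/* release_buffer() callback: drop the reference to the filter buffer */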
1617 static void input_release_buffer(AVCodecContext *codec, AVFrame *pic)
1618 {
1619     memset(pic->data, 0, sizeof(pic->data));
1620     avfilter_unref_buffer(pic->opaque);
1621 }
1622
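/* reget_buffer() callback: keep the existing filter buffer as long as the
   picture properties do not change */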
1623 static int input_reget_buffer(AVCodecContext *codec, AVFrame *pic)
1624 {
1625     AVFilterBufferRef *ref = pic->opaque;
1626
1627     if (pic->data[0] == NULL) {
1628         pic->buffer_hints |= FF_BUFFER_HINTS_READABLE;
1629         return codec->get_buffer(codec, pic);
1630     }
1631
1632     if ((codec->width != ref->video->w) || (codec->height != ref->video->h) ||
1633         (codec->pix_fmt != ref->format)) {
1634         av_log(codec, AV_LOG_ERROR, "Picture properties changed.\n");
1635         return -1;
1636     }
1637
1638     pic->reordered_opaque = codec->reordered_opaque;
1639     if (codec->pkt) pic->pkt_pts = codec->pkt->pts;
1640     else            pic->pkt_pts = AV_NOPTS_VALUE;
1641     return 0;
1642 }
1643
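/* init callback of the source filter: remember the VideoState and, if the
   decoder supports direct rendering (DR1), install the buffer callbacks above */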
1644 static int input_init(AVFilterContext *ctx, const char *args, void *opaque)
1645 {
1646     FilterPriv *priv = ctx->priv;
1647     AVCodecContext *codec;
1648     if (!opaque) return -1;
1649
1650     priv->is = opaque;
1651     codec    = priv->is->video_st->codec;
1652     codec->opaque = ctx;
1653     if (codec->codec->capabilities & CODEC_CAP_DR1) {
1654         av_assert0(codec->flags & CODEC_FLAG_EMU_EDGE);
1655         priv->use_dr1 = 1;
1656         codec->get_buffer     = input_get_buffer;
1657         codec->release_buffer = input_release_buffer;
1658         codec->reget_buffer   = input_reget_buffer;
1659         codec->thread_safe_callbacks = 1;
1660     }
1661
1662     priv->frame = avcodec_alloc_frame();
1663
1664     return 0;
1665 }
1666
1667 static void input_uninit(AVFilterContext *ctx)
1668 {
1669     FilterPriv *priv = ctx->priv;
1670     av_free(priv->frame);
1671 }
1672
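/* request_frame() callback: decode the next video frame and push it into the
   filter graph as a new picture */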
1673 static int input_request_frame(AVFilterLink *link)
1674 {
1675     FilterPriv *priv = link->src->priv;
1676     AVFilterBufferRef *picref;
1677     int64_t pts = 0;
1678     AVPacket pkt;
1679     int ret;
1680
1681     while (!(ret = get_video_frame(priv->is, priv->frame, &pts, &pkt)))
1682         av_free_packet(&pkt);
1683     if (ret < 0)
1684         return -1;
1685
1686     if (priv->use_dr1 && priv->frame->opaque) {
1687         picref = avfilter_ref_buffer(priv->frame->opaque, ~0);
1688     } else {
1689         picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, priv->frame->width, priv->frame->height);
1690         av_image_copy(picref->data, picref->linesize,
1691                       (const uint8_t **)(void **)priv->frame->data, priv->frame->linesize,
1692                       picref->format, priv->frame->width, priv->frame->height);
1693     }
1694     av_free_packet(&pkt);
1695
1696     avfilter_copy_frame_props(picref, priv->frame);
1697     picref->video->sample_aspect_ratio = av_guess_sample_aspect_ratio(priv->is->ic, priv->is->video_st, priv->frame);
1698     picref->pts = pts;
1699
1700     avfilter_start_frame(link, picref);
1701     avfilter_draw_slice(link, 0, picref->video->h, 1);
1702     avfilter_end_frame(link);
1703
1704     return 0;
1705 }
1706
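/* restrict the source output to the pixel format of the decoder */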
1707 static int input_query_formats(AVFilterContext *ctx)
1708 {
1709     FilterPriv *priv = ctx->priv;
1710     enum PixelFormat pix_fmts[] = {
1711         priv->is->video_st->codec->pix_fmt, PIX_FMT_NONE
1712     };
1713
1714     avfilter_set_common_pixel_formats(ctx, avfilter_make_format_list(pix_fmts));
1715     return 0;
1716 }
1717
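/* propagate the stream dimensions, sample aspect ratio and time base to the
   output link */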
1718 static int input_config_props(AVFilterLink *link)
1719 {
1720     FilterPriv *priv  = link->src->priv;
1721     AVStream *s = priv->is->video_st;
1722
1723     link->w = s->codec->width;
1724     link->h = s->codec->height;
1725     link->sample_aspect_ratio = s->sample_aspect_ratio.num ?
1726         s->sample_aspect_ratio : s->codec->sample_aspect_ratio;
1727     link->time_base = s->time_base;
1728
1729     return 0;
1730 }
1731
1732 static AVFilter input_filter =
1733 {
1734     .name      = "ffplay_input",
1735
1736     .priv_size = sizeof(FilterPriv),
1737
1738     .init      = input_init,
1739     .uninit    = input_uninit,
1740
1741     .query_formats = input_query_formats,
1742
1743     .inputs    = (AVFilterPad[]) {{ .name = NULL }},
1744     .outputs   = (AVFilterPad[]) {{ .name = "default",
1745                                     .type = AVMEDIA_TYPE_VIDEO,
1746                                     .request_frame = input_request_frame,
1747                                     .config_props  = input_config_props, },
1748                                   { .name = NULL }},
1749 };
1750
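/* build the video filter graph:
   ffplay_input source -> optional user filters (vfilters) -> format=yuv420p -> buffersink */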
1751 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters)
1752 {
1753     static const enum PixelFormat pix_fmts[] = { PIX_FMT_YUV420P, PIX_FMT_NONE };
1754     char sws_flags_str[128];
1755     int ret;
1756     AVBufferSinkParams *buffersink_params = av_buffersink_params_alloc();
1757     AVFilterContext *filt_src = NULL, *filt_out = NULL, *filt_format;
1758     snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%d", sws_flags);
1759     graph->scale_sws_opts = av_strdup(sws_flags_str);
1760
1761     if ((ret = avfilter_graph_create_filter(&filt_src, &input_filter, "src",
1762                                             NULL, is, graph)) < 0)
1763         return ret;
1764
1765 #if FF_API_OLD_VSINK_API
1766     ret = avfilter_graph_create_filter(&filt_out,
1767                                        avfilter_get_by_name("buffersink"),
1768                                        "out", NULL, pix_fmts, graph);
1769 #else
1770     buffersink_params->pixel_fmts = pix_fmts;
1771     ret = avfilter_graph_create_filter(&filt_out,
1772                                        avfilter_get_by_name("buffersink"),
1773                                        "out", NULL, buffersink_params, graph);
1774 #endif
1775     av_freep(&buffersink_params);
1776     if (ret < 0)
1777         return ret;
1778
1779     if ((ret = avfilter_graph_create_filter(&filt_format,
1780                                             avfilter_get_by_name("format"),
1781                                             "format", "yuv420p", NULL, graph)) < 0)
1782         return ret;
1783     if ((ret = avfilter_link(filt_format, 0, filt_out, 0)) < 0)
1784         return ret;
1785
1786
1787     if (vfilters) {
1788         AVFilterInOut *outputs = avfilter_inout_alloc();
1789         AVFilterInOut *inputs  = avfilter_inout_alloc();
1790
1791         outputs->name    = av_strdup("in");
1792         outputs->filter_ctx = filt_src;
1793         outputs->pad_idx = 0;
1794         outputs->next    = NULL;
1795
1796         inputs->name    = av_strdup("out");
1797         inputs->filter_ctx = filt_format;
1798         inputs->pad_idx = 0;
1799         inputs->next    = NULL;
1800
1801         if ((ret = avfilter_graph_parse(graph, vfilters, &inputs, &outputs, NULL)) < 0)
1802             return ret;
1803     } else {
1804         if ((ret = avfilter_link(filt_src, 0, filt_format, 0)) < 0)
1805             return ret;
1806     }
1807
1808     if ((ret = avfilter_graph_config(graph, NULL)) < 0)
1809         return ret;
1810
1811     is->out_video_filter = filt_out;
1812
1813     return ret;
1814 }
1815
1816 #endif  /* CONFIG_AVFILTER */
1817
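/* video decoding thread: pulls decoded frames (through the filter graph when
   compiled with avfilter) and queues them as pictures for display */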
1818 static int video_thread(void *arg)
1819 {
1820     VideoState *is = arg;
1821     AVFrame *frame = avcodec_alloc_frame();
1822     int64_t pts_int = AV_NOPTS_VALUE, pos = -1;
1823     double pts;
1824     int ret;
1825
1826 #if CONFIG_AVFILTER
1827     AVFilterGraph *graph = avfilter_graph_alloc();
1828     AVFilterContext *filt_out = NULL;
1829     int last_w = is->video_st->codec->width;
1830     int last_h = is->video_st->codec->height;
1831
1832     if ((ret = configure_video_filters(graph, is, vfilters)) < 0) {
1833         SDL_Event event;
1834         event.type = FF_QUIT_EVENT;
1835         event.user.data1 = is;
1836         SDL_PushEvent(&event);
1837         goto the_end;
1838     }
1839     filt_out = is->out_video_filter;
1840 #endif
1841
1842     for (;;) {
1843 #if !CONFIG_AVFILTER
1844         AVPacket pkt;
1845 #else
1846         AVFilterBufferRef *picref;
1847         AVRational tb = filt_out->inputs[0]->time_base;
1848 #endif
1849         while (is->paused && !is->videoq.abort_request)
1850             SDL_Delay(10);
1851 #if CONFIG_AVFILTER
1852         if (   last_w != is->video_st->codec->width
1853             || last_h != is->video_st->codec->height) {
1854             av_log(NULL, AV_LOG_INFO, "Frame changed from size:%dx%d to size:%dx%d\n",
1855                    last_w, last_h, is->video_st->codec->width, is->video_st->codec->height);
1856             avfilter_graph_free(&graph);
1857             graph = avfilter_graph_alloc();
1858             if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
1859                 goto the_end;
1860             filt_out = is->out_video_filter;
1861             last_w = is->video_st->codec->width;
1862             last_h = is->video_st->codec->height;
1863         }
1864         ret = av_buffersink_get_buffer_ref(filt_out, &picref, 0);
1865         if (picref) {
1866             avfilter_fill_frame_from_video_buffer_ref(frame, picref);
1867             pts_int = picref->pts;
1868             tb      = filt_out->inputs[0]->time_base;
1869             pos     = picref->pos;
1870             frame->opaque = picref;
1871
1872             ret = 1;
1873         }
1874
1875         if (ret >= 0 && av_cmp_q(tb, is->video_st->time_base)) {
1876             av_unused int64_t pts1 = pts_int;
1877             pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
1878             av_dlog(NULL, "video_thread(): "
1879                     "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
1880                     tb.num, tb.den, pts1,
1881                     is->video_st->time_base.num, is->video_st->time_base.den, pts_int);
1882         }
1883 #else
1884         ret = get_video_frame(is, frame, &pts_int, &pkt);
1885         pos = pkt.pos;
1886         av_free_packet(&pkt);
1887         if (ret == 0)
1888             continue;
1889 #endif
1890
1891         if (ret < 0)
1892             goto the_end;
1893
1894         is->frame_last_filter_delay = av_gettime() / 1000000.0 - is->frame_last_returned_time;
1895         if (fabs(is->frame_last_filter_delay) > AV_NOSYNC_THRESHOLD / 10.0)
1896             is->frame_last_filter_delay = 0;
1897
1898 #if CONFIG_AVFILTER
1899         if (!picref)
1900             continue;
1901 #endif
1902
1903         pts = pts_int * av_q2d(is->video_st->time_base);
1904
1905         ret = queue_picture(is, frame, pts, pos);
1906
1907         if (ret < 0)
1908             goto the_end;
1909
1910         if (is->step)
1911             stream_toggle_pause(is);
1912     }
1913  the_end:
1914     avcodec_flush_buffers(is->video_st->codec);
1915 #if CONFIG_AVFILTER
1916     av_freep(&vfilters);
1917     avfilter_graph_free(&graph);
1918 #endif
1919     av_free(frame);
1920     return 0;
1921 }
1922
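/* subtitle decoding thread: decodes subtitle packets, converts bitmap
   subtitle palettes from RGBA to YUVA and queues them for display */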
1923 static int subtitle_thread(void *arg)
1924 {
1925     VideoState *is = arg;
1926     SubPicture *sp;
1927     AVPacket pkt1, *pkt = &pkt1;
1928     int got_subtitle;
1929     double pts;
1930     int i, j;
1931     int r, g, b, y, u, v, a;
1932
1933     for (;;) {
1934         while (is->paused && !is->subtitleq.abort_request) {
1935             SDL_Delay(10);
1936         }
1937         if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
1938             break;
1939
1940         if (pkt->data == flush_pkt.data) {
1941             avcodec_flush_buffers(is->subtitle_st->codec);
1942             continue;
1943         }
1944         SDL_LockMutex(is->subpq_mutex);
1945         while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
1946                !is->subtitleq.abort_request) {
1947             SDL_CondWait(is->subpq_cond, is->subpq_mutex);
1948         }
1949         SDL_UnlockMutex(is->subpq_mutex);
1950
1951         if (is->subtitleq.abort_request)
1952             return 0;
1953
1954         sp = &is->subpq[is->subpq_windex];
1955
1956         /* NOTE: pts is the PTS of the _first_ picture beginning in
1957            this packet, if any */
1958         pts = 0;
1959         if (pkt->pts != AV_NOPTS_VALUE)
1960             pts = av_q2d(is->subtitle_st->time_base) * pkt->pts;
1961
1962         avcodec_decode_subtitle2(is->subtitle_st->codec, &sp->sub,
1963                                  &got_subtitle, pkt);
1964
1965         if (got_subtitle && sp->sub.format == 0) {
1966             sp->pts = pts;
1967
1968             for (i = 0; i < sp->sub.num_rects; i++)
1969             {
1970                 for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
1971                 {
1972                     RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
1973                     y = RGB_TO_Y_CCIR(r, g, b);
1974                     u = RGB_TO_U_CCIR(r, g, b, 0);
1975                     v = RGB_TO_V_CCIR(r, g, b, 0);
1976                     YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
1977                 }
1978             }
1979
1980             /* now we can update the picture count */
1981             if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
1982                 is->subpq_windex = 0;
1983             SDL_LockMutex(is->subpq_mutex);
1984             is->subpq_size++;
1985             SDL_UnlockMutex(is->subpq_mutex);
1986         }
1987         av_free_packet(pkt);
1988     }
1989     return 0;
1990 }
1991
1992 /* copy samples into the circular buffer used for the audio waveform/spectrum display */
1993 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1994 {
1995     int size, len;
1996
1997     size = samples_size / sizeof(short);
1998     while (size > 0) {
1999         len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
2000         if (len > size)
2001             len = size;
2002         memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
2003         samples += len;
2004         is->sample_array_index += len;
2005         if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
2006             is->sample_array_index = 0;
2007         size -= len;
2008     }
2009 }
2010
2011 /* return the wanted number of samples to get better sync if sync_type is video
2012  * or external master clock */
2013 static int synchronize_audio(VideoState *is, int nb_samples)
2014 {
2015     int wanted_nb_samples = nb_samples;
2016
2017     /* if not master, then we try to remove or add samples to correct the clock */
2018     if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
2019          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
2020         double diff, avg_diff;
2021         int min_nb_samples, max_nb_samples;
2022
2023         diff = get_audio_clock(is) - get_master_clock(is);
2024
2025         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
2026             is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
2027             if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
2028                 /* not enough measurements to have a correct estimate */
2029                 is->audio_diff_avg_count++;
2030             } else {
2031                 /* estimate the A-V difference */
2032                 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
2033
2034                 if (fabs(avg_diff) >= is->audio_diff_threshold) {
2035                     wanted_nb_samples = nb_samples + (int)(diff * is->audio_src_freq);
2036                     min_nb_samples = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX) / 100));
2037                     max_nb_samples = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX) / 100));
2038                     wanted_nb_samples = FFMIN(FFMAX(wanted_nb_samples, min_nb_samples), max_nb_samples);
2039                 }
2040                 av_dlog(NULL, "diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
2041                         diff, avg_diff, wanted_nb_samples - nb_samples,
2042                         is->audio_clock, is->video_clock, is->audio_diff_threshold);
2043             }
2044         } else {
2045             /* too big a difference: may be initial PTS errors, so
2046                reset the A-V filter */
2047             is->audio_diff_avg_count = 0;
2048             is->audio_diff_cum       = 0;
2049         }
2050     }
2051
2052     return wanted_nb_samples;
2053 }
2054
2055 /* decode one audio frame and return its uncompressed size */
2056 static int audio_decode_frame(VideoState *is, double *pts_ptr)
2057 {
2058     AVPacket *pkt_temp = &is->audio_pkt_temp;
2059     AVPacket *pkt = &is->audio_pkt;
2060     AVCodecContext *dec = is->audio_st->codec;
2061     int len1, len2, data_size, resampled_data_size;
2062     int64_t dec_channel_layout;
2063     int got_frame;
2064     double pts;
2065     int new_packet = 0;
2066     int flush_complete = 0;
2067     int wanted_nb_samples;
2068
2069     for (;;) {
2070         /* NOTE: the audio packet can contain several frames */
2071         while (pkt_temp->size > 0 || (!pkt_temp->data && new_packet)) {
2072             if (!is->frame) {
2073                 if (!(is->frame = avcodec_alloc_frame()))
2074                     return AVERROR(ENOMEM);
2075             } else
2076                 avcodec_get_frame_defaults(is->frame);
2077
2078             if (is->paused)
2079                 return -1;
2080
2081             if (flush_complete)
2082                 break;
2083             new_packet = 0;
2084             len1 = avcodec_decode_audio4(dec, is->frame, &got_frame, pkt_temp);
2085             if (len1 < 0) {
2086                 /* if error, we skip the frame */
2087                 pkt_temp->size = 0;
2088                 break;
2089             }
2090
2091             pkt_temp->data += len1;
2092             pkt_temp->size -= len1;
2093
2094             if (!got_frame) {
2095                 /* stop sending empty packets if the decoder is finished */
2096                 if (!pkt_temp->data && dec->codec->capabilities & CODEC_CAP_DELAY)
2097                     flush_complete = 1;
2098                 continue;
2099             }
2100             data_size = av_samples_get_buffer_size(NULL, dec->channels,
2101                                                    is->frame->nb_samples,
2102                                                    dec->sample_fmt, 1);
2103
2104             dec_channel_layout = (dec->channel_layout && dec->channels == av_get_channel_layout_nb_channels(dec->channel_layout)) ? dec->channel_layout : av_get_default_channel_layout(dec->channels);
2105             wanted_nb_samples = synchronize_audio(is, is->frame->nb_samples);
2106
2107             if (dec->sample_fmt != is->audio_src_fmt ||
2108                 dec_channel_layout != is->audio_src_channel_layout ||
2109                 dec->sample_rate != is->audio_src_freq ||
2110                 (wanted_nb_samples != is->frame->nb_samples && !is->swr_ctx)) {
2111                 if (is->swr_ctx)
2112                     swr_free(&is->swr_ctx);
2113                 is->swr_ctx = swr_alloc_set_opts(NULL,
2114                                                  is->audio_tgt_channel_layout, is->audio_tgt_fmt, is->audio_tgt_freq,
2115                                                  dec_channel_layout,           dec->sample_fmt,   dec->sample_rate,
2116                                                  0, NULL);
2117                 if (!is->swr_ctx || swr_init(is->swr_ctx) < 0) {
2118                     fprintf(stderr, "Cannot create sample rate converter for conversion of %d Hz %s %d channels to %d Hz %s %d channels!\n",
2119                         dec->sample_rate,
2120                         av_get_sample_fmt_name(dec->sample_fmt),
2121                         dec->channels,
2122                         is->audio_tgt_freq,
2123                         av_get_sample_fmt_name(is->audio_tgt_fmt),
2124                         is->audio_tgt_channels);
2125                     break;
2126                 }
2127                 is->audio_src_channel_layout = dec_channel_layout;
2128                 is->audio_src_channels = dec->channels;
2129                 is->audio_src_freq = dec->sample_rate;
2130                 is->audio_src_fmt = dec->sample_fmt;
2131             }
2132
2133             resampled_data_size = data_size;
2134             if (is->swr_ctx) {
2135                 const uint8_t *in[] = { is->frame->data[0] };
2136                 uint8_t *out[] = {is->audio_buf2};
2137                 if (wanted_nb_samples != is->frame->nb_samples) {
2138                     if (swr_set_compensation(is->swr_ctx, (wanted_nb_samples - is->frame->nb_samples) * is->audio_tgt_freq / dec->sample_rate,
2139                                                 wanted_nb_samples * is->audio_tgt_freq / dec->sample_rate) < 0) {
2140                         fprintf(stderr, "swr_set_compensation() failed\n");
2141                         break;
2142                     }
2143                 }
2144                 len2 = swr_convert(is->swr_ctx, out, sizeof(is->audio_buf2) / is->audio_tgt_channels / av_get_bytes_per_sample(is->audio_tgt_fmt),
2145                                                 in, is->frame->nb_samples);
2146                 if (len2 < 0) {
2147                     fprintf(stderr, "swr_convert() failed\n");
2148                     break;
2149                 }
2150                 if (len2 == sizeof(is->audio_buf2) / is->audio_tgt_channels / av_get_bytes_per_sample(is->audio_tgt_fmt)) {
2151                     fprintf(stderr, "warning: audio buffer is probably too small\n");
2152                     swr_init(is->swr_ctx);
2153                 }
2154                 is->audio_buf = is->audio_buf2;
2155                 resampled_data_size = len2 * is->audio_tgt_channels * av_get_bytes_per_sample(is->audio_tgt_fmt);
2156             } else {
2157                 is->audio_buf = is->frame->data[0];
2158             }
2159
2160             /* if no pts, then compute it */
2161             pts = is->audio_clock;
2162             *pts_ptr = pts;
2163             is->audio_clock += (double)data_size /
2164                 (dec->channels * dec->sample_rate * av_get_bytes_per_sample(dec->sample_fmt));
2165 #ifdef DEBUG
2166             {
2167                 static double last_clock;
2168                 printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
2169                        is->audio_clock - last_clock,
2170                        is->audio_clock, pts);
2171                 last_clock = is->audio_clock;
2172             }
2173 #endif
2174             return resampled_data_size;
2175         }
2176
2177         /* free the current packet */
2178         if (pkt->data)
2179             av_free_packet(pkt);
2180         memset(pkt_temp, 0, sizeof(*pkt_temp));
2181
2182         if (is->paused || is->audioq.abort_request) {
2183             return -1;
2184         }
2185
2186         /* read next packet */
2187         if ((new_packet = packet_queue_get(&is->audioq, pkt, 1)) < 0)
2188             return -1;
2189
2190         if (pkt->data == flush_pkt.data) {
2191             avcodec_flush_buffers(dec);
2192             flush_complete = 0;
2193         }
2194
2195         *pkt_temp = *pkt;
2196
2197         /* update the audio clock with the packet pts, if it is valid */
2198         if (pkt->pts != AV_NOPTS_VALUE) {
2199             is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
2200         }
2201     }
2202 }
2203
2204 /* prepare a new audio buffer */
2205 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2206 {
2207     VideoState *is = opaque;
2208     int audio_size, len1;
2209     int bytes_per_sec;
2210     int frame_size = av_samples_get_buffer_size(NULL, is->audio_tgt_channels, 1, is->audio_tgt_fmt, 1);
2211     double pts;
2212
2213     audio_callback_time = av_gettime();
2214
2215     while (len > 0) {
2216         if (is->audio_buf_index >= is->audio_buf_size) {
2217            audio_size = audio_decode_frame(is, &pts);
2218            if (audio_size < 0) {
2219                 /* if error, just output silence */
2220                is->audio_buf      = is->silence_buf;
2221                is->audio_buf_size = sizeof(is->silence_buf) / frame_size * frame_size;
2222            } else {
2223                if (is->show_mode != SHOW_MODE_VIDEO)
2224                    update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2225                is->audio_buf_size = audio_size;
2226            }
2227            is->audio_buf_index = 0;
2228         }
2229         len1 = is->audio_buf_size - is->audio_buf_index;
2230         if (len1 > len)
2231             len1 = len;
2232         memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2233         len -= len1;
2234         stream += len1;
2235         is->audio_buf_index += len1;
2236     }
2237     bytes_per_sec = is->audio_tgt_freq * is->audio_tgt_channels * av_get_bytes_per_sample(is->audio_tgt_fmt);
2238     is->audio_write_buf_size = is->audio_buf_size - is->audio_buf_index;
2239     /* Let's assume the audio driver that is used by SDL has two periods. */
2240     is->audio_current_pts = is->audio_clock - (double)(2 * is->audio_hw_buf_size + is->audio_write_buf_size) / bytes_per_sec;
2241     is->audio_current_pts_drift = is->audio_current_pts - audio_callback_time / 1000000.0;
2242 }
2243
2244 /* open a given stream. Return 0 if OK */
2245 static int stream_component_open(VideoState *is, int stream_index)
2246 {
2247     AVFormatContext *ic = is->ic;
2248     AVCodecContext *avctx;
2249     AVCodec *codec;
2250     SDL_AudioSpec wanted_spec, spec;
2251     AVDictionary *opts;
2252     AVDictionaryEntry *t = NULL;
2253     int64_t wanted_channel_layout = 0;
2254     int wanted_nb_channels;
2255     const char *env;
2256
2257     if (stream_index < 0 || stream_index >= ic->nb_streams)
2258         return -1;
2259     avctx = ic->streams[stream_index]->codec;
2260
2261     codec = avcodec_find_decoder(avctx->codec_id);
2262     opts = filter_codec_opts(codec_opts, codec, ic, ic->streams[stream_index]);
2263
2264     switch(avctx->codec_type){
2265         case AVMEDIA_TYPE_AUDIO   : is->last_audio_stream    = stream_index; if(audio_codec_name   ) codec= avcodec_find_decoder_by_name(   audio_codec_name); break;
2266         case AVMEDIA_TYPE_SUBTITLE: is->last_subtitle_stream = stream_index; if(subtitle_codec_name) codec= avcodec_find_decoder_by_name(subtitle_codec_name); break;
2267         case AVMEDIA_TYPE_VIDEO   : is->last_video_stream    = stream_index; if(video_codec_name   ) codec= avcodec_find_decoder_by_name(   video_codec_name); break;
2268     }
2269     if (!codec)
2270         return -1;
2271
2272     avctx->workaround_bugs   = workaround_bugs;
2273     avctx->lowres            = lowres;
2274     if(avctx->lowres > codec->max_lowres){
2275         av_log(avctx, AV_LOG_WARNING, "The maximum value for lowres supported by the decoder is %d\n",
2276                 codec->max_lowres);
2277         avctx->lowres= codec->max_lowres;
2278     }
2279     avctx->idct_algo         = idct;
2280     avctx->skip_frame        = skip_frame;
2281     avctx->skip_idct         = skip_idct;
2282     avctx->skip_loop_filter  = skip_loop_filter;
2283     avctx->error_concealment = error_concealment;
2284
2285     if(avctx->lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
2286     if (fast)   avctx->flags2 |= CODEC_FLAG2_FAST;
2287     if(codec->capabilities & CODEC_CAP_DR1)
2288         avctx->flags |= CODEC_FLAG_EMU_EDGE;
2289
2290     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2291         memset(&is->audio_pkt_temp, 0, sizeof(is->audio_pkt_temp));
2292         env = SDL_getenv("SDL_AUDIO_CHANNELS");
2293         if (env)
2294             wanted_channel_layout = av_get_default_channel_layout(SDL_atoi(env));
2295         if (!wanted_channel_layout) {
2296             wanted_channel_layout = (avctx->channel_layout && avctx->channels == av_get_channel_layout_nb_channels(avctx->channel_layout)) ? avctx->channel_layout : av_get_default_channel_layout(avctx->channels);
2297             wanted_channel_layout &= ~AV_CH_LAYOUT_STEREO_DOWNMIX;
2298             wanted_nb_channels = av_get_channel_layout_nb_channels(wanted_channel_layout);
2299             /* SDL only supports 1, 2, 4 or 6 channels at the moment, so we have to make sure not to request anything else. */
2300             while (wanted_nb_channels > 0 && (wanted_nb_channels == 3 || wanted_nb_channels == 5 || wanted_nb_channels > (SDL_VERSION_ATLEAST(1, 2, 8) ? 6 : 2))) {
2301                 wanted_nb_channels--;
2302                 wanted_channel_layout = av_get_default_channel_layout(wanted_nb_channels);
2303             }
2304         }
2305         wanted_spec.channels = av_get_channel_layout_nb_channels(wanted_channel_layout);
2306         wanted_spec.freq = avctx->sample_rate;
2307         if (wanted_spec.freq <= 0 || wanted_spec.channels <= 0) {
2308             fprintf(stderr, "Invalid sample rate or channel count!\n");
2309             return -1;
2310         }
2311     }
2312
2313     if (!av_dict_get(opts, "threads", NULL, 0))
2314         av_dict_set(&opts, "threads", "auto", 0);
2315     if (!codec ||
2316         avcodec_open2(avctx, codec, &opts) < 0)
2317         return -1;
2318     if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2319         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2320         return AVERROR_OPTION_NOT_FOUND;
2321     }
2322
2323     /* prepare audio output */
2324     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2325         wanted_spec.format = AUDIO_S16SYS;
2326         wanted_spec.silence = 0;
2327         wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
2328         wanted_spec.callback = sdl_audio_callback;
2329         wanted_spec.userdata = is;
2330         if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2331             fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
2332             return -1;
2333         }
2334         is->audio_hw_buf_size = spec.size;
2335         if (spec.format != AUDIO_S16SYS) {
2336             fprintf(stderr, "SDL advised audio format %d is not supported!\n", spec.format);
2337             return -1;
2338         }
2339         if (spec.channels != wanted_spec.channels) {
2340             wanted_channel_layout = av_get_default_channel_layout(spec.channels);
2341             if (!wanted_channel_layout) {
2342                 fprintf(stderr, "SDL advised channel count %d is not supported!\n", spec.channels);
2343                 return -1;
2344             }
2345         }
2346         is->audio_src_fmt = is->audio_tgt_fmt = AV_SAMPLE_FMT_S16;
2347         is->audio_src_freq = is->audio_tgt_freq = spec.freq;
2348         is->audio_src_channel_layout = is->audio_tgt_channel_layout = wanted_channel_layout;
2349         is->audio_src_channels = is->audio_tgt_channels = spec.channels;
2350     }
2351
2352     ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2353     switch (avctx->codec_type) {
2354     case AVMEDIA_TYPE_AUDIO:
2355         is->audio_stream = stream_index;
2356         is->audio_st = ic->streams[stream_index];
2357         is->audio_buf_size  = 0;
2358         is->audio_buf_index = 0;
2359
2360         /* init averaging filter */
2361         is->audio_diff_avg_coef  = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2362         is->audio_diff_avg_count = 0;
2363         /* since we do not have a precise enough audio FIFO fullness measure,
2364            we correct audio sync only if the error is larger than this threshold */
2365         is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / wanted_spec.freq;
2366
2367         memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
2368         packet_queue_start(&is->audioq);
2369         SDL_PauseAudio(0);
2370         break;
2371     case AVMEDIA_TYPE_VIDEO:
2372         is->video_stream = stream_index;
2373         is->video_st = ic->streams[stream_index];
2374
2375         packet_queue_start(&is->videoq);
2376         is->video_tid = SDL_CreateThread(video_thread, is);
2377         break;
2378     case AVMEDIA_TYPE_SUBTITLE:
2379         is->subtitle_stream = stream_index;
2380         is->subtitle_st = ic->streams[stream_index];
2381         packet_queue_start(&is->subtitleq);
2382
2383         is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
2384         break;
2385     default:
2386         break;
2387     }
2388     return 0;
2389 }
2390
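/* close a stream component and free the resources associated with it */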
2391 static void stream_component_close(VideoState *is, int stream_index)
2392 {
2393     AVFormatContext *ic = is->ic;
2394     AVCodecContext *avctx;
2395
2396     if (stream_index < 0 || stream_index >= ic->nb_streams)
2397         return;
2398     avctx = ic->streams[stream_index]->codec;
2399
2400     switch (avctx->codec_type) {
2401     case AVMEDIA_TYPE_AUDIO:
2402         packet_queue_abort(&is->audioq);
2403
2404         SDL_CloseAudio();
2405
2406         packet_queue_flush(&is->audioq);
2407         av_free_packet(&is->audio_pkt);
2408         if (is->swr_ctx)
2409             swr_free(&is->swr_ctx);
2410         av_freep(&is->audio_buf1);
2411         is->audio_buf = NULL;
2412         av_freep(&is->frame);
2413
2414         if (is->rdft) {
2415             av_rdft_end(is->rdft);
2416             av_freep(&is->rdft_data);
2417             is->rdft = NULL;
2418             is->rdft_bits = 0;
2419         }
2420         break;
2421     case AVMEDIA_TYPE_VIDEO:
2422         packet_queue_abort(&is->videoq);
2423
2424         /* note: we also signal this mutex to make sure we deblock the
2425            video thread in all cases */
2426         SDL_LockMutex(is->pictq_mutex);
2427         SDL_CondSignal(is->pictq_cond);
2428         SDL_UnlockMutex(is->pictq_mutex);
2429
2430         SDL_WaitThread(is->video_tid, NULL);
2431
2432         packet_queue_flush(&is->videoq);
2433         break;
2434     case AVMEDIA_TYPE_SUBTITLE:
2435         packet_queue_abort(&is->subtitleq);
2436
2437         /* note: we also signal this mutex to make sure we deblock the
2438            subtitle thread in all cases */
2439         SDL_LockMutex(is->subpq_mutex);
2440         is->subtitle_stream_changed = 1;
2441
2442         SDL_CondSignal(is->subpq_cond);
2443         SDL_UnlockMutex(is->subpq_mutex);
2444
2445         SDL_WaitThread(is->subtitle_tid, NULL);
2446
2447         packet_queue_flush(&is->subtitleq);
2448         break;
2449     default:
2450         break;
2451     }
2452
2453     ic->streams[stream_index]->discard = AVDISCARD_ALL;
2454     avcodec_close(avctx);
2455     switch (avctx->codec_type) {
2456     case AVMEDIA_TYPE_AUDIO:
2457         is->audio_st = NULL;
2458         is->audio_stream = -1;
2459         break;
2460     case AVMEDIA_TYPE_VIDEO:
2461         is->video_st = NULL;
2462         is->video_stream = -1;
2463         break;
2464     case AVMEDIA_TYPE_SUBTITLE:
2465         is->subtitle_st = NULL;
2466         is->subtitle_stream = -1;
2467         break;
2468     default:
2469         break;
2470     }
2471 }
2472
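/* called from blocking libavformat I/O functions to check whether they
   should be aborted */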
2473 static int decode_interrupt_cb(void *ctx)
2474 {
2475     VideoState *is = ctx;
2476     return is->abort_request;
2477 }
2478
2479 /* this thread gets the stream from the disk or the network */
2480 static int read_thread(void *arg)
2481 {
2482     VideoState *is = arg;
2483     AVFormatContext *ic = NULL;
2484     int err, i, ret;
2485     int st_index[AVMEDIA_TYPE_NB];
2486     AVPacket pkt1, *pkt = &pkt1;
2487     int eof = 0;
2488     int pkt_in_play_range = 0;
2489     AVDictionaryEntry *t;
2490     AVDictionary **opts;
2491     int orig_nb_streams;
2492
2493     memset(st_index, -1, sizeof(st_index));
2494     is->last_video_stream = is->video_stream = -1;
2495     is->last_audio_stream = is->audio_stream = -1;
2496     is->last_subtitle_stream = is->subtitle_stream = -1;
2497
2498     ic = avformat_alloc_context();
2499     ic->interrupt_callback.callback = decode_interrupt_cb;
2500     ic->interrupt_callback.opaque = is;
2501     err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
2502     if (err < 0) {
2503         print_error(is->filename, err);
2504         ret = -1;
2505         goto fail;
2506     }
2507     if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2508         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2509         ret = AVERROR_OPTION_NOT_FOUND;
2510         goto fail;
2511     }
2512     is->ic = ic;
2513
2514     if (genpts)
2515         ic->flags |= AVFMT_FLAG_GENPTS;
2516
2517     opts = setup_find_stream_info_opts(ic, codec_opts);
2518     orig_nb_streams = ic->nb_streams;
2519
2520     err = avformat_find_stream_info(ic, opts);
2521     if (err < 0) {
2522         fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
2523         ret = -1;
2524         goto fail;
2525     }
2526     for (i = 0; i < orig_nb_streams; i++)
2527         av_dict_free(&opts[i]);
2528     av_freep(&opts);
2529
2530     if (ic->pb)
2531         ic->pb->eof_reached = 0; // FIXME hack: ffplay should probably not use url_feof() to test for the end
2532
2533     if (seek_by_bytes < 0)
2534         seek_by_bytes = !!(ic->iformat->flags & AVFMT_TS_DISCONT);
2535
2536     /* if seeking requested, we execute it */
2537     if (start_time != AV_NOPTS_VALUE) {
2538         int64_t timestamp;
2539
2540         timestamp = start_time;
2541         /* add the stream start time */
2542         if (ic->start_time != AV_NOPTS_VALUE)
2543             timestamp += ic->start_time;
2544         ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2545         if (ret < 0) {
2546             fprintf(stderr, "%s: could not seek to position %0.3f\n",
2547                     is->filename, (double)timestamp / AV_TIME_BASE);
2548         }
2549     }
2550
2551     for (i = 0; i < ic->nb_streams; i++)
2552         ic->streams[i]->discard = AVDISCARD_ALL;
2553     if (!video_disable)
2554         st_index[AVMEDIA_TYPE_VIDEO] =
2555             av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2556                                 wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2557     if (!audio_disable)
2558         st_index[AVMEDIA_TYPE_AUDIO] =
2559             av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2560                                 wanted_stream[AVMEDIA_TYPE_AUDIO],
2561                                 st_index[AVMEDIA_TYPE_VIDEO],
2562                                 NULL, 0);
2563     if (!video_disable)
2564         st_index[AVMEDIA_TYPE_SUBTITLE] =
2565             av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2566                                 wanted_stream[AVMEDIA_TYPE_SUBTITLE],
2567                                 (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2568                                  st_index[AVMEDIA_TYPE_AUDIO] :
2569                                  st_index[AVMEDIA_TYPE_VIDEO]),
2570                                 NULL, 0);
2571     if (show_status) {
2572         av_dump_format(ic, 0, is->filename, 0);
2573     }
2574
2575     is->show_mode = show_mode;
2576
2577     /* open the streams */
2578     if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2579         stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2580     }
2581
2582     ret = -1;
2583     if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2584         ret = stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2585     }
2586     is->refresh_tid = SDL_CreateThread(refresh_thread, is);
2587     if (is->show_mode == SHOW_MODE_NONE)
2588         is->show_mode = ret >= 0 ? SHOW_MODE_VIDEO : SHOW_MODE_RDFT;
2589
2590     if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2591         stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2592     }
2593
2594     if (is->video_stream < 0 && is->audio_stream < 0) {
2595         fprintf(stderr, "%s: could not open codecs\n", is->filename);
2596         ret = -1;
2597         goto fail;
2598     }
2599
2600     for (;;) {
2601         if (is->abort_request)
2602             break;
2603         if (is->paused != is->last_paused) {
2604             is->last_paused = is->paused;
2605             if (is->paused)
2606                 is->read_pause_return = av_read_pause(ic);
2607             else
2608                 av_read_play(ic);
2609         }
2610 #if CONFIG_RTSP_DEMUXER || CONFIG_MMSH_PROTOCOL
2611         if (is->paused &&
2612                 (!strcmp(ic->iformat->name, "rtsp") ||
2613                  (ic->pb && !strncmp(input_filename, "mmsh:", 5)))) {
2614             /* wait 10 ms to avoid trying to get another packet */
2615             /* XXX: horrible */
2616             SDL_Delay(10);
2617             continue;
2618         }
2619 #endif
2620         if (is->seek_req) {
2621             int64_t seek_target = is->seek_pos;
2622             int64_t seek_min    = is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2623             int64_t seek_max    = is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2624 // FIXME the +-2 is due to rounding not being done in the correct direction when
2625 //       generating the seek_pos/seek_rel variables
2626
2627             ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2628             if (ret < 0) {
2629                 fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
2630             } else {
2631                 if (is->audio_stream >= 0) {
2632                     packet_queue_flush(&is->audioq);
2633                     packet_queue_put(&is->audioq, &flush_pkt);
2634                 }
2635                 if (is->subtitle_stream >= 0) {
2636                     packet_queue_flush(&is->subtitleq);
2637                     packet_queue_put(&is->subtitleq, &flush_pkt);
2638                 }
2639                 if (is->video_stream >= 0) {
2640                     packet_queue_flush(&is->videoq);
2641                     packet_queue_put(&is->videoq, &flush_pkt);
2642                 }
2643             }
2644             is->seek_req = 0;
2645             eof = 0;
2646         }
2647
2648         /* if the queues are full, no need to read more */
2649         if (   is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2650             || (   (is->audioq   .nb_packets > MIN_FRAMES || is->audio_stream < 0 || is->audioq.abort_request)
2651                 && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream < 0 || is->videoq.abort_request)
2652                 && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream < 0 || is->subtitleq.abort_request))) {
2653             /* wait 10 ms */
2654             SDL_Delay(10);
2655             continue;
2656         }
2657         if (eof) {
2658             if (is->video_stream >= 0) {
2659                 av_init_packet(pkt);
2660                 pkt->data = NULL;
2661                 pkt->size = 0;
2662                 pkt->stream_index = is->video_stream;
2663                 packet_queue_put(&is->videoq, pkt);
2664             }
2665             if (is->audio_stream >= 0 &&
2666                 is->audio_st->codec->codec->capabilities & CODEC_CAP_DELAY) {
2667                 av_init_packet(pkt);
2668                 pkt->data = NULL;
2669                 pkt->size = 0;
2670                 pkt->stream_index = is->audio_stream;
2671                 packet_queue_put(&is->audioq, pkt);
2672             }
2673             SDL_Delay(10);
2674             if (is->audioq.size + is->videoq.size + is->subtitleq.size == 0) {
2675                 if (loop != 1 && (!loop || --loop)) {
2676                     stream_seek(is, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
2677                 } else if (autoexit) {
2678                     ret = AVERROR_EOF;
2679                     goto fail;
2680                 }
2681             }
2682             eof=0;
2683             continue;
2684         }
2685         ret = av_read_frame(ic, pkt);
2686         if (ret < 0) {
2687             if (ret == AVERROR_EOF || url_feof(ic->pb))
2688                 eof = 1;
2689             if (ic->pb && ic->pb->error)
2690                 break;
2691             SDL_Delay(100); /* wait for user event */
2692             continue;
2693         }
2694         /* check if the packet is in the play range specified by the user, then queue it, otherwise discard it */
2695         pkt_in_play_range = duration == AV_NOPTS_VALUE ||
2696                 (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
2697                 av_q2d(ic->streams[pkt->stream_index]->time_base) -
2698                 (double)(start_time != AV_NOPTS_VALUE ? start_time : 0) / 1000000
2699                 <= ((double)duration / 1000000);
2700         if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
2701             packet_queue_put(&is->audioq, pkt);
2702         } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
2703             packet_queue_put(&is->videoq, pkt);
2704         } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
2705             packet_queue_put(&is->subtitleq, pkt);
2706         } else {
2707             av_free_packet(pkt);
2708         }
2709     }
2710     /* wait until the end */
2711     while (!is->abort_request) {
2712         SDL_Delay(100);
2713     }
2714
2715     ret = 0;
2716  fail:
2717     /* close each stream */
2718     if (is->audio_stream >= 0)
2719         stream_component_close(is, is->audio_stream);
2720     if (is->video_stream >= 0)
2721         stream_component_close(is, is->video_stream);
2722     if (is->subtitle_stream >= 0)
2723         stream_component_close(is, is->subtitle_stream);
2724     if (is->ic) {
2725         avformat_close_input(&is->ic);
2726     }
2727
2728     if (ret != 0) {
2729         SDL_Event event;
2730
2731         event.type = FF_QUIT_EVENT;
2732         event.user.data1 = is;
2733         SDL_PushEvent(&event);
2734     }
2735     return 0;
2736 }
2737
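/* allocate a VideoState for the given input, create its queues and locks
   and start the read thread */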
2738 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2739 {
2740     VideoState *is;
2741
2742     is = av_mallocz(sizeof(VideoState));
2743     if (!is)
2744         return NULL;
2745     av_strlcpy(is->filename, filename, sizeof(is->filename));
2746     is->iformat = iformat;
2747     is->ytop    = 0;
2748     is->xleft   = 0;
2749
2750     /* start video display */
2751     is->pictq_mutex = SDL_CreateMutex();
2752     is->pictq_cond  = SDL_CreateCond();
2753
2754     is->subpq_mutex = SDL_CreateMutex();
2755     is->subpq_cond  = SDL_CreateCond();
2756
2757     packet_queue_init(&is->videoq);
2758     packet_queue_init(&is->audioq);
2759     packet_queue_init(&is->subtitleq);
2760
2761     is->av_sync_type = av_sync_type;
2762     is->read_tid     = SDL_CreateThread(read_thread, is);
2763     if (!is->read_tid) {
2764         av_free(is);
2765         return NULL;
2766     }
2767     return is;
2768 }
2769
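/* switch to the next suitable stream of the given type (audio, video or subtitle) */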
2770 static void stream_cycle_channel(VideoState *is, int codec_type)
2771 {
2772     AVFormatContext *ic = is->ic;
2773     int start_index, stream_index;
2774     int old_index;
2775     AVStream *st;
2776
2777     if (codec_type == AVMEDIA_TYPE_VIDEO) {
2778         start_index = is->last_video_stream;
2779         old_index = is->video_stream;
2780     } else if (codec_type == AVMEDIA_TYPE_AUDIO) {
2781         start_index = is->last_audio_stream;
2782         old_index = is->audio_stream;
2783     } else {
2784         start_index = is->last_subtitle_stream;
2785         old_index = is->subtitle_stream;
2786     }
2787     stream_index = start_index;
2788     for (;;) {
2789         if (++stream_index >= is->ic->nb_streams)
2790         {
2791             if (codec_type == AVMEDIA_TYPE_SUBTITLE)
2792             {
2793                 stream_index = -1;
2794                 is->last_subtitle_stream = -1;
2795                 goto the_end;
2796             }
2797             if (start_index == -1)
2798                 return;
2799             stream_index = 0;
2800         }
2801         if (stream_index == start_index)
2802             return;
2803         st = ic->streams[stream_index];
2804         if (st->codec->codec_type == codec_type) {
2805             /* check that parameters are OK */
2806             switch (codec_type) {
2807             case AVMEDIA_TYPE_AUDIO:
2808                 if (st->codec->sample_rate != 0 &&
2809                     st->codec->channels != 0)
2810                     goto the_end;
2811                 break;
2812             case AVMEDIA_TYPE_VIDEO:
2813             case AVMEDIA_TYPE_SUBTITLE:
2814                 goto the_end;
2815             default:
2816                 break;
2817             }
2818         }
2819     }
2820  the_end:
2821     stream_component_close(is, old_index);
2822     stream_component_open(is, stream_index);
2823 }
2824
2825
2826 static void toggle_full_screen(VideoState *is)
2827 {
2828     av_unused int i;
2829     is_full_screen = !is_full_screen;
2830 #if defined(__APPLE__) && SDL_VERSION_ATLEAST(1, 2, 14)
2831     /* OS X needs to reallocate the SDL overlays */
2832     for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
2833         is->pictq[i].reallocate = 1;
2834     }
2835 #endif
2836     video_open(is, 1);
2837 }
2838
2839 static void toggle_pause(VideoState *is)
2840 {
2841     stream_toggle_pause(is);
2842     is->step = 0;
2843 }
2844
2845 static void step_to_next_frame(VideoState *is)
2846 {
2847     /* if the stream is paused, unpause it, then step */
2848     if (is->paused)
2849         stream_toggle_pause(is);
2850     is->step = 1;
2851 }
2852
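/* cycle through the display modes (video, waveform, spectrum) and clear the
   display area */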
2853 static void toggle_audio_display(VideoState *is)
2854 {
2855     int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2856     is->show_mode = (is->show_mode + 1) % SHOW_MODE_NB;
2857     fill_rectangle(screen,
2858                 is->xleft, is->ytop, is->width, is->height,
2859                 bgcolor);
2860     SDL_UpdateRect(screen, is->xleft, is->ytop, is->width, is->height);
2861 }
2862
2863 /* handle an event sent by the GUI */
2864 static void event_loop(VideoState *cur_stream)
2865 {
2866     SDL_Event event;
2867     double incr, pos, frac;
2868
2869     for (;;) {
2870         double x;
2871         SDL_WaitEvent(&event);
2872         switch (event.type) {
2873         case SDL_KEYDOWN:
2874             if (exit_on_keydown) {
2875                 do_exit(cur_stream);
2876                 break;
2877             }
2878             switch (event.key.keysym.sym) {
2879             case SDLK_ESCAPE:
2880             case SDLK_q:
2881                 do_exit(cur_stream);
2882                 break;
2883             case SDLK_f:
2884                 toggle_full_screen(cur_stream);
2885                 cur_stream->force_refresh = 1;
2886                 break;
2887             case SDLK_p:
2888             case SDLK_SPACE:
2889                 toggle_pause(cur_stream);
2890                 break;
2891             case SDLK_s: // S: Step to next frame
2892                 step_to_next_frame(cur_stream);
2893                 break;
2894             case SDLK_a:
2895                 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
2896                 break;
2897             case SDLK_v:
2898                 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
2899                 break;
2900             case SDLK_t:
2901                 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
2902                 break;
2903             case SDLK_w:
2904                 toggle_audio_display(cur_stream);
2905                 cur_stream->force_refresh = 1;
2906                 break;
2907             case SDLK_PAGEUP:
2908                 incr = 600.0;
2909                 goto do_seek;
2910             case SDLK_PAGEDOWN:
2911                 incr = -600.0;
2912                 goto do_seek;
2913             case SDLK_LEFT:
2914                 incr = -10.0;
2915                 goto do_seek;
2916             case SDLK_RIGHT:
2917                 incr = 10.0;
2918                 goto do_seek;
2919             case SDLK_UP:
2920                 incr = 60.0;
2921                 goto do_seek;
2922             case SDLK_DOWN:
2923                 incr = -60.0;
2924             do_seek:
2925                     if (seek_by_bytes) {
2926                         if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos >= 0) {
2927                             pos = cur_stream->video_current_pos;
2928                         } else if (cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos >= 0) {
2929                             pos = cur_stream->audio_pkt.pos;
2930                         } else
2931                             pos = avio_tell(cur_stream->ic->pb);
2932                         if (cur_stream->ic->bit_rate)
2933                             incr *= cur_stream->ic->bit_rate / 8.0;
2934                         else
2935                             incr *= 180000.0;
2936                         pos += incr;
2937                         stream_seek(cur_stream, pos, incr, 1);
2938                     } else {
2939                         pos = get_master_clock(cur_stream);
2940                         pos += incr;
2941                         stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
2942                     }
2943                 break;
2944             default:
2945                 break;
2946             }
2947             break;
2948         case SDL_VIDEOEXPOSE:
2949             cur_stream->force_refresh = 1;
2950             break;
2951         case SDL_MOUSEBUTTONDOWN:
2952             if (exit_on_mousedown) {
2953                 do_exit(cur_stream);
2954                 break;
2955             }
2956         case SDL_MOUSEMOTION:
2957             if (event.type == SDL_MOUSEBUTTONDOWN) {
2958                 x = event.button.x;
2959             } else {
2960                 if (event.motion.state != SDL_PRESSED)
2961                     break;
2962                 x = event.motion.x;
2963             }
2964                 if (seek_by_bytes || cur_stream->ic->duration <= 0) {
2965                     uint64_t size =  avio_size(cur_stream->ic->pb);
2966                     stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
2967                 } else {
2968                     int64_t ts;
2969                     int ns, hh, mm, ss;
2970                     int tns, thh, tmm, tss;
2971                     tns  = cur_stream->ic->duration / 1000000LL;
2972                     thh  = tns / 3600;
2973                     tmm  = (tns % 3600) / 60;
2974                     tss  = (tns % 60);
2975                     frac = x / cur_stream->width;
2976                     ns   = frac * tns;
2977                     hh   = ns / 3600;
2978                     mm   = (ns % 3600) / 60;
2979                     ss   = (ns % 60);
2980                     fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
2981                             hh, mm, ss, thh, tmm, tss);
2982                     ts = frac * cur_stream->ic->duration;
2983                     if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
2984                         ts += cur_stream->ic->start_time;
2985                     stream_seek(cur_stream, ts, 0, 0);
2986                 }
2987             break;
2988         case SDL_VIDEORESIZE:
2989                 screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
2990                                           SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
2991                 screen_width  = cur_stream->width  = event.resize.w;
2992                 screen_height = cur_stream->height = event.resize.h;
2993                 cur_stream->force_refresh = 1;
2994             break;
2995         case SDL_QUIT:
2996         case FF_QUIT_EVENT:
2997             do_exit(cur_stream);
2998             break;
2999         case FF_ALLOC_EVENT:
3000             alloc_picture(event.user.data1);
3001             break;
3002         case FF_REFRESH_EVENT:
3003             video_refresh(event.user.data1);
3004             cur_stream->refresh = 0;
3005             break;
3006         default:
3007             break;
3008         }
3009     }
3010 }
3011
3012 static int opt_frame_size(const char *opt, const char *arg)
3013 {
3014     av_log(NULL, AV_LOG_WARNING, "Option -s is deprecated, use -video_size.\n");
3015     return opt_default("video_size", arg);
3016 }
3017
3018 static int opt_width(const char *opt, const char *arg)
3019 {
3020     screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
3021     return 0;
3022 }
3023
3024 static int opt_height(const char *opt, const char *arg)
3025 {
3026     screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
3027     return 0;
3028 }
3029
3030 static int opt_format(const char *opt, const char *arg)
3031 {
3032     file_iformat = av_find_input_format(arg);
3033     if (!file_iformat) {
3034         fprintf(stderr, "Unknown input format: %s\n", arg);
3035         return AVERROR(EINVAL);
3036     }
3037     return 0;
3038 }
3039
3040 static int opt_frame_pix_fmt(const char *opt, const char *arg)
3041 {
3042     av_log(NULL, AV_LOG_WARNING, "Option -pix_fmt is deprecated, use -pixel_format.\n");
3043     return opt_default("pixel_format", arg);
3044 }
3045
3046 static int opt_sync(const char *opt, const char *arg)
3047 {
3048     if (!strcmp(arg, "audio"))
3049         av_sync_type = AV_SYNC_AUDIO_MASTER;
3050     else if (!strcmp(arg, "video"))
3051         av_sync_type = AV_SYNC_VIDEO_MASTER;
3052     else if (!strcmp(arg, "ext"))
3053         av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
3054     else {
3055         fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
3056         exit(1);
3057     }
3058     return 0;
3059 }
3060
3061 static int opt_seek(const char *opt, const char *arg)
3062 {
3063     start_time = parse_time_or_die(opt, arg, 1);
3064     return 0;
3065 }
3066
3067 static int opt_duration(const char *opt, const char *arg)
3068 {
3069     duration = parse_time_or_die(opt, arg, 1);
3070     return 0;
3071 }
3072
3073 static int opt_show_mode(const char *opt, const char *arg)
3074 {
3075     show_mode = !strcmp(arg, "video") ? SHOW_MODE_VIDEO :
3076                 !strcmp(arg, "waves") ? SHOW_MODE_WAVES :
3077                 !strcmp(arg, "rdft" ) ? SHOW_MODE_RDFT  :
3078                 parse_number_or_die(opt, arg, OPT_INT, 0, SHOW_MODE_NB-1);
3079     return 0;
3080 }
3081
3082 static void opt_input_file(void *optctx, const char *filename)
3083 {
3084     if (input_filename) {
3085         fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
3086                 filename, input_filename);
3087         exit_program(1);
3088     }
3089     if (!strcmp(filename, "-"))
3090         filename = "pipe:";
3091     input_filename = filename;
3092 }
3093
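/* -codec:a, -codec:s and -codec:v all reach this handler through the generic
 * "codec" entry in the option table; the last character of the option string
 * (the stream specifier) selects which forced codec name to set. */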
3094 static int opt_codec(void *o, const char *opt, const char *arg)
3095 {
3096     switch (opt[strlen(opt)-1]) {
3097     case 'a' :    audio_codec_name = arg; break;
3098     case 's' : subtitle_codec_name = arg; break;
3099     case 'v' :    video_codec_name = arg; break;
3100     }
3101     return 0;
3102 }
3103
3104 static int dummy; /* value sink for the compatibility-only -i option below */
3105
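/* command line option table; cmdutils_common_opts.h contributes the options
 * shared by all FFmpeg command line tools (version, loglevel, ...) */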
3106 static const OptionDef options[] = {
3107 #include "cmdutils_common_opts.h"
3108     { "x", HAS_ARG, { (void*)opt_width }, "force displayed width", "width" },
3109     { "y", HAS_ARG, { (void*)opt_height }, "force displayed height", "height" },
3110     { "s", HAS_ARG | OPT_VIDEO, { (void*)opt_frame_size }, "set frame size (WxH or abbreviation)", "size" },
3111     { "fs", OPT_BOOL, { (void*)&is_full_screen }, "force full screen" },
3112     { "an", OPT_BOOL, { (void*)&audio_disable }, "disable audio" },
3113     { "vn", OPT_BOOL, { (void*)&video_disable }, "disable video" },
3114     { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&wanted_stream[AVMEDIA_TYPE_AUDIO] }, "select desired audio stream", "stream_number" },
3115     { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&wanted_stream[AVMEDIA_TYPE_VIDEO] }, "select desired video stream", "stream_number" },
3116     { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&wanted_stream[AVMEDIA_TYPE_SUBTITLE] }, "select desired subtitle stream", "stream_number" },
3117     { "ss", HAS_ARG, { (void*)&opt_seek }, "seek to a given position in seconds", "pos" },
3118     { "t", HAS_ARG, { (void*)&opt_duration }, "play  \"duration\" seconds of audio/video", "duration" },
3119     { "bytes", OPT_INT | HAS_ARG, { (void*)&seek_by_bytes }, "seek by bytes 0=off 1=on -1=auto", "val" },
3120     { "nodisp", OPT_BOOL, { (void*)&display_disable }, "disable graphical display" },
3121     { "f", HAS_ARG, { (void*)opt_format }, "force format", "fmt" },
3122     { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, { (void*)opt_frame_pix_fmt }, "set pixel format", "format" },
3123     { "stats", OPT_BOOL | OPT_EXPERT, { (void*)&show_status }, "show status", "" },
3124     { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&workaround_bugs }, "workaround bugs", "" },
3125     { "fast", OPT_BOOL | OPT_EXPERT, { (void*)&fast }, "non spec compliant optimizations", "" },
3126     { "genpts", OPT_BOOL | OPT_EXPERT, { (void*)&genpts }, "generate pts", "" },
3127     { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&decoder_reorder_pts }, "let decoder reorder pts 0=off 1=on -1=auto", ""},
3128     { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&lowres }, "", "" },
3129     { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&skip_loop_filter }, "", "" },
3130     { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&skip_frame }, "", "" },
3131     { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&skip_idct }, "", "" },
3132     { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&idct }, "set idct algo",  "algo" },
3133     { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&error_concealment }, "set error concealment options",  "bit_mask" },
3134     { "sync", HAS_ARG | OPT_EXPERT, { (void*)opt_sync }, "set audio-video sync. type (type=audio/video/ext)", "type" },
3135     { "autoexit", OPT_BOOL | OPT_EXPERT, { (void*)&autoexit }, "exit at the end", "" },
3136     { "exitonkeydown", OPT_BOOL | OPT_EXPERT, { (void*)&exit_on_keydown }, "exit on key down", "" },
3137     { "exitonmousedown", OPT_BOOL | OPT_EXPERT, { (void*)&exit_on_mousedown }, "exit on mouse down", "" },
3138     { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&loop }, "set number of times the playback shall be looped", "loop count" },
3139     { "framedrop", OPT_BOOL | OPT_EXPERT, { (void*)&framedrop }, "drop frames when cpu is too slow", "" },
3140     { "window_title", OPT_STRING | HAS_ARG, { (void*)&window_title }, "set window title", "window title" },
3141 #if CONFIG_AVFILTER
3142     { "vf", OPT_STRING | HAS_ARG, { (void*)&vfilters }, "video filters", "filter list" },
3143 #endif
3144     { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, { (void*)&rdftspeed }, "rdft speed", "msecs" },
3145     { "showmode", HAS_ARG, {(void*)opt_show_mode}, "select show mode (0 = video, 1 = waves, 2 = RDFT)", "mode" },
3146     { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, { (void*)opt_default }, "generic catch all option", "" },
3147     { "i", OPT_BOOL, {(void *)&dummy}, "read specified file", "input_file"},
3148     { "codec", HAS_ARG | OPT_FUNC2, {(void*)opt_codec}, "force decoder", "decoder" },
3149     { NULL, },
3150 };
3151
3152 static void show_usage(void)
3153 {
3154     av_log(NULL, AV_LOG_INFO, "Simple media player\n");
3155     av_log(NULL, AV_LOG_INFO, "usage: %s [options] input_file\n", program_name);
3156     av_log(NULL, AV_LOG_INFO, "\n");
3157 }
3158
3159 static int opt_help(const char *opt, const char *arg)
3160 {
3161     av_log_set_callback(log_callback_help);
3162     show_usage();
3163     show_help_options(options, "Main options:\n",
3164                       OPT_EXPERT, 0);
3165     show_help_options(options, "\nAdvanced options:\n",
3166                       OPT_EXPERT, OPT_EXPERT);
3167     printf("\n");
3168     show_help_children(avcodec_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3169     show_help_children(avformat_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3170 #if !CONFIG_AVFILTER
3171     show_help_children(sws_get_class(), AV_OPT_FLAG_ENCODING_PARAM);
3172 #endif
3173     printf("\nWhile playing:\n"
3174            "q, ESC              quit\n"
3175            "f                   toggle full screen\n"
3176            "p, SPC              pause\n"
3177            "a                   cycle audio channel\n"
3178            "v                   cycle video channel\n"
3179            "t                   cycle subtitle channel\n"
3180            "w                   show audio waves\n"
3181            "s                   activate frame-step mode\n"
3182            "left/right          seek backward/forward 10 seconds\n"
3183            "down/up             seek backward/forward 1 minute\n"
3184            "page down/page up   seek backward/forward 10 minutes\n"
3185            "mouse click         seek to percentage in file corresponding to fraction of width\n"
3186            );
3187     return 0;
3188 }
3189
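/* libavcodec lock manager callback backed by SDL mutexes; it must return 0 on
 * success and nonzero on failure, hence the !! on the SDL return values */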
3190 static int lockmgr(void **mtx, enum AVLockOp op)
3191 {
3192     switch (op) {
3193     case AV_LOCK_CREATE:
3194         *mtx = SDL_CreateMutex();
3195         if (!*mtx)
3196             return 1;
3197         return 0;
3198     case AV_LOCK_OBTAIN:
3199         return !!SDL_LockMutex(*mtx);
3200     case AV_LOCK_RELEASE:
3201         return !!SDL_UnlockMutex(*mtx);
3202     case AV_LOCK_DESTROY:
3203         SDL_DestroyMutex(*mtx);
3204         return 0;
3205     }
3206     return 1;
3207 }
3208
3209 /* program entry point */
3210 int main(int argc, char **argv)
3211 {
3212     int flags;
3213     VideoState *is;
3214
3215     av_log_set_flags(AV_LOG_SKIP_REPEATED);
3216     parse_loglevel(argc, argv, options);
3217
3218     /* register all codecs, demuxers and protocols */
3219     avcodec_register_all();
3220 #if CONFIG_AVDEVICE
3221     avdevice_register_all();
3222 #endif
3223 #if CONFIG_AVFILTER
3224     avfilter_register_all();
3225 #endif
3226     av_register_all();
3227     avformat_network_init();
3228
3229     init_opts();
3230
3231     signal(SIGINT , sigterm_handler); /* Interrupt (ANSI).    */
3232     signal(SIGTERM, sigterm_handler); /* Termination (ANSI).  */
3233
3234     show_banner(argc, argv, options);
3235
3236     parse_options(NULL, argc, argv, options, opt_input_file);
3237
3238     if (!input_filename) {
3239         show_usage();
3240         fprintf(stderr, "An input file must be specified\n");
3241         fprintf(stderr, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
3242         exit(1);
3243     }
3244
3245     if (display_disable) {
3246         video_disable = 1;
3247     }
3248     flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3249     if (audio_disable)
3250         flags &= ~SDL_INIT_AUDIO;
3251 #if !defined(__MINGW32__) && !defined(__APPLE__)
3252     flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
3253 #endif
3254     if (SDL_Init(flags)) {
3255         fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
3256         fprintf(stderr, "(Did you set the DISPLAY variable?)\n");
3257         exit(1);
3258     }
3259
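    /* remember the current desktop resolution so full screen mode can use it */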
3260     if (!display_disable) {
3261 #if HAVE_SDL_VIDEO_SIZE
3262         const SDL_VideoInfo *vi = SDL_GetVideoInfo();
3263         fs_screen_width = vi->current_w;
3264         fs_screen_height = vi->current_h;
3265 #endif
3266     }
3267
3268     SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
3269     SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3270     SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3271
3272     if (av_lockmgr_register(lockmgr)) {
3273         fprintf(stderr, "Could not initialize lock manager!\n");
3274         do_exit(NULL);
3275     }
3276
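    /* flush_pkt is a sentinel packet: it is queued on seeks and tells the
       decoder threads to flush their codec buffers when they dequeue it */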
3277     av_init_packet(&flush_pkt);
3278     flush_pkt.data = (uint8_t *)(intptr_t)"FLUSH";
3279
3280     is = stream_open(input_filename, file_iformat);
3281     if (!is) {
3282         fprintf(stderr, "Failed to initialize VideoState!\n");
3283         do_exit(NULL);
3284     }
3285
3286     event_loop(is);
3287
3288     /* not reached: event_loop() only exits through do_exit() */
3289
3290     return 0;
3291 }