ffplay: don't destroy packet queues on stream change
[ffmpeg.git] / ffplay.c
1 /*
2  * Copyright (c) 2003 Fabrice Bellard
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20
21 /**
22  * @file
23  * simple media player based on the FFmpeg libraries
24  */
25
26 #include "config.h"
27 #include <inttypes.h>
28 #include <math.h>
29 #include <limits.h>
30 #include <signal.h>
31 #include "libavutil/avstring.h"
32 #include "libavutil/colorspace.h"
33 #include "libavutil/mathematics.h"
34 #include "libavutil/pixdesc.h"
35 #include "libavutil/imgutils.h"
36 #include "libavutil/dict.h"
37 #include "libavutil/parseutils.h"
38 #include "libavutil/samplefmt.h"
39 #include "libavutil/avassert.h"
40 #include "libavformat/avformat.h"
41 #include "libavdevice/avdevice.h"
42 #include "libswscale/swscale.h"
43 #include "libavutil/opt.h"
44 #include "libavcodec/avfft.h"
45 #include "libswresample/swresample.h"
46
47 #if CONFIG_AVFILTER
48 # include "libavfilter/avcodec.h"
49 # include "libavfilter/avfilter.h"
50 # include "libavfilter/avfiltergraph.h"
51 # include "libavfilter/buffersink.h"
52 #endif
53
54 #include <SDL.h>
55 #include <SDL_thread.h>
56
57 #include "cmdutils.h"
58
59 #include <unistd.h>
60 #include <assert.h>
61
62 const char program_name[] = "ffplay";
63 const int program_birth_year = 2003;
64
65 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
66 #define MIN_FRAMES 5
67
68 /* SDL audio buffer size, in samples. Should be small to have precise
69    A/V sync as SDL does not have hardware buffer fullness info. */
70 #define SDL_AUDIO_BUFFER_SIZE 1024
71
72 /* no AV sync correction is done if below the AV sync threshold */
73 #define AV_SYNC_THRESHOLD 0.01
74 /* no AV correction is done if the error is too big */
75 #define AV_NOSYNC_THRESHOLD 10.0
76
77 /* maximum audio speed change to get correct sync */
78 #define SAMPLE_CORRECTION_PERCENT_MAX 10
79
80 /* we use about AUDIO_DIFF_AVG_NB A-V differences to compute the average */
81 #define AUDIO_DIFF_AVG_NB   20
82
83 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
84 #define SAMPLE_ARRAY_SIZE (2 * 65536)
85
86 static int sws_flags = SWS_BICUBIC;
87
88 typedef struct PacketQueue {
89     AVPacketList *first_pkt, *last_pkt;
90     int nb_packets;
91     int size;
92     int abort_request;
93     SDL_mutex *mutex;
94     SDL_cond *cond;
95 } PacketQueue;
96
97 #define VIDEO_PICTURE_QUEUE_SIZE 2
98 #define SUBPICTURE_QUEUE_SIZE 4
99
100 typedef struct VideoPicture {
101     double pts;                                  ///< presentation time stamp for this picture
102     double duration;                             ///< expected duration of the frame
103     int64_t pos;                                 ///< byte position in file
104     int skip;
105     SDL_Overlay *bmp;
106     int width, height; /* source height & width */
107     AVRational sample_aspect_ratio;
108     int allocated;
109     int reallocate;
110     enum PixelFormat pix_fmt;
111
112 #if CONFIG_AVFILTER
113     AVFilterBufferRef *picref;
114 #endif
115 } VideoPicture;
116
117 typedef struct SubPicture {
118     double pts; /* presentation time stamp for this picture */
119     AVSubtitle sub;
120 } SubPicture;
121
122 enum {
123     AV_SYNC_AUDIO_MASTER, /* default choice */
124     AV_SYNC_VIDEO_MASTER,
125     AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
126 };
127
128 typedef struct VideoState {
129     SDL_Thread *read_tid;
130     SDL_Thread *video_tid;
131     SDL_Thread *refresh_tid;
132     AVInputFormat *iformat;
133     int no_background;
134     int abort_request;
135     int force_refresh;
136     int paused;
137     int last_paused;
138     int seek_req;
139     int seek_flags;
140     int64_t seek_pos;
141     int64_t seek_rel;
142     int read_pause_return;
143     AVFormatContext *ic;
144
145     int audio_stream;
146
147     int av_sync_type;
148     double external_clock; /* external clock base */
149     int64_t external_clock_time;
150
151     double audio_clock;
152     double audio_diff_cum; /* used for AV difference average computation */
153     double audio_diff_avg_coef;
154     double audio_diff_threshold;
155     int audio_diff_avg_count;
156     AVStream *audio_st;
157     PacketQueue audioq;
158     int audio_hw_buf_size;
159     DECLARE_ALIGNED(16,uint8_t,audio_buf2)[AVCODEC_MAX_AUDIO_FRAME_SIZE * 4];
160     uint8_t silence_buf[SDL_AUDIO_BUFFER_SIZE];
161     uint8_t *audio_buf;
162     uint8_t *audio_buf1;
163     unsigned int audio_buf_size; /* in bytes */
164     int audio_buf_index; /* in bytes */
165     int audio_write_buf_size;
166     AVPacket audio_pkt_temp;
167     AVPacket audio_pkt;
168     enum AVSampleFormat audio_src_fmt;
169     enum AVSampleFormat audio_tgt_fmt;
170     int audio_src_channels;
171     int audio_tgt_channels;
172     int64_t audio_src_channel_layout;
173     int64_t audio_tgt_channel_layout;
174     int audio_src_freq;
175     int audio_tgt_freq;
176     struct SwrContext *swr_ctx;
177     double audio_current_pts;
178     double audio_current_pts_drift;
179     int frame_drops_early;
180     int frame_drops_late;
181     AVFrame *frame;
182
183     enum ShowMode {
184         SHOW_MODE_NONE = -1, SHOW_MODE_VIDEO = 0, SHOW_MODE_WAVES, SHOW_MODE_RDFT, SHOW_MODE_NB
185     } show_mode;
186     int16_t sample_array[SAMPLE_ARRAY_SIZE];
187     int sample_array_index;
188     int last_i_start;
189     RDFTContext *rdft;
190     int rdft_bits;
191     FFTSample *rdft_data;
192     int xpos;
193
194     SDL_Thread *subtitle_tid;
195     int subtitle_stream;
196     int subtitle_stream_changed;
197     AVStream *subtitle_st;
198     PacketQueue subtitleq;
199     SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
200     int subpq_size, subpq_rindex, subpq_windex;
201     SDL_mutex *subpq_mutex;
202     SDL_cond *subpq_cond;
203
204     double frame_timer;
205     double frame_last_pts;
206     double frame_last_duration;
207     double frame_last_dropped_pts;
208     double frame_last_returned_time;
209     double frame_last_filter_delay;
210     int64_t frame_last_dropped_pos;
211     double video_clock;                          ///< pts of last decoded frame / predicted pts of next decoded frame
212     int video_stream;
213     AVStream *video_st;
214     PacketQueue videoq;
215     double video_current_pts;                    ///< current displayed pts (different from video_clock if frame fifos are used)
216     double video_current_pts_drift;              ///< video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
217     int64_t video_current_pos;                   ///< current displayed file pos
218     VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
219     int pictq_size, pictq_rindex, pictq_windex;
220     SDL_mutex *pictq_mutex;
221     SDL_cond *pictq_cond;
222 #if !CONFIG_AVFILTER
223     struct SwsContext *img_convert_ctx;
224 #endif
225
226     char filename[1024];
227     int width, height, xleft, ytop;
228     int step;
229
230 #if CONFIG_AVFILTER
231     AVFilterContext *out_video_filter;          ///< the last filter in the video chain
232 #endif
233
234     int refresh;
235 } VideoState;
236
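/* Payload passed through the FF_ALLOC_EVENT SDL user event: queue_picture()
 * fills one of these and pushes the event so that alloc_picture() can
 * (re)create the SDL overlay in the main thread, avoiding the locking
 * problems mentioned in the comments around alloc_picture() below. */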
237 typedef struct AllocEventProps {
238     VideoState *is;
239     AVFrame *frame;
240 } AllocEventProps;
241
242 static int opt_help(const char *opt, const char *arg);
243
244 /* options specified by the user */
245 static AVInputFormat *file_iformat;
246 static const char *input_filename;
247 static const char *window_title;
248 static int fs_screen_width;
249 static int fs_screen_height;
250 static int screen_width  = 0;
251 static int screen_height = 0;
252 static int audio_disable;
253 static int video_disable;
254 static int wanted_stream[AVMEDIA_TYPE_NB] = {
255     [AVMEDIA_TYPE_AUDIO]    = -1,
256     [AVMEDIA_TYPE_VIDEO]    = -1,
257     [AVMEDIA_TYPE_SUBTITLE] = -1,
258 };
259 static int seek_by_bytes = -1;
260 static int display_disable;
261 static int show_status = 1;
262 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
263 static int64_t start_time = AV_NOPTS_VALUE;
264 static int64_t duration = AV_NOPTS_VALUE;
265 static int workaround_bugs = 1;
266 static int fast = 0;
267 static int genpts = 0;
268 static int lowres = 0;
269 static int idct = FF_IDCT_AUTO;
270 static enum AVDiscard skip_frame       = AVDISCARD_DEFAULT;
271 static enum AVDiscard skip_idct        = AVDISCARD_DEFAULT;
272 static enum AVDiscard skip_loop_filter = AVDISCARD_DEFAULT;
273 static int error_concealment = 3;
274 static int decoder_reorder_pts = -1;
275 static int autoexit;
276 static int exit_on_keydown;
277 static int exit_on_mousedown;
278 static int loop = 1;
279 static int framedrop = -1;
280 static enum ShowMode show_mode = SHOW_MODE_NONE;
281 static const char *audio_codec_name;
282 static const char *subtitle_codec_name;
283 static const char *video_codec_name;
284 static int rdftspeed = 20;
285 #if CONFIG_AVFILTER
286 static char *vfilters = NULL;
287 #endif
288
289 /* current context */
290 static int is_full_screen;
291 static int64_t audio_callback_time;
292
293 static AVPacket flush_pkt;
294
295 #define FF_ALLOC_EVENT   (SDL_USEREVENT)
296 #define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
297 #define FF_QUIT_EVENT    (SDL_USEREVENT + 2)
298
299 static SDL_Surface *screen;
300
301 void av_noreturn exit_program(int ret)
302 {
303     exit(ret);
304 }
305
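/* Append a packet to the queue. The caller must hold q->mutex (hence the
 * "_private" suffix); packet_queue_put() and packet_queue_start() wrap this
 * call with the lock. Signals q->cond so a blocked packet_queue_get() can
 * wake up. */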
306 static int packet_queue_put_private(PacketQueue *q, AVPacket *pkt)
307 {
308     AVPacketList *pkt1;
309
310     if (q->abort_request)
311        return -1;
312
313     pkt1 = av_malloc(sizeof(AVPacketList));
314     if (!pkt1)
315         return -1;
316     pkt1->pkt = *pkt;
317     pkt1->next = NULL;
318
319     if (!q->last_pkt)
320         q->first_pkt = pkt1;
321     else
322         q->last_pkt->next = pkt1;
323     q->last_pkt = pkt1;
324     q->nb_packets++;
325     q->size += pkt1->pkt.size + sizeof(*pkt1);
326     /* XXX: should duplicate packet data in DV case */
327     SDL_CondSignal(q->cond);
328     return 0;
329 }
330
331 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
332 {
333     int ret;
334
335     /* duplicate the packet */
336     if (pkt != &flush_pkt && av_dup_packet(pkt) < 0)
337         return -1;
338
339     SDL_LockMutex(q->mutex);
340     ret = packet_queue_put_private(q, pkt);
341     SDL_UnlockMutex(q->mutex);
342
343     if (pkt != &flush_pkt && ret < 0)
344         av_free_packet(pkt);
345
346     return ret;
347 }
348
349 /* packet queue handling */
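/* Lifecycle note: a queue is created once with packet_queue_init() and starts
 * out aborted; packet_queue_start() enables it and queues flush_pkt so the
 * decoder flushes its buffers, packet_queue_abort() wakes up any blocked
 * reader, and packet_queue_destroy() frees everything. As the commit subject
 * says, the queues are meant to survive stream changes and are only destroyed
 * in stream_close(). */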
350 static void packet_queue_init(PacketQueue *q)
351 {
352     memset(q, 0, sizeof(PacketQueue));
353     q->mutex = SDL_CreateMutex();
354     q->cond = SDL_CreateCond();
355     q->abort_request = 1;
356 }
357
358 static void packet_queue_flush(PacketQueue *q)
359 {
360     AVPacketList *pkt, *pkt1;
361
362     SDL_LockMutex(q->mutex);
363     for (pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
364         pkt1 = pkt->next;
365         av_free_packet(&pkt->pkt);
366         av_freep(&pkt);
367     }
368     q->last_pkt = NULL;
369     q->first_pkt = NULL;
370     q->nb_packets = 0;
371     q->size = 0;
372     SDL_UnlockMutex(q->mutex);
373 }
374
375 static void packet_queue_destroy(PacketQueue *q)
376 {
377     packet_queue_flush(q);
378     SDL_DestroyMutex(q->mutex);
379     SDL_DestroyCond(q->cond);
380 }
381
382 static void packet_queue_abort(PacketQueue *q)
383 {
384     SDL_LockMutex(q->mutex);
385
386     q->abort_request = 1;
387
388     SDL_CondSignal(q->cond);
389
390     SDL_UnlockMutex(q->mutex);
391 }
392
393 static void packet_queue_start(PacketQueue *q)
394 {
395     SDL_LockMutex(q->mutex);
396     q->abort_request = 0;
397     packet_queue_put_private(q, &flush_pkt);
398     SDL_UnlockMutex(q->mutex);
399 }
400
401 /* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
402 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
403 {
404     AVPacketList *pkt1;
405     int ret;
406
407     SDL_LockMutex(q->mutex);
408
409     for (;;) {
410         if (q->abort_request) {
411             ret = -1;
412             break;
413         }
414
415         pkt1 = q->first_pkt;
416         if (pkt1) {
417             q->first_pkt = pkt1->next;
418             if (!q->first_pkt)
419                 q->last_pkt = NULL;
420             q->nb_packets--;
421             q->size -= pkt1->pkt.size + sizeof(*pkt1);
422             *pkt = pkt1->pkt;
423             av_free(pkt1);
424             ret = 1;
425             break;
426         } else if (!block) {
427             ret = 0;
428             break;
429         } else {
430             SDL_CondWait(q->cond, q->mutex);
431         }
432     }
433     SDL_UnlockMutex(q->mutex);
434     return ret;
435 }
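/* Typical consumer pattern (get_video_frame() below follows this shape):
 *
 *     for (;;) {
 *         if (packet_queue_get(q, &pkt, 1) < 0)
 *             break;                            // queue was aborted
 *         if (pkt.data == flush_pkt.data) {
 *             avcodec_flush_buffers(codec_ctx); // stream (re)start or seek
 *             continue;
 *         }
 *         ... decode pkt ...
 *         av_free_packet(&pkt);
 *     }
 */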
436
437 static inline void fill_rectangle(SDL_Surface *screen,
438                                   int x, int y, int w, int h, int color)
439 {
440     SDL_Rect rect;
441     rect.x = x;
442     rect.y = y;
443     rect.w = w;
444     rect.h = h;
445     SDL_FillRect(screen, &rect, color);
446 }
447
448 #define ALPHA_BLEND(a, oldp, newp, s)\
449 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
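/* ALPHA_BLEND mixes an old and a new component with alpha a in [0, 255]; the
 * s shift compensates for chroma values that were accumulated over 2 (s == 1)
 * or 4 (s == 2) pixels. With s == 0: a == 0 yields oldp, a == 255 yields newp,
 * and a == 128 gives roughly their midpoint. */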
450
451 #define RGBA_IN(r, g, b, a, s)\
452 {\
453     unsigned int v = ((const uint32_t *)(s))[0];\
454     a = (v >> 24) & 0xff;\
455     r = (v >> 16) & 0xff;\
456     g = (v >> 8) & 0xff;\
457     b = v & 0xff;\
458 }
459
460 #define YUVA_IN(y, u, v, a, s, pal)\
461 {\
462     unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
463     a = (val >> 24) & 0xff;\
464     y = (val >> 16) & 0xff;\
465     u = (val >> 8) & 0xff;\
466     v = val & 0xff;\
467 }
468
469 #define YUVA_OUT(d, y, u, v, a)\
470 {\
471     ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
472 }
473
474
475 #define BPP 1
476
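/* Blend a palettized subtitle rectangle (whose palette has already been
 * converted to YUV + alpha, see the note below) onto a YUV 4:2:0 destination
 * picture. The rectangle is clipped against the image, and odd leading or
 * trailing rows/columns are handled separately because each chroma sample
 * covers a 2x2 block of luma samples. */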
477 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
478 {
479     int wrap, wrap3, width2, skip2;
480     int y, u, v, a, u1, v1, a1, w, h;
481     uint8_t *lum, *cb, *cr;
482     const uint8_t *p;
483     const uint32_t *pal;
484     int dstx, dsty, dstw, dsth;
485
486     dstw = av_clip(rect->w, 0, imgw);
487     dsth = av_clip(rect->h, 0, imgh);
488     dstx = av_clip(rect->x, 0, imgw - dstw);
489     dsty = av_clip(rect->y, 0, imgh - dsth);
490     lum = dst->data[0] + dsty * dst->linesize[0];
491     cb  = dst->data[1] + (dsty >> 1) * dst->linesize[1];
492     cr  = dst->data[2] + (dsty >> 1) * dst->linesize[2];
493
494     width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
495     skip2 = dstx >> 1;
496     wrap = dst->linesize[0];
497     wrap3 = rect->pict.linesize[0];
498     p = rect->pict.data[0];
499     pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */
500
501     if (dsty & 1) {
502         lum += dstx;
503         cb += skip2;
504         cr += skip2;
505
506         if (dstx & 1) {
507             YUVA_IN(y, u, v, a, p, pal);
508             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
509             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
510             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
511             cb++;
512             cr++;
513             lum++;
514             p += BPP;
515         }
516         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
517             YUVA_IN(y, u, v, a, p, pal);
518             u1 = u;
519             v1 = v;
520             a1 = a;
521             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
522
523             YUVA_IN(y, u, v, a, p + BPP, pal);
524             u1 += u;
525             v1 += v;
526             a1 += a;
527             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
528             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
529             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
530             cb++;
531             cr++;
532             p += 2 * BPP;
533             lum += 2;
534         }
535         if (w) {
536             YUVA_IN(y, u, v, a, p, pal);
537             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
538             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
539             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
540             p++;
541             lum++;
542         }
543         p += wrap3 - dstw * BPP;
544         lum += wrap - dstw - dstx;
545         cb += dst->linesize[1] - width2 - skip2;
546         cr += dst->linesize[2] - width2 - skip2;
547     }
548     for (h = dsth - (dsty & 1); h >= 2; h -= 2) {
549         lum += dstx;
550         cb += skip2;
551         cr += skip2;
552
553         if (dstx & 1) {
554             YUVA_IN(y, u, v, a, p, pal);
555             u1 = u;
556             v1 = v;
557             a1 = a;
558             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
559             p += wrap3;
560             lum += wrap;
561             YUVA_IN(y, u, v, a, p, pal);
562             u1 += u;
563             v1 += v;
564             a1 += a;
565             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
566             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
567             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
568             cb++;
569             cr++;
570             p += -wrap3 + BPP;
571             lum += -wrap + 1;
572         }
573         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
574             YUVA_IN(y, u, v, a, p, pal);
575             u1 = u;
576             v1 = v;
577             a1 = a;
578             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
579
580             YUVA_IN(y, u, v, a, p + BPP, pal);
581             u1 += u;
582             v1 += v;
583             a1 += a;
584             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
585             p += wrap3;
586             lum += wrap;
587
588             YUVA_IN(y, u, v, a, p, pal);
589             u1 += u;
590             v1 += v;
591             a1 += a;
592             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
593
594             YUVA_IN(y, u, v, a, p + BPP, pal);
595             u1 += u;
596             v1 += v;
597             a1 += a;
598             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
599
600             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
601             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
602
603             cb++;
604             cr++;
605             p += -wrap3 + 2 * BPP;
606             lum += -wrap + 2;
607         }
608         if (w) {
609             YUVA_IN(y, u, v, a, p, pal);
610             u1 = u;
611             v1 = v;
612             a1 = a;
613             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
614             p += wrap3;
615             lum += wrap;
616             YUVA_IN(y, u, v, a, p, pal);
617             u1 += u;
618             v1 += v;
619             a1 += a;
620             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
621             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
622             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
623             cb++;
624             cr++;
625             p += -wrap3 + BPP;
626             lum += -wrap + 1;
627         }
628         p += wrap3 + (wrap3 - dstw * BPP);
629         lum += wrap + (wrap - dstw - dstx);
630         cb += dst->linesize[1] - width2 - skip2;
631         cr += dst->linesize[2] - width2 - skip2;
632     }
633     /* handle odd height */
634     if (h) {
635         lum += dstx;
636         cb += skip2;
637         cr += skip2;
638
639         if (dstx & 1) {
640             YUVA_IN(y, u, v, a, p, pal);
641             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
642             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
643             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
644             cb++;
645             cr++;
646             lum++;
647             p += BPP;
648         }
649         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
650             YUVA_IN(y, u, v, a, p, pal);
651             u1 = u;
652             v1 = v;
653             a1 = a;
654             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
655
656             YUVA_IN(y, u, v, a, p + BPP, pal);
657             u1 += u;
658             v1 += v;
659             a1 += a;
660             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
661             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
662             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
663             cb++;
664             cr++;
665             p += 2 * BPP;
666             lum += 2;
667         }
668         if (w) {
669             YUVA_IN(y, u, v, a, p, pal);
670             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
671             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
672             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
673         }
674     }
675 }
676
677 static void free_subpicture(SubPicture *sp)
678 {
679     avsubtitle_free(&sp->sub);
680 }
681
682 static void video_image_display(VideoState *is)
683 {
684     VideoPicture *vp;
685     SubPicture *sp;
686     AVPicture pict;
687     float aspect_ratio;
688     int width, height, x, y;
689     SDL_Rect rect;
690     int i;
691
692     vp = &is->pictq[is->pictq_rindex];
693     if (vp->bmp) {
694         if (vp->sample_aspect_ratio.num == 0)
695             aspect_ratio = 0;
696         else
697             aspect_ratio = av_q2d(vp->sample_aspect_ratio);
698
699         if (aspect_ratio <= 0.0)
700             aspect_ratio = 1.0;
701         aspect_ratio *= (float)vp->width / (float)vp->height;
702
703         if (is->subtitle_st) {
704             if (is->subpq_size > 0) {
705                 sp = &is->subpq[is->subpq_rindex];
706
707                 if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) {
708                     SDL_LockYUVOverlay (vp->bmp);
709
710                     pict.data[0] = vp->bmp->pixels[0];
711                     pict.data[1] = vp->bmp->pixels[2];
712                     pict.data[2] = vp->bmp->pixels[1];
713
714                     pict.linesize[0] = vp->bmp->pitches[0];
715                     pict.linesize[1] = vp->bmp->pitches[2];
716                     pict.linesize[2] = vp->bmp->pitches[1];
717
718                     for (i = 0; i < sp->sub.num_rects; i++)
719                         blend_subrect(&pict, sp->sub.rects[i],
720                                       vp->bmp->w, vp->bmp->h);
721
722                     SDL_UnlockYUVOverlay (vp->bmp);
723                 }
724             }
725         }
726
727
728         /* XXX: we assume the screen has a 1.0 pixel aspect ratio */
729         height = is->height;
730         width = ((int)rint(height * aspect_ratio)) & ~1;
731         if (width > is->width) {
732             width = is->width;
733             height = ((int)rint(width / aspect_ratio)) & ~1;
734         }
735         x = (is->width - width) / 2;
736         y = (is->height - height) / 2;
737         is->no_background = 0;
738         rect.x = is->xleft + x;
739         rect.y = is->ytop  + y;
740         rect.w = FFMAX(width,  1);
741         rect.h = FFMAX(height, 1);
742         SDL_DisplayYUVOverlay(vp->bmp, &rect);
743     }
744 }
745
746 static inline int compute_mod(int a, int b)
747 {
748     return a < 0 ? a%b + b : a%b;
749 }
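/* compute_mod() maps negative values into the positive range:
 * e.g. compute_mod(-3, 16) == 13, whereas the C expression -3 % 16 yields -3. */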
750
751 static void video_audio_display(VideoState *s)
752 {
753     int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
754     int ch, channels, h, h2, bgcolor, fgcolor;
755     int16_t time_diff;
756     int rdft_bits, nb_freq;
757
758     for (rdft_bits = 1; (1 << rdft_bits) < 2 * s->height; rdft_bits++)
759         ;
760     nb_freq = 1 << (rdft_bits - 1);
761
762     /* compute display index: center on currently output samples */
763     channels = s->audio_tgt_channels;
764     nb_display_channels = channels;
765     if (!s->paused) {
766         int data_used= s->show_mode == SHOW_MODE_WAVES ? s->width : (2*nb_freq);
767         n = 2 * channels;
768         delay = s->audio_write_buf_size;
769         delay /= n;
770
771         /* to be more precise, we take into account the time spent since
772            the last buffer computation */
773         if (audio_callback_time) {
774             time_diff = av_gettime() - audio_callback_time;
775             delay -= (time_diff * s->audio_tgt_freq) / 1000000;
776         }
777
778         delay += 2 * data_used;
779         if (delay < data_used)
780             delay = data_used;
781
782         i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
783         if (s->show_mode == SHOW_MODE_WAVES) {
784             h = INT_MIN;
785             for (i = 0; i < 1000; i += channels) {
786                 int idx = (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
787                 int a = s->sample_array[idx];
788                 int b = s->sample_array[(idx + 4 * channels) % SAMPLE_ARRAY_SIZE];
789                 int c = s->sample_array[(idx + 5 * channels) % SAMPLE_ARRAY_SIZE];
790                 int d = s->sample_array[(idx + 9 * channels) % SAMPLE_ARRAY_SIZE];
791                 int score = a - d;
792                 if (h < score && (b ^ c) < 0) {
793                     h = score;
794                     i_start = idx;
795                 }
796             }
797         }
798
799         s->last_i_start = i_start;
800     } else {
801         i_start = s->last_i_start;
802     }
803
804     bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
805     if (s->show_mode == SHOW_MODE_WAVES) {
806         fill_rectangle(screen,
807                        s->xleft, s->ytop, s->width, s->height,
808                        bgcolor);
809
810         fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
811
812         /* total height for one channel */
813         h = s->height / nb_display_channels;
814         /* graph height / 2 */
815         h2 = (h * 9) / 20;
816         for (ch = 0; ch < nb_display_channels; ch++) {
817             i = i_start + ch;
818             y1 = s->ytop + ch * h + (h / 2); /* position of center line */
819             for (x = 0; x < s->width; x++) {
820                 y = (s->sample_array[i] * h2) >> 15;
821                 if (y < 0) {
822                     y = -y;
823                     ys = y1 - y;
824                 } else {
825                     ys = y1;
826                 }
827                 fill_rectangle(screen,
828                                s->xleft + x, ys, 1, y,
829                                fgcolor);
830                 i += channels;
831                 if (i >= SAMPLE_ARRAY_SIZE)
832                     i -= SAMPLE_ARRAY_SIZE;
833             }
834         }
835
836         fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
837
838         for (ch = 1; ch < nb_display_channels; ch++) {
839             y = s->ytop + ch * h;
840             fill_rectangle(screen,
841                            s->xleft, y, s->width, 1,
842                            fgcolor);
843         }
844         SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
845     } else {
846         nb_display_channels= FFMIN(nb_display_channels, 2);
847         if (rdft_bits != s->rdft_bits) {
848             av_rdft_end(s->rdft);
849             av_free(s->rdft_data);
850             s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
851             s->rdft_bits = rdft_bits;
852             s->rdft_data = av_malloc(4 * nb_freq * sizeof(*s->rdft_data));
853         }
854         {
855             FFTSample *data[2];
856             for (ch = 0; ch < nb_display_channels; ch++) {
857                 data[ch] = s->rdft_data + 2 * nb_freq * ch;
858                 i = i_start + ch;
859                 for (x = 0; x < 2 * nb_freq; x++) {
860                     double w = (x-nb_freq) * (1.0 / nb_freq);
861                     data[ch][x] = s->sample_array[i] * (1.0 - w * w);
862                     i += channels;
863                     if (i >= SAMPLE_ARRAY_SIZE)
864                         i -= SAMPLE_ARRAY_SIZE;
865                 }
866                 av_rdft_calc(s->rdft, data[ch]);
867             }
868             // Least efficient way to do this; we should of course access the data directly, but it is more than fast enough
869             for (y = 0; y < s->height; y++) {
870                 double w = 1 / sqrt(nb_freq);
871                 int a = sqrt(w * sqrt(data[0][2 * y + 0] * data[0][2 * y + 0] + data[0][2 * y + 1] * data[0][2 * y + 1]));
872                 int b = (nb_display_channels == 2 ) ? sqrt(w * sqrt(data[1][2 * y + 0] * data[1][2 * y + 0]
873                        + data[1][2 * y + 1] * data[1][2 * y + 1])) : a;
874                 a = FFMIN(a, 255);
875                 b = FFMIN(b, 255);
876                 fgcolor = SDL_MapRGB(screen->format, a, b, (a + b) / 2);
877
878                 fill_rectangle(screen,
879                             s->xpos, s->height-y, 1, 1,
880                             fgcolor);
881             }
882         }
883         SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
884         if (!s->paused)
885             s->xpos++;
886         if (s->xpos >= s->width)
887             s->xpos= s->xleft;
888     }
889 }
890
891 static void stream_close(VideoState *is)
892 {
893     VideoPicture *vp;
894     int i;
895     /* XXX: use a special url_shutdown call to abort parse cleanly */
896     is->abort_request = 1;
897     SDL_WaitThread(is->read_tid, NULL);
898     SDL_WaitThread(is->refresh_tid, NULL);
899     packet_queue_destroy(&is->videoq);
900     packet_queue_destroy(&is->audioq);
901     packet_queue_destroy(&is->subtitleq);
902
903     /* free all pictures */
904     for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
905         vp = &is->pictq[i];
906 #if CONFIG_AVFILTER
907         if (vp->picref) {
908             avfilter_unref_buffer(vp->picref);
909             vp->picref = NULL;
910         }
911 #endif
912         if (vp->bmp) {
913             SDL_FreeYUVOverlay(vp->bmp);
914             vp->bmp = NULL;
915         }
916     }
917     SDL_DestroyMutex(is->pictq_mutex);
918     SDL_DestroyCond(is->pictq_cond);
919     SDL_DestroyMutex(is->subpq_mutex);
920     SDL_DestroyCond(is->subpq_cond);
921 #if !CONFIG_AVFILTER
922     if (is->img_convert_ctx)
923         sws_freeContext(is->img_convert_ctx);
924 #endif
925     av_free(is);
926 }
927
928 static void do_exit(VideoState *is)
929 {
930     if (is) {
931         stream_close(is);
932     }
933     av_lockmgr_register(NULL);
934     uninit_opts();
935 #if CONFIG_AVFILTER
936     avfilter_uninit();
937 #endif
938     avformat_network_deinit();
939     if (show_status)
940         printf("\n");
941     SDL_Quit();
942     av_log(NULL, AV_LOG_QUIET, "%s", "");
943     exit(0);
944 }
945
946 static void sigterm_handler(int sig)
947 {
948     exit(123);
949 }
950
951 static int video_open(VideoState *is, int force_set_video_mode)
952 {
953     int flags = SDL_HWSURFACE | SDL_ASYNCBLIT | SDL_HWACCEL;
954     int w,h;
955     VideoPicture *vp = &is->pictq[is->pictq_rindex];
956
957     if (is_full_screen) flags |= SDL_FULLSCREEN;
958     else                flags |= SDL_RESIZABLE;
959
960     if (is_full_screen && fs_screen_width) {
961         w = fs_screen_width;
962         h = fs_screen_height;
963     } else if (!is_full_screen && screen_width) {
964         w = screen_width;
965         h = screen_height;
966     } else if (vp->width) {
967         w = vp->width;
968         h = vp->height;
969     } else {
970         w = 640;
971         h = 480;
972     }
973     if (screen && is->width == screen->w && screen->w == w
974        && is->height== screen->h && screen->h == h && !force_set_video_mode)
975         return 0;
976     screen = SDL_SetVideoMode(w, h, 0, flags);
977     if (!screen) {
978         fprintf(stderr, "SDL: could not set video mode - exiting\n");
979         do_exit(is);
980     }
981     if (!window_title)
982         window_title = input_filename;
983     SDL_WM_SetCaption(window_title, window_title);
984
985     is->width  = screen->w;
986     is->height = screen->h;
987
988     return 0;
989 }
990
991 /* display the current picture, if any */
992 static void video_display(VideoState *is)
993 {
994     if (!screen)
995         video_open(is, 0);
996     if (is->audio_st && is->show_mode != SHOW_MODE_VIDEO)
997         video_audio_display(is);
998     else if (is->video_st)
999         video_image_display(is);
1000 }
1001
1002 static int refresh_thread(void *opaque)
1003 {
1004     VideoState *is= opaque;
1005     while (!is->abort_request) {
1006         SDL_Event event;
1007         event.type = FF_REFRESH_EVENT;
1008         event.user.data1 = opaque;
1009         if (!is->refresh && (!is->paused || is->force_refresh)) {
1010             is->refresh = 1;
1011             SDL_PushEvent(&event);
1012         }
1013         // FIXME: ideally we should wait for the correct time, but SDL's event passing is so slow it would be silly
1014         usleep(is->audio_st && is->show_mode != SHOW_MODE_VIDEO ? rdftspeed*1000 : 5000);
1015     }
1016     return 0;
1017 }
1018
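/* The audio and video clocks are kept as a "drift" value, pts minus the system
 * time at the moment of the last update, so that while playing the current
 * clock is drift + av_gettime() / 1000000.0 and keeps advancing in real time
 * between updates; while paused, the last pts is returned unchanged. */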
1019 /* get the current audio clock value */
1020 static double get_audio_clock(VideoState *is)
1021 {
1022     if (is->paused) {
1023         return is->audio_current_pts;
1024     } else {
1025         return is->audio_current_pts_drift + av_gettime() / 1000000.0;
1026     }
1027 }
1028
1029 /* get the current video clock value */
1030 static double get_video_clock(VideoState *is)
1031 {
1032     if (is->paused) {
1033         return is->video_current_pts;
1034     } else {
1035         return is->video_current_pts_drift + av_gettime() / 1000000.0;
1036     }
1037 }
1038
1039 /* get the current external clock value */
1040 static double get_external_clock(VideoState *is)
1041 {
1042     int64_t ti;
1043     ti = av_gettime();
1044     return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
1045 }
1046
1047 /* get the current master clock value */
1048 static double get_master_clock(VideoState *is)
1049 {
1050     double val;
1051
1052     if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1053         if (is->video_st)
1054             val = get_video_clock(is);
1055         else
1056             val = get_audio_clock(is);
1057     } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1058         if (is->audio_st)
1059             val = get_audio_clock(is);
1060         else
1061             val = get_video_clock(is);
1062     } else {
1063         val = get_external_clock(is);
1064     }
1065     return val;
1066 }
1067
1068 /* seek in the stream */
1069 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1070 {
1071     if (!is->seek_req) {
1072         is->seek_pos = pos;
1073         is->seek_rel = rel;
1074         is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1075         if (seek_by_bytes)
1076             is->seek_flags |= AVSEEK_FLAG_BYTE;
1077         is->seek_req = 1;
1078     }
1079 }
1080
1081 /* pause or resume the video */
1082 static void stream_toggle_pause(VideoState *is)
1083 {
1084     if (is->paused) {
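        /* video_current_pts_drift + now - video_current_pts is the wall-clock
         * time elapsed since the video clock was last updated, i.e. roughly
         * how long we were paused; advance frame_timer by that amount so
         * playback resumes without trying to catch up. */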
1085         is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
1086         if (is->read_pause_return != AVERROR(ENOSYS)) {
1087             is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
1088         }
1089         is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
1090     }
1091     is->paused = !is->paused;
1092 }
1093
1094 static double compute_target_delay(double delay, VideoState *is)
1095 {
1096     double sync_threshold, diff;
1097
1098     /* update delay to follow master synchronisation source */
1099     if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1100          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1101         /* if video is slave, we try to correct big delays by
1102            duplicating or deleting a frame */
1103         diff = get_video_clock(is) - get_master_clock(is);
1104
1105         /* skip or repeat the frame. We take the delay into account to
1106            compute the threshold. I still don't know if it is the best
1107            guess */
1108         sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1109         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1110             if (diff <= -sync_threshold)
1111                 delay = 0;
1112             else if (diff >= sync_threshold)
1113                 delay = 2 * delay;
1114         }
1115     }
1116
1117     av_dlog(NULL, "video: delay=%0.3f A-V=%f\n",
1118             delay, -diff);
1119
1120     return delay;
1121 }
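/* Worked example (video is not the master clock): with a nominal delay of
 * 0.04 s, sync_threshold = FFMAX(AV_SYNC_THRESHOLD, 0.04) = 0.04 s. If the
 * video clock is 0.1 s behind the master (diff == -0.1) the delay is forced
 * to 0 and the next frame is shown immediately; if it is 0.1 s ahead the
 * delay is doubled to 0.08 s so the master can catch up; differences larger
 * than AV_NOSYNC_THRESHOLD (10 s) leave the delay unchanged. */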
1122
1123 static void pictq_next_picture(VideoState *is) {
1124     /* update queue size and signal for next picture */
1125     if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1126         is->pictq_rindex = 0;
1127
1128     SDL_LockMutex(is->pictq_mutex);
1129     is->pictq_size--;
1130     SDL_CondSignal(is->pictq_cond);
1131     SDL_UnlockMutex(is->pictq_mutex);
1132 }
1133
1134 static void update_video_pts(VideoState *is, double pts, int64_t pos) {
1135     double time = av_gettime() / 1000000.0;
1136     /* update current video pts */
1137     is->video_current_pts = pts;
1138     is->video_current_pts_drift = is->video_current_pts - time;
1139     is->video_current_pos = pos;
1140     is->frame_last_pts = pts;
1141 }
1142
1143 /* called to display each frame */
1144 static void video_refresh(void *opaque)
1145 {
1146     VideoState *is = opaque;
1147     VideoPicture *vp;
1148     double time;
1149
1150     SubPicture *sp, *sp2;
1151
1152     if (is->video_st) {
1153 retry:
1154         if (is->pictq_size == 0) {
1155             SDL_LockMutex(is->pictq_mutex);
1156             if (is->frame_last_dropped_pts != AV_NOPTS_VALUE && is->frame_last_dropped_pts > is->frame_last_pts) {
1157                 update_video_pts(is, is->frame_last_dropped_pts, is->frame_last_dropped_pos);
1158                 is->frame_last_dropped_pts = AV_NOPTS_VALUE;
1159             }
1160             SDL_UnlockMutex(is->pictq_mutex);
1161             // nothing to do, no picture to display in the queue
1162         } else {
1163             double last_duration, duration, delay;
1164             /* dequeue the picture */
1165             vp = &is->pictq[is->pictq_rindex];
1166
1167             if (vp->skip) {
1168                 pictq_next_picture(is);
1169                 goto retry;
1170             }
1171
1172             if (is->paused)
1173                 goto display;
1174
1175             /* compute nominal last_duration */
1176             last_duration = vp->pts - is->frame_last_pts;
1177             if (last_duration > 0 && last_duration < 10.0) {
1178                 /* if duration of the last frame was sane, update last_duration in video state */
1179                 is->frame_last_duration = last_duration;
1180             }
1181             delay = compute_target_delay(is->frame_last_duration, is);
1182
1183             time= av_gettime()/1000000.0;
1184             if (time < is->frame_timer + delay)
1185                 return;
1186
1187             if (delay > 0)
1188                 is->frame_timer += delay * FFMAX(1, floor((time-is->frame_timer) / delay));
1189
1190             SDL_LockMutex(is->pictq_mutex);
1191             update_video_pts(is, vp->pts, vp->pos);
1192             SDL_UnlockMutex(is->pictq_mutex);
1193
1194             if (is->pictq_size > 1) {
1195                 VideoPicture *nextvp = &is->pictq[(is->pictq_rindex + 1) % VIDEO_PICTURE_QUEUE_SIZE];
1196                 duration = nextvp->pts - vp->pts; // More accurate this way; 1/time_base often does not reflect the actual FPS
1197             } else {
1198                 duration = vp->duration;
1199             }
1200
1201             if((framedrop>0 || (framedrop && is->audio_st)) && time > is->frame_timer + duration){
1202                 if(is->pictq_size > 1){
1203                     is->frame_drops_late++;
1204                     pictq_next_picture(is);
1205                     goto retry;
1206                 }
1207             }
1208
1209             if (is->subtitle_st) {
1210                 if (is->subtitle_stream_changed) {
1211                     SDL_LockMutex(is->subpq_mutex);
1212
1213                     while (is->subpq_size) {
1214                         free_subpicture(&is->subpq[is->subpq_rindex]);
1215
1216                         /* update queue size and signal for next picture */
1217                         if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1218                             is->subpq_rindex = 0;
1219
1220                         is->subpq_size--;
1221                     }
1222                     is->subtitle_stream_changed = 0;
1223
1224                     SDL_CondSignal(is->subpq_cond);
1225                     SDL_UnlockMutex(is->subpq_mutex);
1226                 } else {
1227                     if (is->subpq_size > 0) {
1228                         sp = &is->subpq[is->subpq_rindex];
1229
1230                         if (is->subpq_size > 1)
1231                             sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1232                         else
1233                             sp2 = NULL;
1234
1235                         if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1236                                 || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1237                         {
1238                             free_subpicture(sp);
1239
1240                             /* update queue size and signal for next picture */
1241                             if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1242                                 is->subpq_rindex = 0;
1243
1244                             SDL_LockMutex(is->subpq_mutex);
1245                             is->subpq_size--;
1246                             SDL_CondSignal(is->subpq_cond);
1247                             SDL_UnlockMutex(is->subpq_mutex);
1248                         }
1249                     }
1250                 }
1251             }
1252
1253 display:
1254             /* display picture */
1255             if (!display_disable)
1256                 video_display(is);
1257
1258             if (!is->paused)
1259                 pictq_next_picture(is);
1260         }
1261     } else if (is->audio_st) {
1262         /* draw the next audio frame */
1263
1264         /* if there is only an audio stream, then display the audio bars
1265            (better than nothing, just to test the implementation) */
1266
1267         /* display picture */
1268         if (!display_disable)
1269             video_display(is);
1270     }
1271     is->force_refresh = 0;
1272     if (show_status) {
1273         static int64_t last_time;
1274         int64_t cur_time;
1275         int aqsize, vqsize, sqsize;
1276         double av_diff;
1277
1278         cur_time = av_gettime();
1279         if (!last_time || (cur_time - last_time) >= 30000) {
1280             aqsize = 0;
1281             vqsize = 0;
1282             sqsize = 0;
1283             if (is->audio_st)
1284                 aqsize = is->audioq.size;
1285             if (is->video_st)
1286                 vqsize = is->videoq.size;
1287             if (is->subtitle_st)
1288                 sqsize = is->subtitleq.size;
1289             av_diff = 0;
1290             if (is->audio_st && is->video_st)
1291                 av_diff = get_audio_clock(is) - get_video_clock(is);
1292             printf("%7.2f A-V:%7.3f fd=%4d aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
1293                    get_master_clock(is),
1294                    av_diff,
1295                    is->frame_drops_early + is->frame_drops_late,
1296                    aqsize / 1024,
1297                    vqsize / 1024,
1298                    sqsize,
1299                    is->video_st ? is->video_st->codec->pts_correction_num_faulty_dts : 0,
1300                    is->video_st ? is->video_st->codec->pts_correction_num_faulty_pts : 0);
1301             fflush(stdout);
1302             last_time = cur_time;
1303         }
1304     }
1305 }
1306
1307 /* allocate a picture (this needs to be done in the main thread to avoid
1308    potential locking problems) */
1309 static void alloc_picture(AllocEventProps *event_props)
1310 {
1311     VideoState *is = event_props->is;
1312     AVFrame *frame = event_props->frame;
1313     VideoPicture *vp;
1314
1315     vp = &is->pictq[is->pictq_windex];
1316
1317     if (vp->bmp)
1318         SDL_FreeYUVOverlay(vp->bmp);
1319
1320 #if CONFIG_AVFILTER
1321     if (vp->picref)
1322         avfilter_unref_buffer(vp->picref);
1323     vp->picref = NULL;
1324 #endif
1325
1326     vp->width   = frame->width;
1327     vp->height  = frame->height;
1328     vp->pix_fmt = frame->format;
1329
1330     video_open(event_props->is, 0);
1331
1332     vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
1333                                    SDL_YV12_OVERLAY,
1334                                    screen);
1335     if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
1336         /* SDL allocates a buffer smaller than requested if the video
1337          * overlay hardware is unable to support the requested size. */
1338         fprintf(stderr, "Error: the video system does not support an image\n"
1339                         "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n"
1340                         "to reduce the image size.\n", vp->width, vp->height );
1341         do_exit(is);
1342     }
1343
1344     SDL_LockMutex(is->pictq_mutex);
1345     vp->allocated = 1;
1346     SDL_CondSignal(is->pictq_cond);
1347     SDL_UnlockMutex(is->pictq_mutex);
1348 }
1349
1350 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
1351 {
1352     VideoPicture *vp;
1353     double frame_delay, pts = pts1;
1354
1355     /* compute the exact PTS for the picture if it is omitted in the stream
1356      * pts1 is the dts of the pkt / pts of the frame */
1357     if (pts != 0) {
1358         /* update video clock with pts, if present */
1359         is->video_clock = pts;
1360     } else {
1361         pts = is->video_clock;
1362     }
1363     /* update video clock for next frame */
1364     frame_delay = av_q2d(is->video_st->codec->time_base);
1365     /* for MPEG2, the frame can be repeated, so we update the
1366        clock accordingly */
1367     frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1368     is->video_clock += frame_delay;
1369
1370 #if defined(DEBUG_SYNC) && 0
1371     printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
1372            av_get_picture_type_char(src_frame->pict_type), pts, pts1);
1373 #endif
1374
1375     /* wait until we have space to put a new picture */
1376     SDL_LockMutex(is->pictq_mutex);
1377
1378     while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1379            !is->videoq.abort_request) {
1380         SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1381     }
1382     SDL_UnlockMutex(is->pictq_mutex);
1383
1384     if (is->videoq.abort_request)
1385         return -1;
1386
1387     vp = &is->pictq[is->pictq_windex];
1388
1389     vp->duration = frame_delay;
1390
1391     /* alloc or resize hardware picture buffer */
1392     if (!vp->bmp || vp->reallocate ||
1393         vp->width  != src_frame->width ||
1394         vp->height != src_frame->height) {
1395         SDL_Event event;
1396         AllocEventProps event_props;
1397
1398         event_props.frame = src_frame;
1399         event_props.is = is;
1400
1401         vp->allocated  = 0;
1402         vp->reallocate = 0;
1403
1404         /* the allocation must be done in the main thread to avoid
1405            locking problems. We wait in this block for the event to complete,
1406            so we can pass a pointer to event_props to it. */
1407         event.type = FF_ALLOC_EVENT;
1408         event.user.data1 = &event_props;
1409         SDL_PushEvent(&event);
1410
1411         /* wait until the picture is allocated */
1412         SDL_LockMutex(is->pictq_mutex);
1413         while (!vp->allocated && !is->videoq.abort_request) {
1414             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1415         }
1416         /* if the queue is aborted, we have to pop the pending ALLOC event or wait for the allocation to complete */
1417         if (is->videoq.abort_request && SDL_PeepEvents(&event, 1, SDL_GETEVENT, SDL_EVENTMASK(FF_ALLOC_EVENT)) != 1) {
1418             while (!vp->allocated) {
1419                 SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1420             }
1421         }
1422         SDL_UnlockMutex(is->pictq_mutex);
1423
1424         if (is->videoq.abort_request)
1425             return -1;
1426     }
1427
1428     /* if the frame is not skipped, then display it */
1429     if (vp->bmp) {
1430         AVPicture pict = { { 0 } };
1431 #if CONFIG_AVFILTER
1432         if (vp->picref)
1433             avfilter_unref_buffer(vp->picref);
1434         vp->picref = src_frame->opaque;
1435 #endif
1436
1437         /* get a pointer to the bitmap */
1438         SDL_LockYUVOverlay (vp->bmp);
1439
1440         pict.data[0] = vp->bmp->pixels[0];
1441         pict.data[1] = vp->bmp->pixels[2];
1442         pict.data[2] = vp->bmp->pixels[1];
1443
1444         pict.linesize[0] = vp->bmp->pitches[0];
1445         pict.linesize[1] = vp->bmp->pitches[2];
1446         pict.linesize[2] = vp->bmp->pitches[1];
1447
1448 #if CONFIG_AVFILTER
1449         // FIXME use direct rendering
1450         av_picture_copy(&pict, (AVPicture *)src_frame,
1451                         vp->pix_fmt, vp->width, vp->height);
1452         vp->sample_aspect_ratio = vp->picref->video->sample_aspect_ratio;
1453 #else
1454         sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
1455         is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
1456             vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
1457             PIX_FMT_YUV420P, sws_flags, NULL, NULL, NULL);
1458         if (is->img_convert_ctx == NULL) {
1459             fprintf(stderr, "Cannot initialize the conversion context\n");
1460             exit(1);
1461         }
1462         sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1463                   0, vp->height, pict.data, pict.linesize);
1464         vp->sample_aspect_ratio = av_guess_sample_aspect_ratio(is->ic, is->video_st, src_frame);
1465 #endif
1466         /* update the bitmap content */
1467         SDL_UnlockYUVOverlay(vp->bmp);
1468
1469         vp->pts = pts;
1470         vp->pos = pos;
1471         vp->skip = 0;
1472
1473         /* now we can update the picture count */
1474         if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1475             is->pictq_windex = 0;
1476         SDL_LockMutex(is->pictq_mutex);
1477         is->pictq_size++;
1478         SDL_UnlockMutex(is->pictq_mutex);
1479     }
1480     return 0;
1481 }
1482
1483 static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
1484 {
1485     int got_picture, i;
1486
1487     if (packet_queue_get(&is->videoq, pkt, 1) < 0)
1488         return -1;
1489
1490     if (pkt->data == flush_pkt.data) {
1491         avcodec_flush_buffers(is->video_st->codec);
1492
1493         SDL_LockMutex(is->pictq_mutex);
1494         // Make sure there are no long delay timers (ideally we should just flush the queue, but that's harder)
1495         for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
1496             is->pictq[i].skip = 1;
1497         }
1498         while (is->pictq_size && !is->videoq.abort_request) {
1499             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1500         }
1501         is->video_current_pos = -1;
1502         is->frame_last_pts = AV_NOPTS_VALUE;
1503         is->frame_last_duration = 0;
1504         is->frame_timer = (double)av_gettime() / 1000000.0;
1505         is->frame_last_dropped_pts = AV_NOPTS_VALUE;
1506         SDL_UnlockMutex(is->pictq_mutex);
1507
1508         return 0;
1509     }
1510
1511     avcodec_decode_video2(is->video_st->codec, frame, &got_picture, pkt);
1512
1513     if (got_picture) {
1514         int ret = 1;
1515
1516         if (decoder_reorder_pts == -1) {
1517             *pts = av_frame_get_best_effort_timestamp(frame);
1518         } else if (decoder_reorder_pts) {
1519             *pts = frame->pkt_pts;
1520         } else {
1521             *pts = frame->pkt_dts;
1522         }
1523
1524         if (*pts == AV_NOPTS_VALUE) {
1525             *pts = 0;
1526         }
1527
1528         if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) || is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK) &&
1529              (framedrop>0 || (framedrop && is->audio_st))) {
1530             SDL_LockMutex(is->pictq_mutex);
1531             if (is->frame_last_pts != AV_NOPTS_VALUE && *pts) {
1532                 double clockdiff = get_video_clock(is) - get_master_clock(is);
1533                 double dpts = av_q2d(is->video_st->time_base) * *pts;
1534                 double ptsdiff = dpts - is->frame_last_pts;
1535                 if (fabs(clockdiff) < AV_NOSYNC_THRESHOLD &&
1536                      ptsdiff > 0 && ptsdiff < AV_NOSYNC_THRESHOLD &&
1537                      clockdiff + ptsdiff - is->frame_last_filter_delay < 0) {
1538                     is->frame_last_dropped_pos = pkt->pos;
1539                     is->frame_last_dropped_pts = dpts;
1540                     is->frame_drops_early++;
1541                     ret = 0;
1542                 }
1543             }
1544             SDL_UnlockMutex(is->pictq_mutex);
1545         }
1546
1547         if (ret)
1548             is->frame_last_returned_time = av_gettime() / 1000000.0;
1549
1550         return ret;
1551     }
1552     return 0;
1553 }
1554
1555 #if CONFIG_AVFILTER
1556 typedef struct {
1557     VideoState *is;
1558     AVFrame *frame;
1559     int use_dr1;
1560 } FilterPriv;
1561
1562 static int input_get_buffer(AVCodecContext *codec, AVFrame *pic)
1563 {
1564     AVFilterContext *ctx = codec->opaque;
1565     AVFilterBufferRef  *ref;
1566     int perms = AV_PERM_WRITE;
1567     int i, w, h, stride[AV_NUM_DATA_POINTERS];
1568     unsigned edge;
1569     int pixel_size;
1570
1571     av_assert0(codec->flags & CODEC_FLAG_EMU_EDGE);
1572
1573     if (codec->codec->capabilities & CODEC_CAP_NEG_LINESIZES)
1574         perms |= AV_PERM_NEG_LINESIZES;
1575
1576     if (pic->buffer_hints & FF_BUFFER_HINTS_VALID) {
1577         if (pic->buffer_hints & FF_BUFFER_HINTS_READABLE) perms |= AV_PERM_READ;
1578         if (pic->buffer_hints & FF_BUFFER_HINTS_PRESERVE) perms |= AV_PERM_PRESERVE;
1579         if (pic->buffer_hints & FF_BUFFER_HINTS_REUSABLE) perms |= AV_PERM_REUSE2;
1580     }
1581     if (pic->reference) perms |= AV_PERM_READ | AV_PERM_PRESERVE;
1582
1583     w = codec->width;
1584     h = codec->height;
1585
1586     if(av_image_check_size(w, h, 0, codec) || codec->pix_fmt<0)
1587         return -1;
1588
1589     avcodec_align_dimensions2(codec, &w, &h, stride);
1590     edge = codec->flags & CODEC_FLAG_EMU_EDGE ? 0 : avcodec_get_edge_width();
1591     w += edge << 1;
1592     h += edge << 1;
1593     if (codec->pix_fmt != ctx->outputs[0]->format) {
1594         av_log(codec, AV_LOG_ERROR, "Pixel format mismatches %d %d\n", codec->pix_fmt, ctx->outputs[0]->format);
1595         return -1;
1596     }
1597     if (!(ref = avfilter_get_video_buffer(ctx->outputs[0], perms, w, h)))
1598         return -1;
1599
1600     pixel_size = av_pix_fmt_descriptors[ref->format].comp[0].step_minus1 + 1;
1601     ref->video->w = codec->width;
1602     ref->video->h = codec->height;
1603     for (i = 0; i < 4; i ++) {
1604         unsigned hshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_w : 0;
1605         unsigned vshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_h : 0;
1606
1607         pic->base[i]     = ref->data[i];
1608         if (ref->data[i]) {
1609             ref->data[i]    += ((edge * pixel_size) >> hshift) + ((edge * ref->linesize[i]) >> vshift);
1610         }
1611         pic->data[i]     = ref->data[i];
1612         pic->linesize[i] = ref->linesize[i];
1613     }
1614     pic->opaque = ref;
1615     pic->type   = FF_BUFFER_TYPE_USER;
1616     pic->reordered_opaque = codec->reordered_opaque;
1617     pic->width               = codec->width;
1618     pic->height              = codec->height;
1619     pic->format              = codec->pix_fmt;
1620     pic->sample_aspect_ratio = codec->sample_aspect_ratio;
1621     if (codec->pkt) pic->pkt_pts = codec->pkt->pts;
1622     else            pic->pkt_pts = AV_NOPTS_VALUE;
1623     return 0;
1624 }
1625
1626 static void input_release_buffer(AVCodecContext *codec, AVFrame *pic)
1627 {
1628     memset(pic->data, 0, sizeof(pic->data));
1629     avfilter_unref_buffer(pic->opaque);
1630 }
1631
1632 static int input_reget_buffer(AVCodecContext *codec, AVFrame *pic)
1633 {
1634     AVFilterBufferRef *ref = pic->opaque;
1635
1636     if (pic->data[0] == NULL) {
1637         pic->buffer_hints |= FF_BUFFER_HINTS_READABLE;
1638         return codec->get_buffer(codec, pic);
1639     }
1640
1641     if ((codec->width != ref->video->w) || (codec->height != ref->video->h) ||
1642         (codec->pix_fmt != ref->format)) {
1643         av_log(codec, AV_LOG_ERROR, "Picture properties changed.\n");
1644         return -1;
1645     }
1646
1647     pic->reordered_opaque = codec->reordered_opaque;
1648     if (codec->pkt) pic->pkt_pts = codec->pkt->pts;
1649     else            pic->pkt_pts = AV_NOPTS_VALUE;
1650     return 0;
1651 }
1652
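/* init callback of the "ffplay_input" filter: remember the VideoState and, if
   the decoder supports DR1, install the buffer callbacks defined above. */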
1653 static int input_init(AVFilterContext *ctx, const char *args, void *opaque)
1654 {
1655     FilterPriv *priv = ctx->priv;
1656     AVCodecContext *codec;
1657     if (!opaque) return -1;
1658
1659     priv->is = opaque;
1660     codec    = priv->is->video_st->codec;
1661     codec->opaque = ctx;
1662     if (codec->codec->capabilities & CODEC_CAP_DR1) {
1663         av_assert0(codec->flags & CODEC_FLAG_EMU_EDGE);
1664         priv->use_dr1 = 1;
1665         codec->get_buffer     = input_get_buffer;
1666         codec->release_buffer = input_release_buffer;
1667         codec->reget_buffer   = input_reget_buffer;
1668         codec->thread_safe_callbacks = 1;
1669     }
1670
1671     priv->frame = avcodec_alloc_frame();
1672
1673     return 0;
1674 }
1675
1676 static void input_uninit(AVFilterContext *ctx)
1677 {
1678     FilterPriv *priv = ctx->priv;
1679     av_free(priv->frame);
1680 }
1681
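/* request_frame callback: decode the next video frame and push it onto the
   output link.  With DR1 the picture already lives in a filter buffer and is
   only re-referenced; otherwise it is copied into a new buffer. */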
1682 static int input_request_frame(AVFilterLink *link)
1683 {
1684     FilterPriv *priv = link->src->priv;
1685     AVFilterBufferRef *picref;
1686     int64_t pts = 0;
1687     AVPacket pkt;
1688     int ret;
1689
1690     while (!(ret = get_video_frame(priv->is, priv->frame, &pts, &pkt)))
1691         av_free_packet(&pkt);
1692     if (ret < 0)
1693         return -1;
1694
1695     if (priv->use_dr1 && priv->frame->opaque) {
1696         picref = avfilter_ref_buffer(priv->frame->opaque, ~0);
1697     } else {
1698         picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, priv->frame->width, priv->frame->height);
1699         av_image_copy(picref->data, picref->linesize,
1700                       (const uint8_t **)(void **)priv->frame->data, priv->frame->linesize,
1701                       picref->format, priv->frame->width, priv->frame->height);
1702     }
1703     av_free_packet(&pkt);
1704
1705     avfilter_copy_frame_props(picref, priv->frame);
1706     picref->video->sample_aspect_ratio = av_guess_sample_aspect_ratio(priv->is->ic, priv->is->video_st, priv->frame);
1707     picref->pts = pts;
1708
1709     avfilter_start_frame(link, picref);
1710     avfilter_draw_slice(link, 0, picref->video->h, 1);
1711     avfilter_end_frame(link);
1712
1713     return 0;
1714 }
1715
1716 static int input_query_formats(AVFilterContext *ctx)
1717 {
1718     FilterPriv *priv = ctx->priv;
1719     enum PixelFormat pix_fmts[] = {
1720         priv->is->video_st->codec->pix_fmt, PIX_FMT_NONE
1721     };
1722
1723     avfilter_set_common_pixel_formats(ctx, avfilter_make_format_list(pix_fmts));
1724     return 0;
1725 }
1726
1727 static int input_config_props(AVFilterLink *link)
1728 {
1729     FilterPriv *priv  = link->src->priv;
1730     AVStream *s = priv->is->video_st;
1731
1732     link->w = s->codec->width;
1733     link->h = s->codec->height;
1734     link->sample_aspect_ratio = s->sample_aspect_ratio.num ?
1735         s->sample_aspect_ratio : s->codec->sample_aspect_ratio;
1736     link->time_base = s->time_base;
1737
1738     return 0;
1739 }
1740
1741 static AVFilter input_filter =
1742 {
1743     .name      = "ffplay_input",
1744
1745     .priv_size = sizeof(FilterPriv),
1746
1747     .init      = input_init,
1748     .uninit    = input_uninit,
1749
1750     .query_formats = input_query_formats,
1751
1752     .inputs    = (AVFilterPad[]) {{ .name = NULL }},
1753     .outputs   = (AVFilterPad[]) {{ .name = "default",
1754                                     .type = AVMEDIA_TYPE_VIDEO,
1755                                     .request_frame = input_request_frame,
1756                                     .config_props  = input_config_props, },
1757                                   { .name = NULL }},
1758 };
1759
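/* Build the video filter graph:
   ffplay_input -> [optional user chain from -vf] -> format=yuv420p -> buffersink.
   The trailing format filter forces yuv420p, the pixel format the display
   code expects. */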
1760 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters)
1761 {
1762     static const enum PixelFormat pix_fmts[] = { PIX_FMT_YUV420P, PIX_FMT_NONE };
1763     char sws_flags_str[128];
1764     int ret;
1765     AVBufferSinkParams *buffersink_params = av_buffersink_params_alloc();
1766     AVFilterContext *filt_src = NULL, *filt_out = NULL, *filt_format;
1767     snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%d", sws_flags);
1768     graph->scale_sws_opts = av_strdup(sws_flags_str);
1769
1770     if ((ret = avfilter_graph_create_filter(&filt_src, &input_filter, "src",
1771                                             NULL, is, graph)) < 0)
1772         return ret;
1773
1774 #if FF_API_OLD_VSINK_API
1775     ret = avfilter_graph_create_filter(&filt_out,
1776                                        avfilter_get_by_name("buffersink"),
1777                                        "out", NULL, pix_fmts, graph);
1778 #else
1779     buffersink_params->pixel_fmts = pix_fmts;
1780     ret = avfilter_graph_create_filter(&filt_out,
1781                                        avfilter_get_by_name("buffersink"),
1782                                        "out", NULL, buffersink_params, graph);
1783 #endif
1784     av_freep(&buffersink_params);
1785     if (ret < 0)
1786         return ret;
1787
1788     if ((ret = avfilter_graph_create_filter(&filt_format,
1789                                             avfilter_get_by_name("format"),
1790                                             "format", "yuv420p", NULL, graph)) < 0)
1791         return ret;
1792     if ((ret = avfilter_link(filt_format, 0, filt_out, 0)) < 0)
1793         return ret;
1794
1795
1796     if (vfilters) {
1797         AVFilterInOut *outputs = avfilter_inout_alloc();
1798         AVFilterInOut *inputs  = avfilter_inout_alloc();
1799
1800         outputs->name    = av_strdup("in");
1801         outputs->filter_ctx = filt_src;
1802         outputs->pad_idx = 0;
1803         outputs->next    = NULL;
1804
1805         inputs->name    = av_strdup("out");
1806         inputs->filter_ctx = filt_format;
1807         inputs->pad_idx = 0;
1808         inputs->next    = NULL;
1809
1810         if ((ret = avfilter_graph_parse(graph, vfilters, &inputs, &outputs, NULL)) < 0)
1811             return ret;
1812     } else {
1813         if ((ret = avfilter_link(filt_src, 0, filt_format, 0)) < 0)
1814             return ret;
1815     }
1816
1817     if ((ret = avfilter_graph_config(graph, NULL)) < 0)
1818         return ret;
1819
1820     is->out_video_filter = filt_out;
1821
1822     return ret;
1823 }
1824
1825 #endif  /* CONFIG_AVFILTER */
1826
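/* video decoding thread: pulls decoded (and, with libavfilter, filtered)
   frames, rescales their pts to seconds and hands them to queue_picture()
   for display.  The filter graph is rebuilt whenever the coded frame size
   changes. */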
1827 static int video_thread(void *arg)
1828 {
1829     VideoState *is = arg;
1830     AVFrame *frame = avcodec_alloc_frame();
1831     int64_t pts_int = AV_NOPTS_VALUE, pos = -1;
1832     double pts;
1833     int ret;
1834
1835 #if CONFIG_AVFILTER
1836     AVFilterGraph *graph = avfilter_graph_alloc();
1837     AVFilterContext *filt_out = NULL;
1838     int last_w = is->video_st->codec->width;
1839     int last_h = is->video_st->codec->height;
1840
1841     if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
1842         goto the_end;
1843     filt_out = is->out_video_filter;
1844 #endif
1845
1846     for (;;) {
1847 #if !CONFIG_AVFILTER
1848         AVPacket pkt;
1849 #else
1850         AVFilterBufferRef *picref;
1851         AVRational tb = filt_out->inputs[0]->time_base;
1852 #endif
1853         while (is->paused && !is->videoq.abort_request)
1854             SDL_Delay(10);
1855 #if CONFIG_AVFILTER
1856         if (   last_w != is->video_st->codec->width
1857             || last_h != is->video_st->codec->height) {
1858             av_log(NULL, AV_LOG_INFO, "Frame changed from size:%dx%d to size:%dx%d\n",
1859                    last_w, last_h, is->video_st->codec->width, is->video_st->codec->height);
1860             avfilter_graph_free(&graph);
1861             graph = avfilter_graph_alloc();
1862             if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
1863                 goto the_end;
1864             filt_out = is->out_video_filter;
1865             last_w = is->video_st->codec->width;
1866             last_h = is->video_st->codec->height;
1867         }
1868         ret = av_buffersink_get_buffer_ref(filt_out, &picref, 0);
1869         if (picref) {
1870             avfilter_fill_frame_from_video_buffer_ref(frame, picref);
1871             pts_int = picref->pts;
1872             tb      = filt_out->inputs[0]->time_base;
1873             pos     = picref->pos;
1874             frame->opaque = picref;
1875
1876             ret = 1;
1877         }
1878
1879         if (ret >= 0 && av_cmp_q(tb, is->video_st->time_base)) {
1880             av_unused int64_t pts1 = pts_int;
1881             pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
1882             av_dlog(NULL, "video_thread(): "
1883                     "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
1884                     tb.num, tb.den, pts1,
1885                     is->video_st->time_base.num, is->video_st->time_base.den, pts_int);
1886         }
1887 #else
1888         ret = get_video_frame(is, frame, &pts_int, &pkt);
1889         pos = pkt.pos;
1890         av_free_packet(&pkt);
1891         if (ret == 0)
1892             continue;
1893 #endif
1894
1895         if (ret < 0)
1896             goto the_end;
1897
1898         is->frame_last_filter_delay = av_gettime() / 1000000.0 - is->frame_last_returned_time;
1899         if (fabs(is->frame_last_filter_delay) > AV_NOSYNC_THRESHOLD / 10.0)
1900             is->frame_last_filter_delay = 0;
1901
1902 #if CONFIG_AVFILTER
1903         if (!picref)
1904             continue;
1905 #endif
1906
1907         pts = pts_int * av_q2d(is->video_st->time_base);
1908
1909         ret = queue_picture(is, frame, pts, pos);
1910
1911         if (ret < 0)
1912             goto the_end;
1913
1914         if (is->step)
1915             stream_toggle_pause(is);
1916     }
1917  the_end:
1918 #if CONFIG_AVFILTER
1919     av_freep(&vfilters);
1920     avfilter_graph_free(&graph);
1921 #endif
1922     av_free(frame);
1923     return 0;
1924 }
1925
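/* subtitle decoding thread: decodes subtitle packets, converts the palette of
   bitmap subtitles from RGBA to YUVA and stores the result in the subpicture
   queue (subpq). */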
1926 static int subtitle_thread(void *arg)
1927 {
1928     VideoState *is = arg;
1929     SubPicture *sp;
1930     AVPacket pkt1, *pkt = &pkt1;
1931     int got_subtitle;
1932     double pts;
1933     int i, j;
1934     int r, g, b, y, u, v, a;
1935
1936     for (;;) {
1937         while (is->paused && !is->subtitleq.abort_request) {
1938             SDL_Delay(10);
1939         }
1940         if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
1941             break;
1942
1943         if (pkt->data == flush_pkt.data) {
1944             avcodec_flush_buffers(is->subtitle_st->codec);
1945             continue;
1946         }
1947         SDL_LockMutex(is->subpq_mutex);
1948         while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
1949                !is->subtitleq.abort_request) {
1950             SDL_CondWait(is->subpq_cond, is->subpq_mutex);
1951         }
1952         SDL_UnlockMutex(is->subpq_mutex);
1953
1954         if (is->subtitleq.abort_request)
1955             return 0;
1956
1957         sp = &is->subpq[is->subpq_windex];
1958
1959         /* NOTE: pts is the PTS of the _first_ subtitle picture beginning in
1960            this packet, if any */
1961         pts = 0;
1962         if (pkt->pts != AV_NOPTS_VALUE)
1963             pts = av_q2d(is->subtitle_st->time_base) * pkt->pts;
1964
1965         avcodec_decode_subtitle2(is->subtitle_st->codec, &sp->sub,
1966                                  &got_subtitle, pkt);
1967
1968         if (got_subtitle && sp->sub.format == 0) {
1969             sp->pts = pts;
1970
1971             for (i = 0; i < sp->sub.num_rects; i++)
1972             {
1973                 for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
1974                 {
1975                     RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
1976                     y = RGB_TO_Y_CCIR(r, g, b);
1977                     u = RGB_TO_U_CCIR(r, g, b, 0);
1978                     v = RGB_TO_V_CCIR(r, g, b, 0);
1979                     YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
1980                 }
1981             }
1982
1983             /* now we can update the picture count */
1984             if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
1985                 is->subpq_windex = 0;
1986             SDL_LockMutex(is->subpq_mutex);
1987             is->subpq_size++;
1988             SDL_UnlockMutex(is->subpq_mutex);
1989         }
1990         av_free_packet(pkt);
1991     }
1992     return 0;
1993 }
1994
1995 /* copy samples for display in the audio visualization (waves/RDFT) window */
1996 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1997 {
1998     int size, len;
1999
2000     size = samples_size / sizeof(short);
2001     while (size > 0) {
2002         len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
2003         if (len > size)
2004             len = size;
2005         memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
2006         samples += len;
2007         is->sample_array_index += len;
2008         if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
2009             is->sample_array_index = 0;
2010         size -= len;
2011     }
2012 }
2013
2014 /* return the wanted number of samples to get better sync if sync_type is video
2015  * or external master clock */
2016 static int synchronize_audio(VideoState *is, int nb_samples)
2017 {
2018     int wanted_nb_samples = nb_samples;
2019
2020     /* if not master, then we try to remove or add samples to correct the clock */
2021     if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
2022          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
2023         double diff, avg_diff;
2024         int min_nb_samples, max_nb_samples;
2025
2026         diff = get_audio_clock(is) - get_master_clock(is);
2027
2028         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
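            /* audio_diff_cum is an exponentially weighted sum of the A-V
               differences; the effective average is cum * (1 - coef), as
               computed below once enough samples have been accumulated. */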
2029             is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
2030             if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
2031                 /* not enough measurements to have a correct estimate */
2032                 is->audio_diff_avg_count++;
2033             } else {
2034                 /* estimate the A-V difference */
2035                 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
2036
2037                 if (fabs(avg_diff) >= is->audio_diff_threshold) {
2038                     wanted_nb_samples = nb_samples + (int)(diff * is->audio_src_freq);
2039                     min_nb_samples = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX) / 100));
2040                     max_nb_samples = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX) / 100));
2041                     wanted_nb_samples = FFMIN(FFMAX(wanted_nb_samples, min_nb_samples), max_nb_samples);
2042                 }
2043                 av_dlog(NULL, "diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
2044                         diff, avg_diff, wanted_nb_samples - nb_samples,
2045                         is->audio_clock, is->video_clock, is->audio_diff_threshold);
2046             }
2047         } else {
2048             /* too big difference: may be initial PTS errors, so
2049                reset A-V filter */
2050             is->audio_diff_avg_count = 0;
2051             is->audio_diff_cum       = 0;
2052         }
2053     }
2054
2055     return wanted_nb_samples;
2056 }
2057
2058 /* decode one audio frame and return its uncompressed size */
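/* The decoder output is resampled with libswresample whenever its sample
   format, channel layout or rate differs from what SDL was opened with, or
   when synchronize_audio() asks for a different number of samples. */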
2059 static int audio_decode_frame(VideoState *is, double *pts_ptr)
2060 {
2061     AVPacket *pkt_temp = &is->audio_pkt_temp;
2062     AVPacket *pkt = &is->audio_pkt;
2063     AVCodecContext *dec = is->audio_st->codec;
2064     int len1, len2, data_size, resampled_data_size;
2065     int64_t dec_channel_layout;
2066     int got_frame;
2067     double pts;
2068     int new_packet = 0;
2069     int flush_complete = 0;
2070     int wanted_nb_samples;
2071
2072     for (;;) {
2073         /* NOTE: the audio packet can contain several frames */
2074         while (pkt_temp->size > 0 || (!pkt_temp->data && new_packet)) {
2075             if (!is->frame) {
2076                 if (!(is->frame = avcodec_alloc_frame()))
2077                     return AVERROR(ENOMEM);
2078             } else
2079                 avcodec_get_frame_defaults(is->frame);
2080
2081             if (flush_complete)
2082                 break;
2083             new_packet = 0;
2084             len1 = avcodec_decode_audio4(dec, is->frame, &got_frame, pkt_temp);
2085             if (len1 < 0) {
2086                 /* if error, we skip the frame */
2087                 pkt_temp->size = 0;
2088                 break;
2089             }
2090
2091             pkt_temp->data += len1;
2092             pkt_temp->size -= len1;
2093
2094             if (!got_frame) {
2095                 /* stop sending empty packets if the decoder is finished */
2096                 if (!pkt_temp->data && dec->codec->capabilities & CODEC_CAP_DELAY)
2097                     flush_complete = 1;
2098                 continue;
2099             }
2100             data_size = av_samples_get_buffer_size(NULL, dec->channels,
2101                                                    is->frame->nb_samples,
2102                                                    dec->sample_fmt, 1);
2103
2104             dec_channel_layout = (dec->channel_layout && dec->channels == av_get_channel_layout_nb_channels(dec->channel_layout)) ? dec->channel_layout : av_get_default_channel_layout(dec->channels);
2105             wanted_nb_samples = synchronize_audio(is, is->frame->nb_samples);
2106
2107             if (dec->sample_fmt != is->audio_src_fmt ||
2108                 dec_channel_layout != is->audio_src_channel_layout ||
2109                 dec->sample_rate != is->audio_src_freq ||
2110                 (wanted_nb_samples != is->frame->nb_samples && !is->swr_ctx)) {
2111                 if (is->swr_ctx)
2112                     swr_free(&is->swr_ctx);
2113                 is->swr_ctx = swr_alloc_set_opts(NULL,
2114                                                  is->audio_tgt_channel_layout, is->audio_tgt_fmt, is->audio_tgt_freq,
2115                                                  dec_channel_layout,           dec->sample_fmt,   dec->sample_rate,
2116                                                  0, NULL);
2117                 if (!is->swr_ctx || swr_init(is->swr_ctx) < 0) {
2118                     fprintf(stderr, "Cannot create sample rate converter for conversion of %d Hz %s %d channels to %d Hz %s %d channels!\n",
2119                         dec->sample_rate,
2120                         av_get_sample_fmt_name(dec->sample_fmt),
2121                         dec->channels,
2122                         is->audio_tgt_freq,
2123                         av_get_sample_fmt_name(is->audio_tgt_fmt),
2124                         is->audio_tgt_channels);
2125                     break;
2126                 }
2127                 is->audio_src_channel_layout = dec_channel_layout;
2128                 is->audio_src_channels = dec->channels;
2129                 is->audio_src_freq = dec->sample_rate;
2130                 is->audio_src_fmt = dec->sample_fmt;
2131             }
2132
2133             resampled_data_size = data_size;
2134             if (is->swr_ctx) {
2135                 const uint8_t *in[] = { is->frame->data[0] };
2136                 uint8_t *out[] = {is->audio_buf2};
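                /* if synchronization requested a different sample count,
                   stretch/shrink the audio via swr compensation, with the
                   counts rescaled from the decoder rate to the output rate */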
2137                 if (wanted_nb_samples != is->frame->nb_samples) {
2138                     if (swr_set_compensation(is->swr_ctx, (wanted_nb_samples - is->frame->nb_samples) * is->audio_tgt_freq / dec->sample_rate,
2139                                                 wanted_nb_samples * is->audio_tgt_freq / dec->sample_rate) < 0) {
2140                         fprintf(stderr, "swr_set_compensation() failed\n");
2141                         break;
2142                     }
2143                 }
2144                 len2 = swr_convert(is->swr_ctx, out, sizeof(is->audio_buf2) / is->audio_tgt_channels / av_get_bytes_per_sample(is->audio_tgt_fmt),
2145                                                 in, is->frame->nb_samples);
2146                 if (len2 < 0) {
2147                     fprintf(stderr, "audio_resample() failed\n");
2148                     break;
2149                 }
2150                 if (len2 == sizeof(is->audio_buf2) / is->audio_tgt_channels / av_get_bytes_per_sample(is->audio_tgt_fmt)) {
2151                     fprintf(stderr, "warning: audio buffer is probably too small\n");
2152                     swr_init(is->swr_ctx);
2153                 }
2154                 is->audio_buf = is->audio_buf2;
2155                 resampled_data_size = len2 * is->audio_tgt_channels * av_get_bytes_per_sample(is->audio_tgt_fmt);
2156             } else {
2157                 is->audio_buf = is->frame->data[0];
2158             }
2159
2160             /* if no pts, then compute it */
2161             pts = is->audio_clock;
2162             *pts_ptr = pts;
2163             is->audio_clock += (double)data_size /
2164                 (dec->channels * dec->sample_rate * av_get_bytes_per_sample(dec->sample_fmt));
2165 #ifdef DEBUG
2166             {
2167                 static double last_clock;
2168                 printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
2169                        is->audio_clock - last_clock,
2170                        is->audio_clock, pts);
2171                 last_clock = is->audio_clock;
2172             }
2173 #endif
2174             return resampled_data_size;
2175         }
2176
2177         /* free the current packet */
2178         if (pkt->data)
2179             av_free_packet(pkt);
2180         memset(pkt_temp, 0, sizeof(*pkt_temp));
2181
2182         if (is->paused || is->audioq.abort_request) {
2183             return -1;
2184         }
2185
2186         /* read next packet */
2187         if ((new_packet = packet_queue_get(&is->audioq, pkt, 1)) < 0)
2188             return -1;
2189
2190         if (pkt->data == flush_pkt.data) {
2191             avcodec_flush_buffers(dec);
2192             flush_complete = 0;
2193         }
2194
2195         *pkt_temp = *pkt;
2196
2197         /* update the audio clock with the pts */
2198         if (pkt->pts != AV_NOPTS_VALUE) {
2199             is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
2200         }
2201     }
2202 }
2203
2204 /* prepare a new audio buffer */
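/* Called by SDL from its own audio thread whenever the device needs more
   data: refills the buffer by decoding (and resampling) audio, falls back to
   silence on errors, and updates the audio clock used for A/V sync. */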
2205 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2206 {
2207     VideoState *is = opaque;
2208     int audio_size, len1;
2209     int bytes_per_sec;
2210     int frame_size = av_samples_get_buffer_size(NULL, is->audio_tgt_channels, 1, is->audio_tgt_fmt, 1);
2211     double pts;
2212
2213     audio_callback_time = av_gettime();
2214
2215     while (len > 0) {
2216         if (is->audio_buf_index >= is->audio_buf_size) {
2217            audio_size = audio_decode_frame(is, &pts);
2218            if (audio_size < 0) {
2219                 /* if error, just output silence */
2220                is->audio_buf      = is->silence_buf;
2221                is->audio_buf_size = sizeof(is->silence_buf) / frame_size * frame_size;
2222            } else {
2223                if (is->show_mode != SHOW_MODE_VIDEO)
2224                    update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2225                is->audio_buf_size = audio_size;
2226            }
2227            is->audio_buf_index = 0;
2228         }
2229         len1 = is->audio_buf_size - is->audio_buf_index;
2230         if (len1 > len)
2231             len1 = len;
2232         memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2233         len -= len1;
2234         stream += len1;
2235         is->audio_buf_index += len1;
2236     }
2237     bytes_per_sec = is->audio_tgt_freq * is->audio_tgt_channels * av_get_bytes_per_sample(is->audio_tgt_fmt);
2238     is->audio_write_buf_size = is->audio_buf_size - is->audio_buf_index;
2239     /* Let's assume the audio driver that is used by SDL has two periods. */
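    /* The clock therefore points at the sample being played right now:
       audio_clock (pts at the end of the decoded data) minus the data still
       buffered, i.e. two hardware buffers plus the unconsumed part of the
       current buffer, converted to seconds. */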
2240     is->audio_current_pts = is->audio_clock - (double)(2 * is->audio_hw_buf_size + is->audio_write_buf_size) / bytes_per_sec;
2241     is->audio_current_pts_drift = is->audio_current_pts - audio_callback_time / 1000000.0;
2242 }
2243
2244 /* open a given stream. Return 0 if OK */
2245 static int stream_component_open(VideoState *is, int stream_index)
2246 {
2247     AVFormatContext *ic = is->ic;
2248     AVCodecContext *avctx;
2249     AVCodec *codec;
2250     SDL_AudioSpec wanted_spec, spec;
2251     AVDictionary *opts;
2252     AVDictionaryEntry *t = NULL;
2253     int64_t wanted_channel_layout = 0;
2254     int wanted_nb_channels;
2255     const char *env;
2256
2257     if (stream_index < 0 || stream_index >= ic->nb_streams)
2258         return -1;
2259     avctx = ic->streams[stream_index]->codec;
2260
2261     codec = avcodec_find_decoder(avctx->codec_id);
2262     opts = filter_codec_opts(codec_opts, codec, ic, ic->streams[stream_index]);
2263
2264     switch(avctx->codec_type){
2265         case AVMEDIA_TYPE_AUDIO   : if(audio_codec_name   ) codec= avcodec_find_decoder_by_name(   audio_codec_name); break;
2266         case AVMEDIA_TYPE_SUBTITLE: if(subtitle_codec_name) codec= avcodec_find_decoder_by_name(subtitle_codec_name); break;
2267         case AVMEDIA_TYPE_VIDEO   : if(video_codec_name   ) codec= avcodec_find_decoder_by_name(   video_codec_name); break;
2268     }
2269     if (!codec)
2270         return -1;
2271
2272     avctx->workaround_bugs   = workaround_bugs;
2273     avctx->lowres            = lowres;
2274     if(avctx->lowres > codec->max_lowres){
2275         av_log(avctx, AV_LOG_WARNING, "The maximum value for lowres supported by the decoder is %d\n",
2276                 codec->max_lowres);
2277         avctx->lowres= codec->max_lowres;
2278     }
2279     avctx->idct_algo         = idct;
2280     avctx->skip_frame        = skip_frame;
2281     avctx->skip_idct         = skip_idct;
2282     avctx->skip_loop_filter  = skip_loop_filter;
2283     avctx->error_concealment = error_concealment;
2284
2285     if(avctx->lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
2286     if (fast)   avctx->flags2 |= CODEC_FLAG2_FAST;
2287     if(codec->capabilities & CODEC_CAP_DR1)
2288         avctx->flags |= CODEC_FLAG_EMU_EDGE;
2289
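    /* For audio, pick a channel layout that SDL can actually open (1, 2, 4 or
       6 channels), optionally overridden by the SDL_AUDIO_CHANNELS environment
       variable. */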
2290     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2291         memset(&is->audio_pkt_temp, 0, sizeof(is->audio_pkt_temp));
2292         env = SDL_getenv("SDL_AUDIO_CHANNELS");
2293         if (env)
2294             wanted_channel_layout = av_get_default_channel_layout(SDL_atoi(env));
2295         if (!wanted_channel_layout) {
2296             wanted_channel_layout = (avctx->channel_layout && avctx->channels == av_get_channel_layout_nb_channels(avctx->channel_layout)) ? avctx->channel_layout : av_get_default_channel_layout(avctx->channels);
2297             wanted_channel_layout &= ~AV_CH_LAYOUT_STEREO_DOWNMIX;
2298             wanted_nb_channels = av_get_channel_layout_nb_channels(wanted_channel_layout);
2299             /* SDL only supports 1, 2, 4 or 6 channels at the moment, so we have to make sure not to request anything else. */
2300             while (wanted_nb_channels > 0 && (wanted_nb_channels == 3 || wanted_nb_channels == 5 || wanted_nb_channels > 6)) {
2301                 wanted_nb_channels--;
2302                 wanted_channel_layout = av_get_default_channel_layout(wanted_nb_channels);
2303             }
2304         }
2305         wanted_spec.channels = av_get_channel_layout_nb_channels(wanted_channel_layout);
2306         wanted_spec.freq = avctx->sample_rate;
2307         if (wanted_spec.freq <= 0 || wanted_spec.channels <= 0) {
2308             fprintf(stderr, "Invalid sample rate or channel count!\n");
2309             return -1;
2310         }
2311     }
2312
2313     if (!av_dict_get(opts, "threads", NULL, 0))
2314         av_dict_set(&opts, "threads", "auto", 0);
2315     if (!codec ||
2316         avcodec_open2(avctx, codec, &opts) < 0)
2317         return -1;
2318     if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2319         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2320         return AVERROR_OPTION_NOT_FOUND;
2321     }
2322
2323     /* prepare audio output */
2324     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2325         wanted_spec.format = AUDIO_S16SYS;
2326         wanted_spec.silence = 0;
2327         wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
2328         wanted_spec.callback = sdl_audio_callback;
2329         wanted_spec.userdata = is;
2330         if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2331             fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
2332             return -1;
2333         }
2334         is->audio_hw_buf_size = spec.size;
2335         if (spec.format != AUDIO_S16SYS) {
2336             fprintf(stderr, "SDL advised audio format %d is not supported!\n", spec.format);
2337             return -1;
2338         }
2339         if (spec.channels != wanted_spec.channels) {
2340             wanted_channel_layout = av_get_default_channel_layout(spec.channels);
2341             if (!wanted_channel_layout) {
2342                 fprintf(stderr, "SDL advised channel count %d is not supported!\n", spec.channels);
2343                 return -1;
2344             }
2345         }
2346         is->audio_src_fmt = is->audio_tgt_fmt = AV_SAMPLE_FMT_S16;
2347         is->audio_src_freq = is->audio_tgt_freq = spec.freq;
2348         is->audio_src_channel_layout = is->audio_tgt_channel_layout = wanted_channel_layout;
2349         is->audio_src_channels = is->audio_tgt_channels = spec.channels;
2350     }
2351
2352     ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2353     switch (avctx->codec_type) {
2354     case AVMEDIA_TYPE_AUDIO:
2355         is->audio_stream = stream_index;
2356         is->audio_st = ic->streams[stream_index];
2357         is->audio_buf_size  = 0;
2358         is->audio_buf_index = 0;
2359
2360         /* init averaging filter */
2361         is->audio_diff_avg_coef  = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2362         is->audio_diff_avg_count = 0;
2363         /* since we do not have a precise enough audio fifo fullness,
2364            we correct audio sync only if larger than this threshold */
2365         is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / wanted_spec.freq;
2366
2367         memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
2368         packet_queue_start(&is->audioq);
2369         SDL_PauseAudio(0);
2370         break;
2371     case AVMEDIA_TYPE_VIDEO:
2372         is->video_stream = stream_index;
2373         is->video_st = ic->streams[stream_index];
2374
2375         packet_queue_start(&is->videoq);
2376         is->video_tid = SDL_CreateThread(video_thread, is);
2377         break;
2378     case AVMEDIA_TYPE_SUBTITLE:
2379         is->subtitle_stream = stream_index;
2380         is->subtitle_st = ic->streams[stream_index];
2381         packet_queue_start(&is->subtitleq);
2382
2383         is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
2384         break;
2385     default:
2386         break;
2387     }
2388     return 0;
2389 }
2390
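/* Close one stream component: abort its packet queue, wake up and join the
   decoder thread, then flush the queue before closing the codec. */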
2391 static void stream_component_close(VideoState *is, int stream_index)
2392 {
2393     AVFormatContext *ic = is->ic;
2394     AVCodecContext *avctx;
2395
2396     if (stream_index < 0 || stream_index >= ic->nb_streams)
2397         return;
2398     avctx = ic->streams[stream_index]->codec;
2399
2400     switch (avctx->codec_type) {
2401     case AVMEDIA_TYPE_AUDIO:
2402         packet_queue_abort(&is->audioq);
2403
2404         SDL_CloseAudio();
2405
2406         packet_queue_flush(&is->audioq);
2407         av_free_packet(&is->audio_pkt);
2408         if (is->swr_ctx)
2409             swr_free(&is->swr_ctx);
2410         av_freep(&is->audio_buf1);
2411         is->audio_buf = NULL;
2412         av_freep(&is->frame);
2413
2414         if (is->rdft) {
2415             av_rdft_end(is->rdft);
2416             av_freep(&is->rdft_data);
2417             is->rdft = NULL;
2418             is->rdft_bits = 0;
2419         }
2420         break;
2421     case AVMEDIA_TYPE_VIDEO:
2422         packet_queue_abort(&is->videoq);
2423
2424         /* note: we also signal this mutex to make sure we deblock the
2425            video thread in all cases */
2426         SDL_LockMutex(is->pictq_mutex);
2427         SDL_CondSignal(is->pictq_cond);
2428         SDL_UnlockMutex(is->pictq_mutex);
2429
2430         SDL_WaitThread(is->video_tid, NULL);
2431
2432         packet_queue_flush(&is->videoq);
2433         break;
2434     case AVMEDIA_TYPE_SUBTITLE:
2435         packet_queue_abort(&is->subtitleq);
2436
2437         /* note: we also signal this mutex to make sure we deblock the
2438            subtitle thread in all cases */
2439         SDL_LockMutex(is->subpq_mutex);
2440         is->subtitle_stream_changed = 1;
2441
2442         SDL_CondSignal(is->subpq_cond);
2443         SDL_UnlockMutex(is->subpq_mutex);
2444
2445         SDL_WaitThread(is->subtitle_tid, NULL);
2446
2447         packet_queue_flush(&is->subtitleq);
2448         break;
2449     default:
2450         break;
2451     }
2452
2453     ic->streams[stream_index]->discard = AVDISCARD_ALL;
2454     avcodec_close(avctx);
2455     switch (avctx->codec_type) {
2456     case AVMEDIA_TYPE_AUDIO:
2457         is->audio_st = NULL;
2458         is->audio_stream = -1;
2459         break;
2460     case AVMEDIA_TYPE_VIDEO:
2461         is->video_st = NULL;
2462         is->video_stream = -1;
2463         break;
2464     case AVMEDIA_TYPE_SUBTITLE:
2465         is->subtitle_st = NULL;
2466         is->subtitle_stream = -1;
2467         break;
2468     default:
2469         break;
2470     }
2471 }
2472
2473 static int decode_interrupt_cb(void *ctx)
2474 {
2475     VideoState *is = ctx;
2476     return is->abort_request;
2477 }
2478
2479 /* this thread gets the stream from the disk or the network */
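/* It opens the input, probes the streams, performs the initial seek if one
   was requested, opens the best audio/video/subtitle streams, then loops
   demuxing packets into the per-stream queues while honouring pause, seek
   requests, queue limits and end of file. */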
2480 static int read_thread(void *arg)
2481 {
2482     VideoState *is = arg;
2483     AVFormatContext *ic = NULL;
2484     int err, i, ret;
2485     int st_index[AVMEDIA_TYPE_NB];
2486     AVPacket pkt1, *pkt = &pkt1;
2487     int eof = 0;
2488     int pkt_in_play_range = 0;
2489     AVDictionaryEntry *t;
2490     AVDictionary **opts;
2491     int orig_nb_streams;
2492
2493     memset(st_index, -1, sizeof(st_index));
2494     is->video_stream = -1;
2495     is->audio_stream = -1;
2496     is->subtitle_stream = -1;
2497
2498     ic = avformat_alloc_context();
2499     ic->interrupt_callback.callback = decode_interrupt_cb;
2500     ic->interrupt_callback.opaque = is;
2501     err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
2502     if (err < 0) {
2503         print_error(is->filename, err);
2504         ret = -1;
2505         goto fail;
2506     }
2507     if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2508         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2509         ret = AVERROR_OPTION_NOT_FOUND;
2510         goto fail;
2511     }
2512     is->ic = ic;
2513
2514     if (genpts)
2515         ic->flags |= AVFMT_FLAG_GENPTS;
2516
2517     opts = setup_find_stream_info_opts(ic, codec_opts);
2518     orig_nb_streams = ic->nb_streams;
2519
2520     err = avformat_find_stream_info(ic, opts);
2521     if (err < 0) {
2522         fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
2523         ret = -1;
2524         goto fail;
2525     }
2526     for (i = 0; i < orig_nb_streams; i++)
2527         av_dict_free(&opts[i]);
2528     av_freep(&opts);
2529
2530     if (ic->pb)
2531         ic->pb->eof_reached = 0; // FIXME hack, ffplay maybe should not use url_feof() to test for the end
2532
2533     if (seek_by_bytes < 0)
2534         seek_by_bytes = !!(ic->iformat->flags & AVFMT_TS_DISCONT);
2535
2536     /* if seeking requested, we execute it */
2537     if (start_time != AV_NOPTS_VALUE) {
2538         int64_t timestamp;
2539
2540         timestamp = start_time;
2541         /* add the stream start time */
2542         if (ic->start_time != AV_NOPTS_VALUE)
2543             timestamp += ic->start_time;
2544         ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2545         if (ret < 0) {
2546             fprintf(stderr, "%s: could not seek to position %0.3f\n",
2547                     is->filename, (double)timestamp / AV_TIME_BASE);
2548         }
2549     }
2550
2551     for (i = 0; i < ic->nb_streams; i++)
2552         ic->streams[i]->discard = AVDISCARD_ALL;
2553     if (!video_disable)
2554         st_index[AVMEDIA_TYPE_VIDEO] =
2555             av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2556                                 wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2557     if (!audio_disable)
2558         st_index[AVMEDIA_TYPE_AUDIO] =
2559             av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2560                                 wanted_stream[AVMEDIA_TYPE_AUDIO],
2561                                 st_index[AVMEDIA_TYPE_VIDEO],
2562                                 NULL, 0);
2563     if (!video_disable)
2564         st_index[AVMEDIA_TYPE_SUBTITLE] =
2565             av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2566                                 wanted_stream[AVMEDIA_TYPE_SUBTITLE],
2567                                 (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2568                                  st_index[AVMEDIA_TYPE_AUDIO] :
2569                                  st_index[AVMEDIA_TYPE_VIDEO]),
2570                                 NULL, 0);
2571     if (show_status) {
2572         av_dump_format(ic, 0, is->filename, 0);
2573     }
2574
2575     is->show_mode = show_mode;
2576
2577     /* open the streams */
2578     if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2579         stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2580     }
2581
2582     ret = -1;
2583     if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2584         ret = stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2585     }
2586     is->refresh_tid = SDL_CreateThread(refresh_thread, is);
2587     if (is->show_mode == SHOW_MODE_NONE)
2588         is->show_mode = ret >= 0 ? SHOW_MODE_VIDEO : SHOW_MODE_RDFT;
2589
2590     if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2591         stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2592     }
2593
2594     if (is->video_stream < 0 && is->audio_stream < 0) {
2595         fprintf(stderr, "%s: could not open codecs\n", is->filename);
2596         ret = -1;
2597         goto fail;
2598     }
2599
2600     for (;;) {
2601         if (is->abort_request)
2602             break;
2603         if (is->paused != is->last_paused) {
2604             is->last_paused = is->paused;
2605             if (is->paused)
2606                 is->read_pause_return = av_read_pause(ic);
2607             else
2608                 av_read_play(ic);
2609         }
2610 #if CONFIG_RTSP_DEMUXER || CONFIG_MMSH_PROTOCOL
2611         if (is->paused &&
2612                 (!strcmp(ic->iformat->name, "rtsp") ||
2613                  (ic->pb && !strncmp(input_filename, "mmsh:", 5)))) {
2614             /* wait 10 ms to avoid trying to get another packet */
2615             /* XXX: horrible */
2616             SDL_Delay(10);
2617             continue;
2618         }
2619 #endif
2620         if (is->seek_req) {
2621             int64_t seek_target = is->seek_pos;
2622             int64_t seek_min    = is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2623             int64_t seek_max    = is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2624 // FIXME the +-2 is due to rounding being not done in the correct direction in generation
2625 //      of the seek_pos/seek_rel variables
2626
2627             ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2628             if (ret < 0) {
2629                 fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
2630             } else {
2631                 if (is->audio_stream >= 0) {
2632                     packet_queue_flush(&is->audioq);
2633                     packet_queue_put(&is->audioq, &flush_pkt);
2634                 }
2635                 if (is->subtitle_stream >= 0) {
2636                     packet_queue_flush(&is->subtitleq);
2637                     packet_queue_put(&is->subtitleq, &flush_pkt);
2638                 }
2639                 if (is->video_stream >= 0) {
2640                     packet_queue_flush(&is->videoq);
2641                     packet_queue_put(&is->videoq, &flush_pkt);
2642                 }
2643             }
2644             is->seek_req = 0;
2645             eof = 0;
2646         }
2647
2648         /* if the queues are full, no need to read more */
2649         if (   is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2650             || (   (is->audioq   .nb_packets > MIN_FRAMES || is->audio_stream < 0 || is->audioq.abort_request)
2651                 && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream < 0 || is->videoq.abort_request)
2652                 && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream < 0 || is->subtitleq.abort_request))) {
2653             /* wait 10 ms */
2654             SDL_Delay(10);
2655             continue;
2656         }
2657         if (eof) {
2658             if (is->video_stream >= 0) {
2659                 av_init_packet(pkt);
2660                 pkt->data = NULL;
2661                 pkt->size = 0;
2662                 pkt->stream_index = is->video_stream;
2663                 packet_queue_put(&is->videoq, pkt);
2664             }
2665             if (is->audio_stream >= 0 &&
2666                 is->audio_st->codec->codec->capabilities & CODEC_CAP_DELAY) {
2667                 av_init_packet(pkt);
2668                 pkt->data = NULL;
2669                 pkt->size = 0;
2670                 pkt->stream_index = is->audio_stream;
2671                 packet_queue_put(&is->audioq, pkt);
2672             }
2673             SDL_Delay(10);
2674             if (is->audioq.size + is->videoq.size + is->subtitleq.size == 0) {
2675                 if (loop != 1 && (!loop || --loop)) {
2676                     stream_seek(is, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
2677                 } else if (autoexit) {
2678                     ret = AVERROR_EOF;
2679                     goto fail;
2680                 }
2681             }
2682             eof=0;
2683             continue;
2684         }
2685         ret = av_read_frame(ic, pkt);
2686         if (ret < 0) {
2687             if (ret == AVERROR_EOF || url_feof(ic->pb))
2688                 eof = 1;
2689             if (ic->pb && ic->pb->error)
2690                 break;
2691             SDL_Delay(100); /* wait for user event */
2692             continue;
2693         }
2694         /* check if packet is in play range specified by user, then queue, otherwise discard */
2695         pkt_in_play_range = duration == AV_NOPTS_VALUE ||
2696                 (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
2697                 av_q2d(ic->streams[pkt->stream_index]->time_base) -
2698                 (double)(start_time != AV_NOPTS_VALUE ? start_time : 0) / 1000000
2699                 <= ((double)duration / 1000000);
2700         if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
2701             packet_queue_put(&is->audioq, pkt);
2702         } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
2703             packet_queue_put(&is->videoq, pkt);
2704         } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
2705             packet_queue_put(&is->subtitleq, pkt);
2706         } else {
2707             av_free_packet(pkt);
2708         }
2709     }
2710     /* wait until the end */
2711     while (!is->abort_request) {
2712         SDL_Delay(100);
2713     }
2714
2715     ret = 0;
2716  fail:
2717     /* close each stream */
2718     if (is->audio_stream >= 0)
2719         stream_component_close(is, is->audio_stream);
2720     if (is->video_stream >= 0)
2721         stream_component_close(is, is->video_stream);
2722     if (is->subtitle_stream >= 0)
2723         stream_component_close(is, is->subtitle_stream);
2724     if (is->ic) {
2725         avformat_close_input(&is->ic);
2726     }
2727
2728     if (ret != 0) {
2729         SDL_Event event;
2730
2731         event.type = FF_QUIT_EVENT;
2732         event.user.data1 = is;
2733         SDL_PushEvent(&event);
2734     }
2735     return 0;
2736 }
2737
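/* Allocate a VideoState for the given input, create the display/subtitle
   synchronization primitives and the packet queues, and start the read
   thread that drives demuxing and decoding. */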
2738 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2739 {
2740     VideoState *is;
2741
2742     is = av_mallocz(sizeof(VideoState));
2743     if (!is)
2744         return NULL;
2745     av_strlcpy(is->filename, filename, sizeof(is->filename));
2746     is->iformat = iformat;
2747     is->ytop    = 0;
2748     is->xleft   = 0;
2749
2750     /* start video display */
2751     is->pictq_mutex = SDL_CreateMutex();
2752     is->pictq_cond  = SDL_CreateCond();
2753
2754     is->subpq_mutex = SDL_CreateMutex();
2755     is->subpq_cond  = SDL_CreateCond();
2756
2757     packet_queue_init(&is->videoq);
2758     packet_queue_init(&is->audioq);
2759     packet_queue_init(&is->subtitleq);
2760
2761     is->av_sync_type = av_sync_type;
2762     is->read_tid     = SDL_CreateThread(read_thread, is);
2763     if (!is->read_tid) {
2764         av_free(is);
2765         return NULL;
2766     }
2767     return is;
2768 }
2769
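/* Switch to the next stream of the given type, wrapping around at the end of
   the stream list.  For subtitles the cycle includes "no stream" (index -1). */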
2770 static void stream_cycle_channel(VideoState *is, int codec_type)
2771 {
2772     AVFormatContext *ic = is->ic;
2773     int start_index, stream_index;
2774     AVStream *st;
2775
2776     if (codec_type == AVMEDIA_TYPE_VIDEO)
2777         start_index = is->video_stream;
2778     else if (codec_type == AVMEDIA_TYPE_AUDIO)
2779         start_index = is->audio_stream;
2780     else
2781         start_index = is->subtitle_stream;
2782     if (start_index < (codec_type == AVMEDIA_TYPE_SUBTITLE ? -1 : 0))
2783         return;
2784     stream_index = start_index;
2785     for (;;) {
2786         if (++stream_index >= is->ic->nb_streams)
2787         {
2788             if (codec_type == AVMEDIA_TYPE_SUBTITLE)
2789             {
2790                 stream_index = -1;
2791                 goto the_end;
2792             } else
2793                 stream_index = 0;
2794         }
2795         if (stream_index == start_index)
2796             return;
2797         st = ic->streams[stream_index];
2798         if (st->codec->codec_type == codec_type) {
2799             /* check that parameters are OK */
2800             switch (codec_type) {
2801             case AVMEDIA_TYPE_AUDIO:
2802                 if (st->codec->sample_rate != 0 &&
2803                     st->codec->channels != 0)
2804                     goto the_end;
2805                 break;
2806             case AVMEDIA_TYPE_VIDEO:
2807             case AVMEDIA_TYPE_SUBTITLE:
2808                 goto the_end;
2809             default:
2810                 break;
2811             }
2812         }
2813     }
2814  the_end:
2815     stream_component_close(is, start_index);
2816     stream_component_open(is, stream_index);
2817 }
2818
2819
2820 static void toggle_full_screen(VideoState *is)
2821 {
2822     av_unused int i;
2823     is_full_screen = !is_full_screen;
2824 #if defined(__APPLE__) && SDL_VERSION_ATLEAST(1, 2, 14)
2825     /* OS X needs to reallocate the SDL overlays */
2826     for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
2827         is->pictq[i].reallocate = 1;
2828     }
2829 #endif
2830     video_open(is, 1);
2831 }
2832
2833 static void toggle_pause(VideoState *is)
2834 {
2835     stream_toggle_pause(is);
2836     is->step = 0;
2837 }
2838
2839 static void step_to_next_frame(VideoState *is)
2840 {
2841     /* if the stream is paused, unpause it, then step */
2842     if (is->paused)
2843         stream_toggle_pause(is);
2844     is->step = 1;
2845 }
2846
2847 static void toggle_audio_display(VideoState *is)
2848 {
2849     int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2850     is->show_mode = (is->show_mode + 1) % SHOW_MODE_NB;
2851     fill_rectangle(screen,
2852                 is->xleft, is->ytop, is->width, is->height,
2853                 bgcolor);
2854     SDL_UpdateRect(screen, is->xleft, is->ytop, is->width, is->height);
2855 }
2856
2857 /* handle an event sent by the GUI */
2858 static void event_loop(VideoState *cur_stream)
2859 {
2860     SDL_Event event;
2861     double incr, pos, frac;
2862
2863     for (;;) {
2864         double x;
2865         SDL_WaitEvent(&event);
2866         switch (event.type) {
2867         case SDL_KEYDOWN:
2868             if (exit_on_keydown) {
2869                 do_exit(cur_stream);
2870                 break;
2871             }
2872             switch (event.key.keysym.sym) {
2873             case SDLK_ESCAPE:
2874             case SDLK_q:
2875                 do_exit(cur_stream);
2876                 break;
2877             case SDLK_f:
2878                 toggle_full_screen(cur_stream);
2879                 cur_stream->force_refresh = 1;
2880                 break;
2881             case SDLK_p:
2882             case SDLK_SPACE:
2883                 toggle_pause(cur_stream);
2884                 break;
2885             case SDLK_s: // S: Step to next frame
2886                 step_to_next_frame(cur_stream);
2887                 break;
2888             case SDLK_a:
2889                 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
2890                 break;
2891             case SDLK_v:
2892                 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
2893                 break;
2894             case SDLK_t:
2895                 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
2896                 break;
2897             case SDLK_w:
2898                 toggle_audio_display(cur_stream);
2899                 cur_stream->force_refresh = 1;
2900                 break;
2901             case SDLK_PAGEUP:
2902                 incr = 600.0;
2903                 goto do_seek;
2904             case SDLK_PAGEDOWN:
2905                 incr = -600.0;
2906                 goto do_seek;
2907             case SDLK_LEFT:
2908                 incr = -10.0;
2909                 goto do_seek;
2910             case SDLK_RIGHT:
2911                 incr = 10.0;
2912                 goto do_seek;
2913             case SDLK_UP:
2914                 incr = 60.0;
2915                 goto do_seek;
2916             case SDLK_DOWN:
2917                 incr = -60.0;
2918             do_seek:
2919                     if (seek_by_bytes) {
2920                         if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos >= 0) {
2921                             pos = cur_stream->video_current_pos;
2922                         } else if (cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos >= 0) {
2923                             pos = cur_stream->audio_pkt.pos;
2924                         } else
2925                             pos = avio_tell(cur_stream->ic->pb);
2926                         if (cur_stream->ic->bit_rate)
2927                             incr *= cur_stream->ic->bit_rate / 8.0;
2928                         else
2929                             incr *= 180000.0;
2930                         pos += incr;
2931                         stream_seek(cur_stream, pos, incr, 1);
2932                     } else {
2933                         pos = get_master_clock(cur_stream);
2934                         pos += incr;
2935                         stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
2936                     }
2937                 break;
2938             default:
2939                 break;
2940             }
2941             break;
2942         case SDL_VIDEOEXPOSE:
2943             cur_stream->force_refresh = 1;
2944             break;
2945         case SDL_MOUSEBUTTONDOWN:
2946             if (exit_on_mousedown) {
2947                 do_exit(cur_stream);
2948                 break;
2949             }
2950         case SDL_MOUSEMOTION:
2951             if (event.type == SDL_MOUSEBUTTONDOWN) {
2952                 x = event.button.x;
2953             } else {
2954                 if (event.motion.state != SDL_PRESSED)
2955                     break;
2956                 x = event.motion.x;
2957             }
2958                 if (seek_by_bytes || cur_stream->ic->duration <= 0) {
2959                     uint64_t size =  avio_size(cur_stream->ic->pb);
2960                     stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
2961                 } else {
2962                     int64_t ts;
2963                     int ns, hh, mm, ss;
2964                     int tns, thh, tmm, tss;
2965                     tns  = cur_stream->ic->duration / 1000000LL;
2966                     thh  = tns / 3600;
2967                     tmm  = (tns % 3600) / 60;
2968                     tss  = (tns % 60);
2969                     frac = x / cur_stream->width;
2970                     ns   = frac * tns;
2971                     hh   = ns / 3600;
2972                     mm   = (ns % 3600) / 60;
2973                     ss   = (ns % 60);
2974                     fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
2975                             hh, mm, ss, thh, tmm, tss);
2976                     ts = frac * cur_stream->ic->duration;
2977                     if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
2978                         ts += cur_stream->ic->start_time;
2979                     stream_seek(cur_stream, ts, 0, 0);
2980                 }
2981             break;
2982         case SDL_VIDEORESIZE:
2983                 screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
2984                                           SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
2985                 screen_width  = cur_stream->width  = event.resize.w;
2986                 screen_height = cur_stream->height = event.resize.h;
2987                 cur_stream->force_refresh = 1;
2988             break;
2989         case SDL_QUIT:
2990         case FF_QUIT_EVENT:
2991             do_exit(cur_stream);
2992             break;
2993         case FF_ALLOC_EVENT:
2994             alloc_picture(event.user.data1);
2995             break;
2996         case FF_REFRESH_EVENT:
2997             video_refresh(event.user.data1);
2998             cur_stream->refresh = 0;
2999             break;
3000         default:
3001             break;
3002         }
3003     }
3004 }
3005
3006 static int opt_frame_size(const char *opt, const char *arg)
3007 {
3008     av_log(NULL, AV_LOG_WARNING, "Option -s is deprecated, use -video_size.\n");
3009     return opt_default("video_size", arg);
3010 }
3011
3012 static int opt_width(const char *opt, const char *arg)
3013 {
3014     screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
3015     return 0;
3016 }
3017
3018 static int opt_height(const char *opt, const char *arg)
3019 {
3020     screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
3021     return 0;
3022 }
3023
3024 static int opt_format(const char *opt, const char *arg)
3025 {
3026     file_iformat = av_find_input_format(arg);
3027     if (!file_iformat) {
3028         fprintf(stderr, "Unknown input format: %s\n", arg);
3029         return AVERROR(EINVAL);
3030     }
3031     return 0;
3032 }
3033
3034 static int opt_frame_pix_fmt(const char *opt, const char *arg)
3035 {
3036     av_log(NULL, AV_LOG_WARNING, "Option -pix_fmt is deprecated, use -pixel_format.\n");
3037     return opt_default("pixel_format", arg);
3038 }
3039
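     /* select which clock (audio, video or external) drives A/V synchronization */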
3040 static int opt_sync(const char *opt, const char *arg)
3041 {
3042     if (!strcmp(arg, "audio"))
3043         av_sync_type = AV_SYNC_AUDIO_MASTER;
3044     else if (!strcmp(arg, "video"))
3045         av_sync_type = AV_SYNC_VIDEO_MASTER;
3046     else if (!strcmp(arg, "ext"))
3047         av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
3048     else {
3049         fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
3050         exit(1);
3051     }
3052     return 0;
3053 }
3054
3055 static int opt_seek(const char *opt, const char *arg)
3056 {
3057     start_time = parse_time_or_die(opt, arg, 1);
3058     return 0;
3059 }
3060
3061 static int opt_duration(const char *opt, const char *arg)
3062 {
3063     duration = parse_time_or_die(opt, arg, 1);
3064     return 0;
3065 }
3066
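     /* accept a mode name ("video", "waves", "rdft") or a numeric index below SHOW_MODE_NB */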
3067 static int opt_show_mode(const char *opt, const char *arg)
3068 {
3069     show_mode = !strcmp(arg, "video") ? SHOW_MODE_VIDEO :
3070                 !strcmp(arg, "waves") ? SHOW_MODE_WAVES :
3071                 !strcmp(arg, "rdft" ) ? SHOW_MODE_RDFT  :
3072                 parse_number_or_die(opt, arg, OPT_INT, 0, SHOW_MODE_NB-1);
3073     return 0;
3074 }
3075
3076 static void opt_input_file(void *optctx, const char *filename)
3077 {
3078     if (input_filename) {
3079         fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
3080                 filename, input_filename);
3081         exit_program(1);
3082     }
3083     if (!strcmp(filename, "-"))
3084         filename = "pipe:";
3085     input_filename = filename;
3086 }
3087
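     /* shared handler for -acodec, -scodec and -vcodec: the last letter of the
        option name selects which forced decoder name to set */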
3088 static int opt_codec(void *o, const char *opt, const char *arg)
3089 {
3090     switch(opt[strlen(opt)-1]){
3091     case 'a' :    audio_codec_name = arg; break;
3092     case 's' : subtitle_codec_name = arg; break;
3093     case 'v' :    video_codec_name = arg; break;
3094     }
3095     return 0;
3096 }
3097
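     /* sink for the no-op -i option, accepted only so command lines written
        for ffmpeg's "-i input" syntax keep working */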
3098 static int dummy;
3099
3100 static const OptionDef options[] = {
3101 #include "cmdutils_common_opts.h"
3102     { "x", HAS_ARG, { (void*)opt_width }, "force displayed width", "width" },
3103     { "y", HAS_ARG, { (void*)opt_height }, "force displayed height", "height" },
3104     { "s", HAS_ARG | OPT_VIDEO, { (void*)opt_frame_size }, "set frame size (WxH or abbreviation)", "size" },
3105     { "fs", OPT_BOOL, { (void*)&is_full_screen }, "force full screen" },
3106     { "an", OPT_BOOL, { (void*)&audio_disable }, "disable audio" },
3107     { "vn", OPT_BOOL, { (void*)&video_disable }, "disable video" },
3108     { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&wanted_stream[AVMEDIA_TYPE_AUDIO] }, "select desired audio stream", "stream_number" },
3109     { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&wanted_stream[AVMEDIA_TYPE_VIDEO] }, "select desired video stream", "stream_number" },
3110     { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&wanted_stream[AVMEDIA_TYPE_SUBTITLE] }, "select desired subtitle stream", "stream_number" },
3111     { "ss", HAS_ARG, { (void*)&opt_seek }, "seek to a given position in seconds", "pos" },
3112     { "t", HAS_ARG, { (void*)&opt_duration }, "play \"duration\" seconds of audio/video", "duration" },
3113     { "bytes", OPT_INT | HAS_ARG, { (void*)&seek_by_bytes }, "seek by bytes 0=off 1=on -1=auto", "val" },
3114     { "nodisp", OPT_BOOL, { (void*)&display_disable }, "disable graphical display" },
3115     { "f", HAS_ARG, { (void*)opt_format }, "force format", "fmt" },
3116     { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, { (void*)opt_frame_pix_fmt }, "set pixel format", "format" },
3117     { "stats", OPT_BOOL | OPT_EXPERT, { (void*)&show_status }, "show status", "" },
3118     { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&workaround_bugs }, "work around bugs", "" },
3119     { "fast", OPT_BOOL | OPT_EXPERT, { (void*)&fast }, "non spec compliant optimizations", "" },
3120     { "genpts", OPT_BOOL | OPT_EXPERT, { (void*)&genpts }, "generate pts", "" },
3121     { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&decoder_reorder_pts }, "let decoder reorder pts 0=off 1=on -1=auto", ""},
3122     { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&lowres }, "decode at reduced resolution (1=1/2 size, 2=1/4 size)", "level" },
3123     { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&skip_loop_filter }, "set loop-filter skipping mode", "mode" },
3124     { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&skip_frame }, "set frame skipping mode", "mode" },
3125     { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&skip_idct }, "set IDCT skipping mode", "mode" },
3126     { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&idct }, "set idct algo",  "algo" },
3127     { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&error_concealment }, "set error concealment options",  "bit_mask" },
3128     { "sync", HAS_ARG | OPT_EXPERT, { (void*)opt_sync }, "set audio-video sync type (type=audio/video/ext)", "type" },
3129     { "autoexit", OPT_BOOL | OPT_EXPERT, { (void*)&autoexit }, "exit at the end", "" },
3130     { "exitonkeydown", OPT_BOOL | OPT_EXPERT, { (void*)&exit_on_keydown }, "exit on key down", "" },
3131     { "exitonmousedown", OPT_BOOL | OPT_EXPERT, { (void*)&exit_on_mousedown }, "exit on mouse down", "" },
3132     { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&loop }, "set number of times the playback shall be looped", "loop count" },
3133     { "framedrop", OPT_BOOL | OPT_EXPERT, { (void*)&framedrop }, "drop frames when the CPU is too slow", "" },
3134     { "window_title", OPT_STRING | HAS_ARG, { (void*)&window_title }, "set window title", "window title" },
3135 #if CONFIG_AVFILTER
3136     { "vf", OPT_STRING | HAS_ARG, { (void*)&vfilters }, "video filters", "filter list" },
3137 #endif
3138     { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, { (void*)&rdftspeed }, "rdft speed", "msecs" },
3139     { "showmode", HAS_ARG, {(void*)opt_show_mode}, "select show mode (0 = video, 1 = waves, 2 = RDFT)", "mode" },
3140     { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, { (void*)opt_default }, "generic catch all option", "" },
3141     { "i", OPT_BOOL, {(void *)&dummy}, "read specified file", "input_file"},
3142     { "codec", HAS_ARG | OPT_FUNC2, {(void*)opt_codec}, "force decoder", "decoder" },
3143     { NULL, },
3144 };
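     /* A few illustrative invocations of the options above ("input.mkv" is just
      * a placeholder file name):
      *   ffplay -autoexit -an input.mkv        play without audio, quit at the end
      *   ffplay -ss 00:01:30 -t 10 input.mkv   play 10 seconds starting at 1:30
      *   ffplay -sync ext -loop 0 input.mkv    sync to external clock, loop forever
      */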
3145
3146 static void show_usage(void)
3147 {
3148     av_log(NULL, AV_LOG_INFO, "Simple media player\n");
3149     av_log(NULL, AV_LOG_INFO, "usage: %s [options] input_file\n", program_name);
3150     av_log(NULL, AV_LOG_INFO, "\n");
3151 }
3152
3153 static int opt_help(const char *opt, const char *arg)
3154 {
3155     av_log_set_callback(log_callback_help);
3156     show_usage();
3157     show_help_options(options, "Main options:\n",
3158                       OPT_EXPERT, 0);
3159     show_help_options(options, "\nAdvanced options:\n",
3160                       OPT_EXPERT, OPT_EXPERT);
3161     printf("\n");
3162     show_help_children(avcodec_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3163     show_help_children(avformat_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3164 #if !CONFIG_AVFILTER
3165     show_help_children(sws_get_class(), AV_OPT_FLAG_ENCODING_PARAM);
3166 #endif
3167     printf("\nWhile playing:\n"
3168            "q, ESC              quit\n"
3169            "f                   toggle full screen\n"
3170            "p, SPC              pause\n"
3171            "a                   cycle audio channel\n"
3172            "v                   cycle video channel\n"
3173            "t                   cycle subtitle channel\n"
3174            "w                   show audio waves\n"
3175            "s                   activate frame-step mode\n"
3176            "left/right          seek backward/forward 10 seconds\n"
3177            "down/up             seek backward/forward 1 minute\n"
3178            "page down/page up   seek backward/forward 10 minutes\n"
3179            "mouse click         seek to position in file proportional to the clicked x position\n"
3180            );
3181     return 0;
3182 }
3183
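     /* lock manager callback for av_lockmgr_register(): maps libavcodec's lock
        operations onto SDL mutexes; must return 0 on success, nonzero on failure */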
3184 static int lockmgr(void **mtx, enum AVLockOp op)
3185 {
3186     switch (op) {
3187     case AV_LOCK_CREATE:
3188         *mtx = SDL_CreateMutex();
3189         if (!*mtx)
3190             return 1;
3191         return 0;
3192     case AV_LOCK_OBTAIN:
3193         return !!SDL_LockMutex(*mtx);
3194     case AV_LOCK_RELEASE:
3195         return !!SDL_UnlockMutex(*mtx);
3196     case AV_LOCK_DESTROY:
3197         SDL_DestroyMutex(*mtx);
3198         return 0;
3199     }
3200     return 1;
3201 }
3202
3203 /* program entry point */
3204 int main(int argc, char **argv)
3205 {
3206     int flags;
3207     VideoState *is;
3208
3209     av_log_set_flags(AV_LOG_SKIP_REPEATED);
3210     parse_loglevel(argc, argv, options);
3211
3212     /* register all codecs, demux and protocols */
3213     avcodec_register_all();
3214 #if CONFIG_AVDEVICE
3215     avdevice_register_all();
3216 #endif
3217 #if CONFIG_AVFILTER
3218     avfilter_register_all();
3219 #endif
3220     av_register_all();
3221     avformat_network_init();
3222
3223     init_opts();
3224
3225     signal(SIGINT , sigterm_handler); /* Interrupt (ANSI).    */
3226     signal(SIGTERM, sigterm_handler); /* Termination (ANSI).  */
3227
3228     show_banner(argc, argv, options);
3229
3230     parse_options(NULL, argc, argv, options, opt_input_file);
3231
3232     if (!input_filename) {
3233         show_usage();
3234         fprintf(stderr, "An input file must be specified\n");
3235         fprintf(stderr, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
3236         exit(1);
3237     }
3238
3239     if (display_disable) {
3240         video_disable = 1;
3241     }
3242     flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3243     if (audio_disable)
3244         flags &= ~SDL_INIT_AUDIO;
3245 #if !defined(__MINGW32__) && !defined(__APPLE__)
3246     flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
3247 #endif
3248     if (SDL_Init (flags)) {
3249         fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
3250         fprintf(stderr, "(Did you set the DISPLAY variable?)\n");
3251         exit(1);
3252     }
3253
3254     if (!display_disable) {
3255 #if HAVE_SDL_VIDEO_SIZE
3256         const SDL_VideoInfo *vi = SDL_GetVideoInfo();
3257         fs_screen_width = vi->current_w;
3258         fs_screen_height = vi->current_h;
3259 #endif
3260     }
3261
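         /* ignore SDL events the player never handles so they do not pile up
            in the event queue */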
3262     SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
3263     SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3264     SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3265
3266     if (av_lockmgr_register(lockmgr)) {
3267         fprintf(stderr, "Could not initialize lock manager!\n");
3268         do_exit(NULL);
3269     }
3270
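         /* the flush packet is a sentinel queued on seeks; the decoders compare
            the data pointer against flush_pkt.data and flush their buffers
            instead of decoding it */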
3271     av_init_packet(&flush_pkt);
3272     flush_pkt.data = (uint8_t *)(intptr_t)"FLUSH";
3273
3274     is = stream_open(input_filename, file_iformat);
3275     if (!is) {
3276         fprintf(stderr, "Failed to initialize VideoState!\n");
3277         do_exit(NULL);
3278     }
3279
3280     event_loop(is);
3281
3282     /* never returns */
3283
3284     return 0;
3285 }