lavfi: merge avfiltergraph.h into avfilter.h
1 /*
2  * avplay : Simple Media Player based on the Libav libraries
3  * Copyright (c) 2003 Fabrice Bellard
4  *
5  * This file is part of Libav.
6  *
7  * Libav is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * Libav is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with Libav; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21
22 #include "config.h"
23 #include <inttypes.h>
24 #include <math.h>
25 #include <limits.h>
26 #include "libavutil/avstring.h"
27 #include "libavutil/colorspace.h"
28 #include "libavutil/mathematics.h"
29 #include "libavutil/pixdesc.h"
30 #include "libavutil/imgutils.h"
31 #include "libavutil/dict.h"
32 #include "libavutil/parseutils.h"
33 #include "libavutil/samplefmt.h"
34 #include "libavutil/time.h"
35 #include "libavformat/avformat.h"
36 #include "libavdevice/avdevice.h"
37 #include "libswscale/swscale.h"
38 #include "libavresample/avresample.h"
39 #include "libavutil/opt.h"
40 #include "libavcodec/avfft.h"
41
42 #if CONFIG_AVFILTER
43 # include "libavfilter/avfilter.h"
44 # include "libavfilter/buffersink.h"
45 # include "libavfilter/buffersrc.h"
46 #endif
47
48 #include "cmdutils.h"
49
50 #include <SDL.h>
51 #include <SDL_thread.h>
52
53 #ifdef __MINGW32__
54 #undef main /* We don't want SDL to override our main() */
55 #endif
56
57 #include <assert.h>
58
59 const char program_name[] = "avplay";
60 const int program_birth_year = 2003;
61
62 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
63 #define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
64 #define MIN_FRAMES 5
65
66 /* SDL audio buffer size, in samples. Should be small to have precise
67    A/V sync as SDL does not have hardware buffer fullness info. */
68 #define SDL_AUDIO_BUFFER_SIZE 1024
69
70 /* no AV sync correction is done if below the AV sync threshold */
71 #define AV_SYNC_THRESHOLD 0.01
72 /* no AV correction is done if the error is too big */
73 #define AV_NOSYNC_THRESHOLD 10.0
74
75 #define FRAME_SKIP_FACTOR 0.05
76
77 /* maximum audio speed change to get correct sync */
78 #define SAMPLE_CORRECTION_PERCENT_MAX 10
79
80 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
81 #define AUDIO_DIFF_AVG_NB   20
82
83 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
84 #define SAMPLE_ARRAY_SIZE (2 * 65536)
85
86 static int64_t sws_flags = SWS_BICUBIC;
87
88 typedef struct PacketQueue {
89     AVPacketList *first_pkt, *last_pkt;
90     int nb_packets;
91     int size;
92     int abort_request;
93     SDL_mutex *mutex;
94     SDL_cond *cond;
95 } PacketQueue;
96
97 #define VIDEO_PICTURE_QUEUE_SIZE 2
98 #define SUBPICTURE_QUEUE_SIZE 4
99
100 typedef struct VideoPicture {
101     double pts;             // presentation timestamp for this picture
102     double target_clock;    // av_gettime() time at which this should be displayed ideally
103     int64_t pos;            // byte position in file
104     SDL_Overlay *bmp;
105     int width, height; /* source height & width */
106     int allocated;
107     int reallocate;
108     enum AVPixelFormat pix_fmt;
109
110     AVRational sar;
111 } VideoPicture;
112
113 typedef struct SubPicture {
114     double pts; /* presentation time stamp for this picture */
115     AVSubtitle sub;
116 } SubPicture;
117
118 enum {
119     AV_SYNC_AUDIO_MASTER, /* default choice */
120     AV_SYNC_VIDEO_MASTER,
121     AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
122 };
123
124 typedef struct VideoState {
125     SDL_Thread *parse_tid;
126     SDL_Thread *video_tid;
127     SDL_Thread *refresh_tid;
128     AVInputFormat *iformat;
129     int no_background;
130     int abort_request;
131     int paused;
132     int last_paused;
133     int seek_req;
134     int seek_flags;
135     int64_t seek_pos;
136     int64_t seek_rel;
137     int read_pause_return;
138     AVFormatContext *ic;
139
140     int audio_stream;
141
142     int av_sync_type;
143     double external_clock; /* external clock base */
144     int64_t external_clock_time;
145
146     double audio_clock;
147     double audio_diff_cum; /* used for AV difference average computation */
148     double audio_diff_avg_coef;
149     double audio_diff_threshold;
150     int audio_diff_avg_count;
151     AVStream *audio_st;
152     PacketQueue audioq;
153     int audio_hw_buf_size;
154     uint8_t silence_buf[SDL_AUDIO_BUFFER_SIZE];
155     uint8_t *audio_buf;
156     uint8_t *audio_buf1;
157     unsigned int audio_buf_size; /* in bytes */
158     int audio_buf_index; /* in bytes */
159     AVPacket audio_pkt_temp;
160     AVPacket audio_pkt;
161     enum AVSampleFormat sdl_sample_fmt;
162     uint64_t sdl_channel_layout;
163     int sdl_channels;
164     int sdl_sample_rate;
165     enum AVSampleFormat resample_sample_fmt;
166     uint64_t resample_channel_layout;
167     int resample_sample_rate;
168     AVAudioResampleContext *avr;
169     AVFrame *frame;
170
171     int show_audio; /* if true, display audio samples */
172     int16_t sample_array[SAMPLE_ARRAY_SIZE];
173     int sample_array_index;
174     int last_i_start;
175     RDFTContext *rdft;
176     int rdft_bits;
177     FFTSample *rdft_data;
178     int xpos;
179
180     SDL_Thread *subtitle_tid;
181     int subtitle_stream;
182     int subtitle_stream_changed;
183     AVStream *subtitle_st;
184     PacketQueue subtitleq;
185     SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
186     int subpq_size, subpq_rindex, subpq_windex;
187     SDL_mutex *subpq_mutex;
188     SDL_cond *subpq_cond;
189
190     double frame_timer;
191     double frame_last_pts;
192     double frame_last_delay;
193     double video_clock;             // pts of last decoded frame / predicted pts of next decoded frame
194     int video_stream;
195     AVStream *video_st;
196     PacketQueue videoq;
197     double video_current_pts;       // current displayed pts (different from video_clock if frame fifos are used)
198     double video_current_pts_drift; // video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
199     int64_t video_current_pos;      // current displayed file pos
200     VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
201     int pictq_size, pictq_rindex, pictq_windex;
202     SDL_mutex *pictq_mutex;
203     SDL_cond *pictq_cond;
204 #if !CONFIG_AVFILTER
205     struct SwsContext *img_convert_ctx;
206 #endif
207
208     //    QETimer *video_timer;
209     char filename[1024];
210     int width, height, xleft, ytop;
211
212     PtsCorrectionContext pts_ctx;
213
214 #if CONFIG_AVFILTER
215     AVFilterContext *in_video_filter;   // the first filter in the video chain
216     AVFilterContext *out_video_filter;  // the last filter in the video chain
217 #endif
218
219     float skip_frames;
220     float skip_frames_index;
221     int refresh;
222 } VideoState;
223
224 /* options specified by the user */
225 static AVInputFormat *file_iformat;
226 static const char *input_filename;
227 static const char *window_title;
228 static int fs_screen_width;
229 static int fs_screen_height;
230 static int screen_width  = 0;
231 static int screen_height = 0;
232 static int audio_disable;
233 static int video_disable;
234 static int wanted_stream[AVMEDIA_TYPE_NB] = {
235     [AVMEDIA_TYPE_AUDIO]    = -1,
236     [AVMEDIA_TYPE_VIDEO]    = -1,
237     [AVMEDIA_TYPE_SUBTITLE] = -1,
238 };
239 static int seek_by_bytes = -1;
240 static int display_disable;
241 static int show_status = 1;
242 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
243 static int64_t start_time = AV_NOPTS_VALUE;
244 static int64_t duration = AV_NOPTS_VALUE;
245 static int debug_mv = 0;
246 static int step = 0;
247 static int workaround_bugs = 1;
248 static int fast = 0;
249 static int genpts = 0;
250 static int idct = FF_IDCT_AUTO;
251 static enum AVDiscard skip_frame       = AVDISCARD_DEFAULT;
252 static enum AVDiscard skip_idct        = AVDISCARD_DEFAULT;
253 static enum AVDiscard skip_loop_filter = AVDISCARD_DEFAULT;
254 static int error_concealment = 3;
255 static int decoder_reorder_pts = -1;
256 static int autoexit;
257 static int exit_on_keydown;
258 static int exit_on_mousedown;
259 static int loop = 1;
260 static int framedrop = 1;
261 static int infinite_buffer = 0;
262
263 static int rdftspeed = 20;
264 #if CONFIG_AVFILTER
265 static char *vfilters = NULL;
266 #endif
267
268 /* current context */
269 static int is_full_screen;
270 static VideoState *cur_stream;
271 static int64_t audio_callback_time;
272
273 static AVPacket flush_pkt;
274
275 #define FF_ALLOC_EVENT   (SDL_USEREVENT)
276 #define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
277 #define FF_QUIT_EVENT    (SDL_USEREVENT + 2)
278
279 static SDL_Surface *screen;
280
281 static int packet_queue_put(PacketQueue *q, AVPacket *pkt);
282
283 /* packet queue handling */
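/* The packet queues decouple the demuxer thread from the decoder threads:
 * packets are appended and removed under q->mutex, and a decoder blocks in
 * packet_queue_get() until data arrives or the queue is aborted.  The global
 * flush_pkt is a sentinel packet telling a decoder to flush its internal
 * buffers (it is queued at init time and, presumably, after seeks). */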
284 static void packet_queue_init(PacketQueue *q)
285 {
286     memset(q, 0, sizeof(PacketQueue));
287     q->mutex = SDL_CreateMutex();
288     q->cond = SDL_CreateCond();
289     packet_queue_put(q, &flush_pkt);
290 }
291
292 static void packet_queue_flush(PacketQueue *q)
293 {
294     AVPacketList *pkt, *pkt1;
295
296     SDL_LockMutex(q->mutex);
297     for (pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
298         pkt1 = pkt->next;
299         av_free_packet(&pkt->pkt);
300         av_freep(&pkt);
301     }
302     q->last_pkt = NULL;
303     q->first_pkt = NULL;
304     q->nb_packets = 0;
305     q->size = 0;
306     SDL_UnlockMutex(q->mutex);
307 }
308
309 static void packet_queue_end(PacketQueue *q)
310 {
311     packet_queue_flush(q);
312     SDL_DestroyMutex(q->mutex);
313     SDL_DestroyCond(q->cond);
314 }
315
316 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
317 {
318     AVPacketList *pkt1;
319
320     /* duplicate the packet */
321     if (pkt != &flush_pkt && av_dup_packet(pkt) < 0)
322         return -1;
323
324     pkt1 = av_malloc(sizeof(AVPacketList));
325     if (!pkt1)
326         return -1;
327     pkt1->pkt = *pkt;
328     pkt1->next = NULL;
329
330
331     SDL_LockMutex(q->mutex);
332
333     if (!q->last_pkt)
334
335         q->first_pkt = pkt1;
336     else
337         q->last_pkt->next = pkt1;
338     q->last_pkt = pkt1;
339     q->nb_packets++;
340     q->size += pkt1->pkt.size + sizeof(*pkt1);
341     /* XXX: should duplicate packet data in DV case */
342     SDL_CondSignal(q->cond);
343
344     SDL_UnlockMutex(q->mutex);
345     return 0;
346 }
347
348 static void packet_queue_abort(PacketQueue *q)
349 {
350     SDL_LockMutex(q->mutex);
351
352     q->abort_request = 1;
353
354     SDL_CondSignal(q->cond);
355
356     SDL_UnlockMutex(q->mutex);
357 }
358
359 /* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
360 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
361 {
362     AVPacketList *pkt1;
363     int ret;
364
365     SDL_LockMutex(q->mutex);
366
367     for (;;) {
368         if (q->abort_request) {
369             ret = -1;
370             break;
371         }
372
373         pkt1 = q->first_pkt;
374         if (pkt1) {
375             q->first_pkt = pkt1->next;
376             if (!q->first_pkt)
377                 q->last_pkt = NULL;
378             q->nb_packets--;
379             q->size -= pkt1->pkt.size + sizeof(*pkt1);
380             *pkt = pkt1->pkt;
381             av_free(pkt1);
382             ret = 1;
383             break;
384         } else if (!block) {
385             ret = 0;
386             break;
387         } else {
388             SDL_CondWait(q->cond, q->mutex);
389         }
390     }
391     SDL_UnlockMutex(q->mutex);
392     return ret;
393 }
394
395 static inline void fill_rectangle(SDL_Surface *screen,
396                                   int x, int y, int w, int h, int color)
397 {
398     SDL_Rect rect;
399     rect.x = x;
400     rect.y = y;
401     rect.w = w;
402     rect.h = h;
403     SDL_FillRect(screen, &rect, color);
404 }
405
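/* ALPHA_BLEND blends newp over oldp with alpha a; a nonzero shift s allows
 * newp to be the sum of 2^s source samples (used when averaging subsampled
 * chroma), since oldp is scaled up by 2^s and the result divided back down. */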
406 #define ALPHA_BLEND(a, oldp, newp, s)\
407 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
408
409 #define RGBA_IN(r, g, b, a, s)\
410 {\
411     unsigned int v = ((const uint32_t *)(s))[0];\
412     a = (v >> 24) & 0xff;\
413     r = (v >> 16) & 0xff;\
414     g = (v >> 8) & 0xff;\
415     b = v & 0xff;\
416 }
417
418 #define YUVA_IN(y, u, v, a, s, pal)\
419 {\
420     unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
421     a = (val >> 24) & 0xff;\
422     y = (val >> 16) & 0xff;\
423     u = (val >> 8) & 0xff;\
424     v = val & 0xff;\
425 }
426
427 #define YUVA_OUT(d, y, u, v, a)\
428 {\
429     ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
430 }
431
432
433 #define BPP 1
434
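/* Blend one palettized subtitle rectangle onto a YUV420P destination picture.
 * The palette in rect->pict.data[1] has already been converted from RGBA to
 * YUVA (see subtitle_thread()).  Luma is blended per pixel, while the
 * 2x2-subsampled chroma planes blend the covered source pixels together, with
 * special cases for odd x/y offsets and odd widths/heights. */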
435 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
436 {
437     int wrap, wrap3, width2, skip2;
438     int y, u, v, a, u1, v1, a1, w, h;
439     uint8_t *lum, *cb, *cr;
440     const uint8_t *p;
441     const uint32_t *pal;
442     int dstx, dsty, dstw, dsth;
443
444     dstw = av_clip(rect->w, 0, imgw);
445     dsth = av_clip(rect->h, 0, imgh);
446     dstx = av_clip(rect->x, 0, imgw - dstw);
447     dsty = av_clip(rect->y, 0, imgh - dsth);
448     lum = dst->data[0] + dsty * dst->linesize[0];
449     cb  = dst->data[1] + (dsty >> 1) * dst->linesize[1];
450     cr  = dst->data[2] + (dsty >> 1) * dst->linesize[2];
451
452     width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
453     skip2 = dstx >> 1;
454     wrap = dst->linesize[0];
455     wrap3 = rect->pict.linesize[0];
456     p = rect->pict.data[0];
457     pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */
458
459     if (dsty & 1) {
460         lum += dstx;
461         cb += skip2;
462         cr += skip2;
463
464         if (dstx & 1) {
465             YUVA_IN(y, u, v, a, p, pal);
466             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
467             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
468             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
469             cb++;
470             cr++;
471             lum++;
472             p += BPP;
473         }
474         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
475             YUVA_IN(y, u, v, a, p, pal);
476             u1 = u;
477             v1 = v;
478             a1 = a;
479             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
480
481             YUVA_IN(y, u, v, a, p + BPP, pal);
482             u1 += u;
483             v1 += v;
484             a1 += a;
485             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
486             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
487             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
488             cb++;
489             cr++;
490             p += 2 * BPP;
491             lum += 2;
492         }
493         if (w) {
494             YUVA_IN(y, u, v, a, p, pal);
495             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
496             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
497             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
498             p++;
499             lum++;
500         }
501         p += wrap3 - dstw * BPP;
502         lum += wrap - dstw - dstx;
503         cb += dst->linesize[1] - width2 - skip2;
504         cr += dst->linesize[2] - width2 - skip2;
505     }
506     for (h = dsth - (dsty & 1); h >= 2; h -= 2) {
507         lum += dstx;
508         cb += skip2;
509         cr += skip2;
510
511         if (dstx & 1) {
512             YUVA_IN(y, u, v, a, p, pal);
513             u1 = u;
514             v1 = v;
515             a1 = a;
516             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
517             p += wrap3;
518             lum += wrap;
519             YUVA_IN(y, u, v, a, p, pal);
520             u1 += u;
521             v1 += v;
522             a1 += a;
523             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
524             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
525             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
526             cb++;
527             cr++;
528             p += -wrap3 + BPP;
529             lum += -wrap + 1;
530         }
531         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
532             YUVA_IN(y, u, v, a, p, pal);
533             u1 = u;
534             v1 = v;
535             a1 = a;
536             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
537
538             YUVA_IN(y, u, v, a, p + BPP, pal);
539             u1 += u;
540             v1 += v;
541             a1 += a;
542             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
543             p += wrap3;
544             lum += wrap;
545
546             YUVA_IN(y, u, v, a, p, pal);
547             u1 += u;
548             v1 += v;
549             a1 += a;
550             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
551
552             YUVA_IN(y, u, v, a, p + BPP, pal);
553             u1 += u;
554             v1 += v;
555             a1 += a;
556             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
557
558             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
559             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
560
561             cb++;
562             cr++;
563             p += -wrap3 + 2 * BPP;
564             lum += -wrap + 2;
565         }
566         if (w) {
567             YUVA_IN(y, u, v, a, p, pal);
568             u1 = u;
569             v1 = v;
570             a1 = a;
571             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
572             p += wrap3;
573             lum += wrap;
574             YUVA_IN(y, u, v, a, p, pal);
575             u1 += u;
576             v1 += v;
577             a1 += a;
578             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
579             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
580             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
581             cb++;
582             cr++;
583             p += -wrap3 + BPP;
584             lum += -wrap + 1;
585         }
586         p += wrap3 + (wrap3 - dstw * BPP);
587         lum += wrap + (wrap - dstw - dstx);
588         cb += dst->linesize[1] - width2 - skip2;
589         cr += dst->linesize[2] - width2 - skip2;
590     }
591     /* handle odd height */
592     if (h) {
593         lum += dstx;
594         cb += skip2;
595         cr += skip2;
596
597         if (dstx & 1) {
598             YUVA_IN(y, u, v, a, p, pal);
599             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
600             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
601             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
602             cb++;
603             cr++;
604             lum++;
605             p += BPP;
606         }
607         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
608             YUVA_IN(y, u, v, a, p, pal);
609             u1 = u;
610             v1 = v;
611             a1 = a;
612             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
613
614             YUVA_IN(y, u, v, a, p + BPP, pal);
615             u1 += u;
616             v1 += v;
617             a1 += a;
618             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
619             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
620             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
621             cb++;
622             cr++;
623             p += 2 * BPP;
624             lum += 2;
625         }
626         if (w) {
627             YUVA_IN(y, u, v, a, p, pal);
628             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
629             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
630             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
631         }
632     }
633 }
634
635 static void free_subpicture(SubPicture *sp)
636 {
637     avsubtitle_free(&sp->sub);
638 }
639
640 static void video_image_display(VideoState *is)
641 {
642     VideoPicture *vp;
643     SubPicture *sp;
644     AVPicture pict;
645     float aspect_ratio;
646     int width, height, x, y;
647     SDL_Rect rect;
648     int i;
649
650     vp = &is->pictq[is->pictq_rindex];
651     if (vp->bmp) {
652 #if CONFIG_AVFILTER
653          if (!vp->sar.num)
654              aspect_ratio = 0;
655          else
656              aspect_ratio = av_q2d(vp->sar);
657 #else
658
659         /* XXX: use variable in the frame */
660         if (is->video_st->sample_aspect_ratio.num)
661             aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
662         else if (is->video_st->codec->sample_aspect_ratio.num)
663             aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
664         else
665             aspect_ratio = 0;
666 #endif
667         if (aspect_ratio <= 0.0)
668             aspect_ratio = 1.0;
669         aspect_ratio *= (float)vp->width / (float)vp->height;
670
671         if (is->subtitle_st)
672         {
673             if (is->subpq_size > 0)
674             {
675                 sp = &is->subpq[is->subpq_rindex];
676
677                 if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
678                 {
679                     SDL_LockYUVOverlay (vp->bmp);
680
681                     pict.data[0] = vp->bmp->pixels[0];
682                     pict.data[1] = vp->bmp->pixels[2];
683                     pict.data[2] = vp->bmp->pixels[1];
684
685                     pict.linesize[0] = vp->bmp->pitches[0];
686                     pict.linesize[1] = vp->bmp->pitches[2];
687                     pict.linesize[2] = vp->bmp->pitches[1];
688
689                     for (i = 0; i < sp->sub.num_rects; i++)
690                         blend_subrect(&pict, sp->sub.rects[i],
691                                       vp->bmp->w, vp->bmp->h);
692
693                     SDL_UnlockYUVOverlay (vp->bmp);
694                 }
695             }
696         }
697
698
699         /* XXX: we assume the screen has a 1:1 pixel aspect ratio */
700         height = is->height;
701         width = ((int)rint(height * aspect_ratio)) & ~1;
702         if (width > is->width) {
703             width = is->width;
704             height = ((int)rint(width / aspect_ratio)) & ~1;
705         }
706         x = (is->width - width) / 2;
707         y = (is->height - height) / 2;
708         is->no_background = 0;
709         rect.x = is->xleft + x;
710         rect.y = is->ytop  + y;
711         rect.w = width;
712         rect.h = height;
713         SDL_DisplayYUVOverlay(vp->bmp, &rect);
714     }
715 }
716
717 /* get the current audio output buffer size, in bytes. With SDL, we
718    cannot get precise information */
719 static int audio_write_get_buf_size(VideoState *is)
720 {
721     return is->audio_buf_size - is->audio_buf_index;
722 }
723
724 static inline int compute_mod(int a, int b)
725 {
726     a = a % b;
727     if (a >= 0)
728         return a;
729     else
730         return a + b;
731 }
732
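/* Visualize the most recent audio samples: in mode 1 (show_audio == 1) draw a
 * waveform per channel; otherwise run an RDFT over the sample window and draw
 * one spectrogram column per refresh at the current x position. */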
733 static void video_audio_display(VideoState *s)
734 {
735     int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
736     int ch, channels, h, h2, bgcolor, fgcolor;
737     int64_t time_diff;
738     int rdft_bits, nb_freq;
739
740     for (rdft_bits = 1; (1 << rdft_bits) < 2 * s->height; rdft_bits++)
741         ;
742     nb_freq = 1 << (rdft_bits - 1);
743
744     /* compute display index: center on currently output samples */
745     channels = s->sdl_channels;
746     nb_display_channels = channels;
747     if (!s->paused) {
748         int data_used = s->show_audio == 1 ? s->width : (2 * nb_freq);
749         n = 2 * channels;
750         delay = audio_write_get_buf_size(s);
751         delay /= n;
752
753         /* to be more precise, we take into account the time spent since
754            the last buffer computation */
755         if (audio_callback_time) {
756             time_diff = av_gettime() - audio_callback_time;
757             delay -= (time_diff * s->sdl_sample_rate) / 1000000;
758         }
759
760         delay += 2 * data_used;
761         if (delay < data_used)
762             delay = data_used;
763
764         i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
765         if (s->show_audio == 1) {
766             h = INT_MIN;
767             for (i = 0; i < 1000; i += channels) {
768                 int idx = (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
769                 int a = s->sample_array[idx];
770                 int b = s->sample_array[(idx + 4 * channels) % SAMPLE_ARRAY_SIZE];
771                 int c = s->sample_array[(idx + 5 * channels) % SAMPLE_ARRAY_SIZE];
772                 int d = s->sample_array[(idx + 9 * channels) % SAMPLE_ARRAY_SIZE];
773                 int score = a - d;
774                 if (h < score && (b ^ c) < 0) {
775                     h = score;
776                     i_start = idx;
777                 }
778             }
779         }
780
781         s->last_i_start = i_start;
782     } else {
783         i_start = s->last_i_start;
784     }
785
786     bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
787     if (s->show_audio == 1) {
788         fill_rectangle(screen,
789                        s->xleft, s->ytop, s->width, s->height,
790                        bgcolor);
791
792         fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
793
794         /* total height for one channel */
795         h = s->height / nb_display_channels;
796         /* graph height / 2 */
797         h2 = (h * 9) / 20;
798         for (ch = 0; ch < nb_display_channels; ch++) {
799             i = i_start + ch;
800             y1 = s->ytop + ch * h + (h / 2); /* position of center line */
801             for (x = 0; x < s->width; x++) {
802                 y = (s->sample_array[i] * h2) >> 15;
803                 if (y < 0) {
804                     y = -y;
805                     ys = y1 - y;
806                 } else {
807                     ys = y1;
808                 }
809                 fill_rectangle(screen,
810                                s->xleft + x, ys, 1, y,
811                                fgcolor);
812                 i += channels;
813                 if (i >= SAMPLE_ARRAY_SIZE)
814                     i -= SAMPLE_ARRAY_SIZE;
815             }
816         }
817
818         fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
819
820         for (ch = 1; ch < nb_display_channels; ch++) {
821             y = s->ytop + ch * h;
822             fill_rectangle(screen,
823                            s->xleft, y, s->width, 1,
824                            fgcolor);
825         }
826         SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
827     } else {
828         nb_display_channels= FFMIN(nb_display_channels, 2);
829         if (rdft_bits != s->rdft_bits) {
830             av_rdft_end(s->rdft);
831             av_free(s->rdft_data);
832             s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
833             s->rdft_bits = rdft_bits;
834             s->rdft_data = av_malloc(4 * nb_freq * sizeof(*s->rdft_data));
835         }
836         {
837             FFTSample *data[2];
838             for (ch = 0; ch < nb_display_channels; ch++) {
839                 data[ch] = s->rdft_data + 2 * nb_freq * ch;
840                 i = i_start + ch;
841                 for (x = 0; x < 2 * nb_freq; x++) {
842                     double w = (x-nb_freq) * (1.0 / nb_freq);
843                     data[ch][x] = s->sample_array[i] * (1.0 - w * w);
844                     i += channels;
845                     if (i >= SAMPLE_ARRAY_SIZE)
846                         i -= SAMPLE_ARRAY_SIZE;
847                 }
848                 av_rdft_calc(s->rdft, data[ch]);
849             }
850             // Least efficient way to do this; we could access the data directly, but this is more than fast enough.
851             for (y = 0; y < s->height; y++) {
852                 double w = 1 / sqrt(nb_freq);
853                 int a = sqrt(w * sqrt(data[0][2 * y + 0] * data[0][2 * y + 0] + data[0][2 * y + 1] * data[0][2 * y + 1]));
854                 int b = (nb_display_channels == 2 ) ? sqrt(w * sqrt(data[1][2 * y + 0] * data[1][2 * y + 0]
855                        + data[1][2 * y + 1] * data[1][2 * y + 1])) : a;
856                 a = FFMIN(a, 255);
857                 b = FFMIN(b, 255);
858                 fgcolor = SDL_MapRGB(screen->format, a, b, (a + b) / 2);
859
860                 fill_rectangle(screen,
861                             s->xpos, s->height-y, 1, 1,
862                             fgcolor);
863             }
864         }
865         SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
866         s->xpos++;
867         if (s->xpos >= s->width)
868             s->xpos= s->xleft;
869     }
870 }
871
872 static int video_open(VideoState *is)
873 {
874     int flags = SDL_HWSURFACE | SDL_ASYNCBLIT | SDL_HWACCEL;
875     int w,h;
876
877     if (is_full_screen) flags |= SDL_FULLSCREEN;
878     else                flags |= SDL_RESIZABLE;
879
880     if (is_full_screen && fs_screen_width) {
881         w = fs_screen_width;
882         h = fs_screen_height;
883     } else if (!is_full_screen && screen_width) {
884         w = screen_width;
885         h = screen_height;
886 #if CONFIG_AVFILTER
887     } else if (is->out_video_filter && is->out_video_filter->inputs[0]) {
888         w = is->out_video_filter->inputs[0]->w;
889         h = is->out_video_filter->inputs[0]->h;
890 #else
891     } else if (is->video_st && is->video_st->codec->width) {
892         w = is->video_st->codec->width;
893         h = is->video_st->codec->height;
894 #endif
895     } else {
896         w = 640;
897         h = 480;
898     }
899     if (screen && is->width == screen->w && screen->w == w
900        && is->height== screen->h && screen->h == h)
901         return 0;
902
903 #if defined(__APPLE__) && !SDL_VERSION_ATLEAST(1, 2, 14)
904     /* setting bits_per_pixel = 0 or 32 causes blank video on OS X and older SDL */
905     screen = SDL_SetVideoMode(w, h, 24, flags);
906 #else
907     screen = SDL_SetVideoMode(w, h, 0, flags);
908 #endif
909     if (!screen) {
910         fprintf(stderr, "SDL: could not set video mode - exiting\n");
911         return -1;
912     }
913     if (!window_title)
914         window_title = input_filename;
915     SDL_WM_SetCaption(window_title, window_title);
916
917     is->width  = screen->w;
918     is->height = screen->h;
919
920     return 0;
921 }
922
923 /* display the current picture, if any */
924 static void video_display(VideoState *is)
925 {
926     if (!screen)
927         video_open(cur_stream);
928     if (is->audio_st && is->show_audio)
929         video_audio_display(is);
930     else if (is->video_st)
931         video_image_display(is);
932 }
933
934 static int refresh_thread(void *opaque)
935 {
936     VideoState *is= opaque;
937     while (!is->abort_request) {
938         SDL_Event event;
939         event.type = FF_REFRESH_EVENT;
940         event.user.data1 = opaque;
941         if (!is->refresh) {
942             is->refresh = 1;
943             SDL_PushEvent(&event);
944         }
945         av_usleep(is->audio_st && is->show_audio ? rdftspeed * 1000 : 5000); // FIXME: ideally we should wait for the correct time, but SDL's event passing is too slow to make that worthwhile
946     }
947     return 0;
948 }
949
950 /* get the current audio clock value */
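/* audio_clock is the stream PTS at the end of the last decoded audio data;
 * subtracting the playback time of the bytes still sitting in the output
 * buffer gives the PTS of the sample currently being played. */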
951 static double get_audio_clock(VideoState *is)
952 {
953     double pts;
954     int hw_buf_size, bytes_per_sec;
955     pts = is->audio_clock;
956     hw_buf_size = audio_write_get_buf_size(is);
957     bytes_per_sec = 0;
958     if (is->audio_st) {
959         bytes_per_sec = is->sdl_sample_rate * is->sdl_channels *
960                         av_get_bytes_per_sample(is->sdl_sample_fmt);
961     }
962     if (bytes_per_sec)
963         pts -= (double)hw_buf_size / bytes_per_sec;
964     return pts;
965 }
966
967 /* get the current video clock value */
968 static double get_video_clock(VideoState *is)
969 {
970     if (is->paused) {
971         return is->video_current_pts;
972     } else {
973         return is->video_current_pts_drift + av_gettime() / 1000000.0;
974     }
975 }
976
977 /* get the current external clock value */
978 static double get_external_clock(VideoState *is)
979 {
980     int64_t ti;
981     ti = av_gettime();
982     return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
983 }
984
985 /* get the current master clock value */
986 static double get_master_clock(VideoState *is)
987 {
988     double val;
989
990     if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
991         if (is->video_st)
992             val = get_video_clock(is);
993         else
994             val = get_audio_clock(is);
995     } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
996         if (is->audio_st)
997             val = get_audio_clock(is);
998         else
999             val = get_video_clock(is);
1000     } else {
1001         val = get_external_clock(is);
1002     }
1003     return val;
1004 }
1005
1006 /* seek in the stream */
1007 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1008 {
1009     if (!is->seek_req) {
1010         is->seek_pos = pos;
1011         is->seek_rel = rel;
1012         is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1013         if (seek_by_bytes)
1014             is->seek_flags |= AVSEEK_FLAG_BYTE;
1015         is->seek_req = 1;
1016     }
1017 }
1018
1019 /* pause or resume the video */
1020 static void stream_pause(VideoState *is)
1021 {
1022     if (is->paused) {
1023         is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
1024         if (is->read_pause_return != AVERROR(ENOSYS)) {
1025             is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
1026         }
1027         is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
1028     }
1029     is->paused = !is->paused;
1030 }
1031
1032 static double compute_target_time(double frame_current_pts, VideoState *is)
1033 {
1034     double delay, sync_threshold, diff = 0;
1035
1036     /* compute nominal delay */
1037     delay = frame_current_pts - is->frame_last_pts;
1038     if (delay <= 0 || delay >= 10.0) {
1039         /* if incorrect delay, use previous one */
1040         delay = is->frame_last_delay;
1041     } else {
1042         is->frame_last_delay = delay;
1043     }
1044     is->frame_last_pts = frame_current_pts;
1045
1046     /* update delay to follow master synchronisation source */
1047     if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1048          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1049         /* if video is slave, we try to correct big delays by
1050            duplicating or deleting a frame */
1051         diff = get_video_clock(is) - get_master_clock(is);
1052
1053         /* skip or repeat frame. We take into account the
1054            delay to compute the threshold. I still don't know
1055            if it is the best guess */
1056         sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1057         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1058             if (diff <= -sync_threshold)
1059                 delay = 0;
1060             else if (diff >= sync_threshold)
1061                 delay = 2 * delay;
1062         }
1063     }
1064     is->frame_timer += delay;
1065
1066     av_dlog(NULL, "video: delay=%0.3f pts=%0.3f A-V=%f\n",
1067             delay, frame_current_pts, -diff);
1068
1069     return is->frame_timer;
1070 }
1071
1072 /* called to display each frame */
1073 static void video_refresh_timer(void *opaque)
1074 {
1075     VideoState *is = opaque;
1076     VideoPicture *vp;
1077
1078     SubPicture *sp, *sp2;
1079
1080     if (is->video_st) {
1081 retry:
1082         if (is->pictq_size == 0) {
1083             // nothing to do, no picture to display in the queue
1084         } else {
1085             double time = av_gettime() / 1000000.0;
1086             double next_target;
1087             /* dequeue the picture */
1088             vp = &is->pictq[is->pictq_rindex];
1089
1090             if (time < vp->target_clock)
1091                 return;
1092             /* update current video pts */
1093             is->video_current_pts = vp->pts;
1094             is->video_current_pts_drift = is->video_current_pts - time;
1095             is->video_current_pos = vp->pos;
1096             if (is->pictq_size > 1) {
1097                 VideoPicture *nextvp = &is->pictq[(is->pictq_rindex + 1) % VIDEO_PICTURE_QUEUE_SIZE];
1098                 assert(nextvp->target_clock >= vp->target_clock);
1099                 next_target= nextvp->target_clock;
1100             } else {
1101                 next_target = vp->target_clock + is->video_clock - vp->pts; // FIXME pass durations cleanly
1102             }
1103             if (framedrop && time > next_target) {
1104                 is->skip_frames *= 1.0 + FRAME_SKIP_FACTOR;
1105                 if (is->pictq_size > 1 || time > next_target + 0.5) {
1106                     /* update queue size and signal for next picture */
1107                     if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1108                         is->pictq_rindex = 0;
1109
1110                     SDL_LockMutex(is->pictq_mutex);
1111                     is->pictq_size--;
1112                     SDL_CondSignal(is->pictq_cond);
1113                     SDL_UnlockMutex(is->pictq_mutex);
1114                     goto retry;
1115                 }
1116             }
1117
1118             if (is->subtitle_st) {
1119                 if (is->subtitle_stream_changed) {
1120                     SDL_LockMutex(is->subpq_mutex);
1121
1122                     while (is->subpq_size) {
1123                         free_subpicture(&is->subpq[is->subpq_rindex]);
1124
1125                         /* update queue size and signal for next picture */
1126                         if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1127                             is->subpq_rindex = 0;
1128
1129                         is->subpq_size--;
1130                     }
1131                     is->subtitle_stream_changed = 0;
1132
1133                     SDL_CondSignal(is->subpq_cond);
1134                     SDL_UnlockMutex(is->subpq_mutex);
1135                 } else {
1136                     if (is->subpq_size > 0) {
1137                         sp = &is->subpq[is->subpq_rindex];
1138
1139                         if (is->subpq_size > 1)
1140                             sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1141                         else
1142                             sp2 = NULL;
1143
1144                         if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1145                                 || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1146                         {
1147                             free_subpicture(sp);
1148
1149                             /* update queue size and signal for next picture */
1150                             if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1151                                 is->subpq_rindex = 0;
1152
1153                             SDL_LockMutex(is->subpq_mutex);
1154                             is->subpq_size--;
1155                             SDL_CondSignal(is->subpq_cond);
1156                             SDL_UnlockMutex(is->subpq_mutex);
1157                         }
1158                     }
1159                 }
1160             }
1161
1162             /* display picture */
1163             if (!display_disable)
1164                 video_display(is);
1165
1166             /* update queue size and signal for next picture */
1167             if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1168                 is->pictq_rindex = 0;
1169
1170             SDL_LockMutex(is->pictq_mutex);
1171             is->pictq_size--;
1172             SDL_CondSignal(is->pictq_cond);
1173             SDL_UnlockMutex(is->pictq_mutex);
1174         }
1175     } else if (is->audio_st) {
1176         /* draw the next audio frame */
1177
1178         /* if only audio stream, then display the audio bars (better
1179            than nothing, just to test the implementation) */
1180
1181         /* display picture */
1182         if (!display_disable)
1183             video_display(is);
1184     }
1185     if (show_status) {
1186         static int64_t last_time;
1187         int64_t cur_time;
1188         int aqsize, vqsize, sqsize;
1189         double av_diff;
1190
1191         cur_time = av_gettime();
1192         if (!last_time || (cur_time - last_time) >= 30000) {
1193             aqsize = 0;
1194             vqsize = 0;
1195             sqsize = 0;
1196             if (is->audio_st)
1197                 aqsize = is->audioq.size;
1198             if (is->video_st)
1199                 vqsize = is->videoq.size;
1200             if (is->subtitle_st)
1201                 sqsize = is->subtitleq.size;
1202             av_diff = 0;
1203             if (is->audio_st && is->video_st)
1204                 av_diff = get_audio_clock(is) - get_video_clock(is);
1205             printf("%7.2f A-V:%7.3f s:%3.1f aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
1206                    get_master_clock(is), av_diff, FFMAX(is->skip_frames - 1, 0), aqsize / 1024,
1207                    vqsize / 1024, sqsize, is->pts_ctx.num_faulty_dts, is->pts_ctx.num_faulty_pts);
1208             fflush(stdout);
1209             last_time = cur_time;
1210         }
1211     }
1212 }
1213
1214 static void stream_close(VideoState *is)
1215 {
1216     VideoPicture *vp;
1217     int i;
1218     /* XXX: use a special url_shutdown call to abort parse cleanly */
1219     is->abort_request = 1;
1220     SDL_WaitThread(is->parse_tid, NULL);
1221     SDL_WaitThread(is->refresh_tid, NULL);
1222
1223     /* free all pictures */
1224     for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
1225         vp = &is->pictq[i];
1226         if (vp->bmp) {
1227             SDL_FreeYUVOverlay(vp->bmp);
1228             vp->bmp = NULL;
1229         }
1230     }
1231     SDL_DestroyMutex(is->pictq_mutex);
1232     SDL_DestroyCond(is->pictq_cond);
1233     SDL_DestroyMutex(is->subpq_mutex);
1234     SDL_DestroyCond(is->subpq_cond);
1235 #if !CONFIG_AVFILTER
1236     if (is->img_convert_ctx)
1237         sws_freeContext(is->img_convert_ctx);
1238 #endif
1239     av_free(is);
1240 }
1241
1242 static void do_exit(void)
1243 {
1244     if (cur_stream) {
1245         stream_close(cur_stream);
1246         cur_stream = NULL;
1247     }
1248     uninit_opts();
1249 #if CONFIG_AVFILTER
1250     avfilter_uninit();
1251 #endif
1252     avformat_network_deinit();
1253     if (show_status)
1254         printf("\n");
1255     SDL_Quit();
1256     av_log(NULL, AV_LOG_QUIET, "");
1257     exit(0);
1258 }
1259
1260 /* allocate a picture (this must be done in the main thread to avoid
1261    potential locking problems) */
1262 static void alloc_picture(void *opaque)
1263 {
1264     VideoState *is = opaque;
1265     VideoPicture *vp;
1266
1267     vp = &is->pictq[is->pictq_windex];
1268
1269     if (vp->bmp)
1270         SDL_FreeYUVOverlay(vp->bmp);
1271
1272 #if CONFIG_AVFILTER
1273     vp->width   = is->out_video_filter->inputs[0]->w;
1274     vp->height  = is->out_video_filter->inputs[0]->h;
1275     vp->pix_fmt = is->out_video_filter->inputs[0]->format;
1276 #else
1277     vp->width   = is->video_st->codec->width;
1278     vp->height  = is->video_st->codec->height;
1279     vp->pix_fmt = is->video_st->codec->pix_fmt;
1280 #endif
1281
1282     vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
1283                                    SDL_YV12_OVERLAY,
1284                                    screen);
1285     if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
1286         /* SDL allocates a buffer smaller than requested if the video
1287          * overlay hardware is unable to support the requested size. */
1288         fprintf(stderr, "Error: the video system does not support an image\n"
1289                         "size of %dx%d pixels. Try using -vf \"scale=w:h\"\n"
1290                         "to reduce the image size.\n", vp->width, vp->height );
1291         do_exit();
1292     }
1293
1294     SDL_LockMutex(is->pictq_mutex);
1295     vp->allocated = 1;
1296     SDL_CondSignal(is->pictq_cond);
1297     SDL_UnlockMutex(is->pictq_mutex);
1298 }
1299
1300 /* The 'pts' parameter is the dts of the packet / pts of the frame and
1301  * guessed if not known. */
1302 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, int64_t pos)
1303 {
1304     VideoPicture *vp;
1305 #if CONFIG_AVFILTER
1306     AVPicture pict_src;
1307 #else
1308     int dst_pix_fmt = AV_PIX_FMT_YUV420P;
1309 #endif
1310     /* wait until we have space to put a new picture */
1311     SDL_LockMutex(is->pictq_mutex);
1312
1313     if (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE && !is->refresh)
1314         is->skip_frames = FFMAX(1.0 - FRAME_SKIP_FACTOR, is->skip_frames * (1.0 - FRAME_SKIP_FACTOR));
1315
1316     while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1317            !is->videoq.abort_request) {
1318         SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1319     }
1320     SDL_UnlockMutex(is->pictq_mutex);
1321
1322     if (is->videoq.abort_request)
1323         return -1;
1324
1325     vp = &is->pictq[is->pictq_windex];
1326
1327     /* alloc or resize hardware picture buffer */
1328     if (!vp->bmp || vp->reallocate ||
1329 #if CONFIG_AVFILTER
1330         vp->width  != is->out_video_filter->inputs[0]->w ||
1331         vp->height != is->out_video_filter->inputs[0]->h) {
1332 #else
1333         vp->width != is->video_st->codec->width ||
1334         vp->height != is->video_st->codec->height) {
1335 #endif
1336         SDL_Event event;
1337
1338         vp->allocated  = 0;
1339         vp->reallocate = 0;
1340
1341         /* the allocation must be done in the main thread to avoid
1342            locking problems */
1343         event.type = FF_ALLOC_EVENT;
1344         event.user.data1 = is;
1345         SDL_PushEvent(&event);
1346
1347         /* wait until the picture is allocated */
1348         SDL_LockMutex(is->pictq_mutex);
1349         while (!vp->allocated && !is->videoq.abort_request) {
1350             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1351         }
1352         SDL_UnlockMutex(is->pictq_mutex);
1353
1354         if (is->videoq.abort_request)
1355             return -1;
1356     }
1357
1358     /* if the frame is not skipped, then display it */
1359     if (vp->bmp) {
1360         AVPicture pict = { { 0 } };
1361
1362         /* get a pointer on the bitmap */
1363         SDL_LockYUVOverlay (vp->bmp);
1364
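        /* SDL's YV12 overlay stores its planes in Y, V, U order while
         * AVPicture expects Y, U, V, so the chroma plane pointers and
         * pitches are swapped here. */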
1365         pict.data[0] = vp->bmp->pixels[0];
1366         pict.data[1] = vp->bmp->pixels[2];
1367         pict.data[2] = vp->bmp->pixels[1];
1368
1369         pict.linesize[0] = vp->bmp->pitches[0];
1370         pict.linesize[1] = vp->bmp->pitches[2];
1371         pict.linesize[2] = vp->bmp->pitches[1];
1372
1373 #if CONFIG_AVFILTER
1374         pict_src.data[0] = src_frame->data[0];
1375         pict_src.data[1] = src_frame->data[1];
1376         pict_src.data[2] = src_frame->data[2];
1377
1378         pict_src.linesize[0] = src_frame->linesize[0];
1379         pict_src.linesize[1] = src_frame->linesize[1];
1380         pict_src.linesize[2] = src_frame->linesize[2];
1381
1382         // FIXME use direct rendering
1383         av_picture_copy(&pict, &pict_src,
1384                         vp->pix_fmt, vp->width, vp->height);
1385 #else
1386         av_opt_get_int(sws_opts, "sws_flags", 0, &sws_flags);
1387         is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
1388             vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
1389             dst_pix_fmt, sws_flags, NULL, NULL, NULL);
1390         if (is->img_convert_ctx == NULL) {
1391             fprintf(stderr, "Cannot initialize the conversion context\n");
1392             exit(1);
1393         }
1394         sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1395                   0, vp->height, pict.data, pict.linesize);
1396 #endif
1397         /* update the bitmap content */
1398         SDL_UnlockYUVOverlay(vp->bmp);
1399
1400         vp->pts = pts;
1401         vp->pos = pos;
1402
1403         /* now we can update the picture count */
1404         if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1405             is->pictq_windex = 0;
1406         SDL_LockMutex(is->pictq_mutex);
1407         vp->target_clock = compute_target_time(vp->pts, is);
1408
1409         is->pictq_size++;
1410         SDL_UnlockMutex(is->pictq_mutex);
1411     }
1412     return 0;
1413 }
1414
1415 /* Compute the exact PTS for the picture if it is omitted in the stream.
1416  * The 'pts1' parameter is the dts of the packet / pts of the frame. */
1417 static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
1418 {
1419     double frame_delay, pts;
1420     int ret;
1421
1422     pts = pts1;
1423
1424     if (pts != 0) {
1425         /* update video clock with pts, if present */
1426         is->video_clock = pts;
1427     } else {
1428         pts = is->video_clock;
1429     }
1430     /* update video clock for next frame */
1431     frame_delay = av_q2d(is->video_st->codec->time_base);
1432     /* for MPEG2, the frame can be repeated, so we update the
1433        clock accordingly */
1434     frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1435     is->video_clock += frame_delay;
1436
1437     ret = queue_picture(is, src_frame, pts, pos);
1438     av_frame_unref(src_frame);
1439     return ret;
1440 }
1441
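/* Pull the next packet from the video queue and decode it.  A flush_pkt
 * resets the decoder, the picture queue timing and the PTS-correction state
 * (used after a seek); otherwise return 1 if a decoded frame should be
 * displayed, 0 if no displayable frame was produced (nothing decoded yet, or
 * dropped by the frame-skipping logic), and < 0 on abort. */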
1442 static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
1443 {
1444     int got_picture, i;
1445
1446     if (packet_queue_get(&is->videoq, pkt, 1) < 0)
1447         return -1;
1448
1449     if (pkt->data == flush_pkt.data) {
1450         avcodec_flush_buffers(is->video_st->codec);
1451
1452         SDL_LockMutex(is->pictq_mutex);
1453         // Make sure there are no long-delay timers (ideally we should just flush the queue, but that's harder)
1454         for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
1455             is->pictq[i].target_clock= 0;
1456         }
1457         while (is->pictq_size && !is->videoq.abort_request) {
1458             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1459         }
1460         is->video_current_pos = -1;
1461         SDL_UnlockMutex(is->pictq_mutex);
1462
1463         init_pts_correction(&is->pts_ctx);
1464         is->frame_last_pts = AV_NOPTS_VALUE;
1465         is->frame_last_delay = 0;
1466         is->frame_timer = (double)av_gettime() / 1000000.0;
1467         is->skip_frames = 1;
1468         is->skip_frames_index = 0;
1469         return 0;
1470     }
1471
1472     avcodec_decode_video2(is->video_st->codec, frame, &got_picture, pkt);
1473
1474     if (got_picture) {
1475         if (decoder_reorder_pts == -1) {
1476             *pts = guess_correct_pts(&is->pts_ctx, frame->pkt_pts, frame->pkt_dts);
1477         } else if (decoder_reorder_pts) {
1478             *pts = frame->pkt_pts;
1479         } else {
1480             *pts = frame->pkt_dts;
1481         }
1482
1483         if (*pts == AV_NOPTS_VALUE) {
1484             *pts = 0;
1485         }
1486         if (is->video_st->sample_aspect_ratio.num) {
1487             frame->sample_aspect_ratio = is->video_st->sample_aspect_ratio;
1488         }
1489
1490         is->skip_frames_index += 1;
1491         if (is->skip_frames_index >= is->skip_frames) {
1492             is->skip_frames_index -= FFMAX(is->skip_frames, 1.0);
1493             return 1;
1494         }
1495         av_frame_unref(frame);
1496     }
1497     return 0;
1498 }
1499
1500 #if CONFIG_AVFILTER
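/* Build the video filter graph: a "buffer" source configured with the
 * decoder's geometry and time base, the user-supplied filter chain (if any),
 * a "format" filter forcing yuv420p, and a "buffersink" output from which
 * video_thread() pulls the filtered frames. */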
1501 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters)
1502 {
1503     char sws_flags_str[128];
1504     char buffersrc_args[256];
1505     int ret;
1506     AVFilterContext *filt_src = NULL, *filt_out = NULL, *filt_format;
1507     AVCodecContext *codec = is->video_st->codec;
1508
1509     snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%"PRId64, sws_flags);
1510     graph->scale_sws_opts = av_strdup(sws_flags_str);
1511
1512     snprintf(buffersrc_args, sizeof(buffersrc_args), "%d:%d:%d:%d:%d:%d:%d",
1513              codec->width, codec->height, codec->pix_fmt,
1514              is->video_st->time_base.num, is->video_st->time_base.den,
1515              codec->sample_aspect_ratio.num, codec->sample_aspect_ratio.den);
1516
1517
1518     if ((ret = avfilter_graph_create_filter(&filt_src,
1519                                             avfilter_get_by_name("buffer"),
1520                                             "src", buffersrc_args, NULL,
1521                                             graph)) < 0)
1522         return ret;
1523     if ((ret = avfilter_graph_create_filter(&filt_out,
1524                                             avfilter_get_by_name("buffersink"),
1525                                             "out", NULL, NULL, graph)) < 0)
1526         return ret;
1527
1528     if ((ret = avfilter_graph_create_filter(&filt_format,
1529                                             avfilter_get_by_name("format"),
1530                                             "format", "yuv420p", NULL, graph)) < 0)
1531         return ret;
1532     if ((ret = avfilter_link(filt_format, 0, filt_out, 0)) < 0)
1533         return ret;
1534
1535
1536     if (vfilters) {
1537         AVFilterInOut *outputs = avfilter_inout_alloc();
1538         AVFilterInOut *inputs  = avfilter_inout_alloc();
1539
1540         outputs->name    = av_strdup("in");
1541         outputs->filter_ctx = filt_src;
1542         outputs->pad_idx = 0;
1543         outputs->next    = NULL;
1544
1545         inputs->name    = av_strdup("out");
1546         inputs->filter_ctx = filt_format;
1547         inputs->pad_idx = 0;
1548         inputs->next    = NULL;
1549
1550         if ((ret = avfilter_graph_parse(graph, vfilters, inputs, outputs, NULL)) < 0)
1551             return ret;
1552     } else {
1553         if ((ret = avfilter_link(filt_src, 0, filt_format, 0)) < 0)
1554             return ret;
1555     }
1556
1557     if ((ret = avfilter_graph_config(graph, NULL)) < 0)
1558         return ret;
1559
1560     is->in_video_filter  = filt_src;
1561     is->out_video_filter = filt_out;
1562
1563     return ret;
1564 }
1565
1566 #endif  /* CONFIG_AVFILTER */
1567
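/* Video decoding thread: pull packets from the video queue, decode them,
 * optionally push the frames through the libavfilter graph, convert the PTS
 * to seconds and hand the result to queue_picture() via output_picture2(). */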
1568 static int video_thread(void *arg)
1569 {
1570     AVPacket pkt = { 0 };
1571     VideoState *is = arg;
1572     AVFrame *frame = av_frame_alloc();
1573     int64_t pts_int;
1574     double pts;
1575     int ret;
1576
1577 #if CONFIG_AVFILTER
1578     AVFilterGraph *graph = avfilter_graph_alloc();
1579     AVFilterContext *filt_out = NULL, *filt_in = NULL;
1580     int last_w = is->video_st->codec->width;
1581     int last_h = is->video_st->codec->height;
1582
1583     if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
1584         goto the_end;
1585     filt_in  = is->in_video_filter;
1586     filt_out = is->out_video_filter;
1587 #endif
1588
1589     for (;;) {
1590 #if CONFIG_AVFILTER
1591         AVRational tb;
1592 #endif
1593         while (is->paused && !is->videoq.abort_request)
1594             SDL_Delay(10);
1595
1596         av_free_packet(&pkt);
1597
1598         ret = get_video_frame(is, frame, &pts_int, &pkt);
1599         if (ret < 0)
1600             goto the_end;
1601
1602         if (!ret)
1603             continue;
1604
1605 #if CONFIG_AVFILTER
1606         if (   last_w != is->video_st->codec->width
1607             || last_h != is->video_st->codec->height) {
1608             av_dlog(NULL, "Changing size %dx%d -> %dx%d\n", last_w, last_h,
1609                     is->video_st->codec->width, is->video_st->codec->height);
1610             avfilter_graph_free(&graph);
1611             graph = avfilter_graph_alloc();
1612             if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
1613                 goto the_end;
1614             filt_in  = is->in_video_filter;
1615             filt_out = is->out_video_filter;
1616             last_w = is->video_st->codec->width;
1617             last_h = is->video_st->codec->height;
1618         }
1619
1620         frame->pts = pts_int;
1621         ret = av_buffersrc_add_frame(filt_in, frame);
1622         if (ret < 0)
1623             goto the_end;
1624
1625         while (ret >= 0) {
1626             ret = av_buffersink_get_frame(filt_out, frame);
1627             if (ret < 0) {
1628                 ret = 0;
1629                 break;
1630             }
1631
1632             pts_int = frame->pts;
1633             tb      = filt_out->inputs[0]->time_base;
1634             if (av_cmp_q(tb, is->video_st->time_base)) {
1635                 av_unused int64_t pts1 = pts_int;
1636                 pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
1637                 av_dlog(NULL, "video_thread(): "
1638                         "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
1639                         tb.num, tb.den, pts1,
1640                         is->video_st->time_base.num, is->video_st->time_base.den, pts_int);
1641             }
1642             pts = pts_int * av_q2d(is->video_st->time_base);
1643             ret = output_picture2(is, frame, pts, 0);
1644         }
1645 #else
1646         pts = pts_int * av_q2d(is->video_st->time_base);
1647         ret = output_picture2(is, frame, pts,  pkt.pos);
1648 #endif
1649
1650         if (ret < 0)
1651             goto the_end;
1652
1653
1654         if (step)
1655             if (cur_stream)
1656                 stream_pause(cur_stream);
1657     }
1658  the_end:
1659 #if CONFIG_AVFILTER
1660     av_freep(&vfilters);
1661     avfilter_graph_free(&graph);
1662 #endif
1663     av_free_packet(&pkt);
1664     av_frame_free(&frame);
1665     return 0;
1666 }
1667
1668 static int subtitle_thread(void *arg)
1669 {
1670     VideoState *is = arg;
1671     SubPicture *sp;
1672     AVPacket pkt1, *pkt = &pkt1;
1673     int got_subtitle;
1674     double pts;
1675     int i, j;
1676     int r, g, b, y, u, v, a;
1677
1678     for (;;) {
1679         while (is->paused && !is->subtitleq.abort_request) {
1680             SDL_Delay(10);
1681         }
1682         if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
1683             break;
1684
1685         if (pkt->data == flush_pkt.data) {
1686             avcodec_flush_buffers(is->subtitle_st->codec);
1687             continue;
1688         }
1689         SDL_LockMutex(is->subpq_mutex);
1690         while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
1691                !is->subtitleq.abort_request) {
1692             SDL_CondWait(is->subpq_cond, is->subpq_mutex);
1693         }
1694         SDL_UnlockMutex(is->subpq_mutex);
1695
1696         if (is->subtitleq.abort_request)
1697             return 0;
1698
1699         sp = &is->subpq[is->subpq_windex];
1700
1701         /* NOTE: pts is the PTS of the _first_ subtitle beginning in
1702            this packet, if any */
1703         pts = 0;
1704         if (pkt->pts != AV_NOPTS_VALUE)
1705             pts = av_q2d(is->subtitle_st->time_base) * pkt->pts;
1706
1707         avcodec_decode_subtitle2(is->subtitle_st->codec, &sp->sub,
1708                                  &got_subtitle, pkt);
1709
1710         if (got_subtitle && sp->sub.format == 0) {
1711             sp->pts = pts;
1712
1713             for (i = 0; i < sp->sub.num_rects; i++) {
1714                 for (j = 0; j < sp->sub.rects[i]->nb_colors; j++) {
1717                     RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
1718                     y = RGB_TO_Y_CCIR(r, g, b);
1719                     u = RGB_TO_U_CCIR(r, g, b, 0);
1720                     v = RGB_TO_V_CCIR(r, g, b, 0);
1721                     YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
1722                 }
1723             }
1724
1725             /* now we can update the picture count */
1726             if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
1727                 is->subpq_windex = 0;
1728             SDL_LockMutex(is->subpq_mutex);
1729             is->subpq_size++;
1730             SDL_UnlockMutex(is->subpq_mutex);
1731         }
1732         av_free_packet(pkt);
1733     }
1734     return 0;
1735 }
1736
1737 /* copy samples for viewing in editor window */
1738 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1739 {
1740     int size, len;
1741
1742     size = samples_size / sizeof(short);
1743     while (size > 0) {
1744         len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1745         if (len > size)
1746             len = size;
1747         memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1748         samples += len;
1749         is->sample_array_index += len;
1750         if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1751             is->sample_array_index = 0;
1752         size -= len;
1753     }
1754 }
1755
1756 /* Return the new audio buffer size (samples can be added or removed
1757    to get better sync when video or an external clock is the master). */
1758 static int synchronize_audio(VideoState *is, short *samples,
1759                              int samples_size1, double pts)
1760 {
1761     int n, samples_size;
1762     double ref_clock;
1763
1764     n = is->sdl_channels * av_get_bytes_per_sample(is->sdl_sample_fmt);
1765     samples_size = samples_size1;
1766
1767     /* if audio is not the master clock, try to remove or add samples to correct it */
1768     if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1769          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1770         double diff, avg_diff;
1771         int wanted_size, min_size, max_size, nb_samples;
1772
1773         ref_clock = get_master_clock(is);
1774         diff = get_audio_clock(is) - ref_clock;
1775
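         /* audio_diff_cum is an exponentially weighted sum of A-V differences;
          * since audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB),
          * multiplying it by (1 - coef) below yields an average over roughly
          * the last AUDIO_DIFF_AVG_NB measurements. */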
1776         if (diff < AV_NOSYNC_THRESHOLD) {
1777             is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1778             if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1779                 /* not enough measures to have a correct estimate */
1780                 is->audio_diff_avg_count++;
1781             } else {
1782                 /* estimate the A-V difference */
1783                 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1784
1785                 if (fabs(avg_diff) >= is->audio_diff_threshold) {
1786                     wanted_size = samples_size + ((int)(diff * is->sdl_sample_rate) * n);
1787                     nb_samples = samples_size / n;
1788
1789                     min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1790                     max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1791                     if (wanted_size < min_size)
1792                         wanted_size = min_size;
1793                     else if (wanted_size > max_size)
1794                         wanted_size = max_size;
1795
1796                     /* add or remove samples to correct the synchro */
1797                     if (wanted_size < samples_size) {
1798                         /* remove samples */
1799                         samples_size = wanted_size;
1800                     } else if (wanted_size > samples_size) {
1801                         uint8_t *samples_end, *q;
1802                         int nb;
1803
1804                         /* add samples */
1805                         nb = wanted_size - samples_size;
1806                         samples_end = (uint8_t *)samples + samples_size - n;
1807                         q = samples_end + n;
1808                         while (nb > 0) {
1809                             memcpy(q, samples_end, n);
1810                             q += n;
1811                             nb -= n;
1812                         }
1813                         samples_size = wanted_size;
1814                     }
1815                 }
1816                 av_dlog(NULL, "diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
1817                         diff, avg_diff, samples_size - samples_size1,
1818                         is->audio_clock, is->video_clock, is->audio_diff_threshold);
1819             }
1820         } else {
1821             /* too big difference : may be initial PTS errors, so
1822                reset A-V filter */
1823             is->audio_diff_avg_count = 0;
1824             is->audio_diff_cum       = 0;
1825         }
1826     }
1827
1828     return samples_size;
1829 }
1830
1831 /* decode one audio frame and return its uncompressed size in bytes */
1832 static int audio_decode_frame(VideoState *is, double *pts_ptr)
1833 {
1834     AVPacket *pkt_temp = &is->audio_pkt_temp;
1835     AVPacket *pkt = &is->audio_pkt;
1836     AVCodecContext *dec = is->audio_st->codec;
1837     int n, len1, data_size, got_frame;
1838     double pts;
1839     int new_packet = 0;
1840     int flush_complete = 0;
1841
1842     for (;;) {
1843         /* NOTE: the audio packet can contain several frames */
1844         while (pkt_temp->size > 0 || (!pkt_temp->data && new_packet)) {
1845             int resample_changed, audio_resample;
1846
1847             if (!is->frame) {
1848                 if (!(is->frame = avcodec_alloc_frame()))
1849                     return AVERROR(ENOMEM);
1850             } else
1851                 avcodec_get_frame_defaults(is->frame);
1852
1853             if (flush_complete)
1854                 break;
1855             new_packet = 0;
1856             len1 = avcodec_decode_audio4(dec, is->frame, &got_frame, pkt_temp);
1857             if (len1 < 0) {
1858                 /* if error, we skip the frame */
1859                 pkt_temp->size = 0;
1860                 break;
1861             }
1862
1863             pkt_temp->data += len1;
1864             pkt_temp->size -= len1;
1865
1866             if (!got_frame) {
1867                 /* stop sending empty packets if the decoder is finished */
1868                 if (!pkt_temp->data && dec->codec->capabilities & CODEC_CAP_DELAY)
1869                     flush_complete = 1;
1870                 continue;
1871             }
1872             data_size = av_samples_get_buffer_size(NULL, dec->channels,
1873                                                    is->frame->nb_samples,
1874                                                    is->frame->format, 1);
1875
1876             audio_resample = is->frame->format         != is->sdl_sample_fmt     ||
1877                              is->frame->channel_layout != is->sdl_channel_layout ||
1878                              is->frame->sample_rate    != is->sdl_sample_rate;
1879
1880             resample_changed = is->frame->format         != is->resample_sample_fmt     ||
1881                                is->frame->channel_layout != is->resample_channel_layout ||
1882                                is->frame->sample_rate    != is->resample_sample_rate;
1883
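             /* (Re)configure the resampler whenever the decoded frame no
              * longer matches the SDL output format or the parameters used
              * for the previous conversion. */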
1884             if ((!is->avr && audio_resample) || resample_changed) {
1885                 int ret;
1886                 if (is->avr)
1887                     avresample_close(is->avr);
1888                 else if (audio_resample) {
1889                     is->avr = avresample_alloc_context();
1890                     if (!is->avr) {
1891                         fprintf(stderr, "error allocating AVAudioResampleContext\n");
1892                         break;
1893                     }
1894                 }
1895                 if (audio_resample) {
1896                     av_opt_set_int(is->avr, "in_channel_layout",  is->frame->channel_layout, 0);
1897                     av_opt_set_int(is->avr, "in_sample_fmt",      is->frame->format,         0);
1898                     av_opt_set_int(is->avr, "in_sample_rate",     is->frame->sample_rate,    0);
1899                     av_opt_set_int(is->avr, "out_channel_layout", is->sdl_channel_layout,    0);
1900                     av_opt_set_int(is->avr, "out_sample_fmt",     is->sdl_sample_fmt,        0);
1901                     av_opt_set_int(is->avr, "out_sample_rate",    is->sdl_sample_rate,       0);
1902
1903                     if ((ret = avresample_open(is->avr)) < 0) {
1904                         fprintf(stderr, "error initializing libavresample\n");
1905                         break;
1906                     }
1907                 }
1908                 is->resample_sample_fmt     = is->frame->format;
1909                 is->resample_channel_layout = is->frame->channel_layout;
1910                 is->resample_sample_rate    = is->frame->sample_rate;
1911             }
1912
1913             if (audio_resample) {
1914                 void *tmp_out;
1915                 int out_samples, out_size, out_linesize;
1916                 int osize      = av_get_bytes_per_sample(is->sdl_sample_fmt);
1917                 int nb_samples = is->frame->nb_samples;
1918
1919                 out_size = av_samples_get_buffer_size(&out_linesize,
1920                                                       is->sdl_channels,
1921                                                       nb_samples,
1922                                                       is->sdl_sample_fmt, 0);
1923                 tmp_out = av_realloc(is->audio_buf1, out_size);
1924                 if (!tmp_out)
1925                     return AVERROR(ENOMEM);
1926                 is->audio_buf1 = tmp_out;
1927
1928                 out_samples = avresample_convert(is->avr,
1929                                                  &is->audio_buf1,
1930                                                  out_linesize, nb_samples,
1931                                                  is->frame->data,
1932                                                  is->frame->linesize[0],
1933                                                  is->frame->nb_samples);
1934                 if (out_samples < 0) {
1935                     fprintf(stderr, "avresample_convert() failed\n");
1936                     break;
1937                 }
1938                 is->audio_buf = is->audio_buf1;
1939                 data_size = out_samples * osize * is->sdl_channels;
1940             } else {
1941                 is->audio_buf = is->frame->data[0];
1942             }
1943
1944             /* if no pts, then compute it */
1945             pts = is->audio_clock;
1946             *pts_ptr = pts;
1947             n = is->sdl_channels * av_get_bytes_per_sample(is->sdl_sample_fmt);
1948             is->audio_clock += (double)data_size /
1949                 (double)(n * is->sdl_sample_rate);
1950 #ifdef DEBUG
1951             {
1952                 static double last_clock;
1953                 printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
1954                        is->audio_clock - last_clock,
1955                        is->audio_clock, pts);
1956                 last_clock = is->audio_clock;
1957             }
1958 #endif
1959             return data_size;
1960         }
1961
1962         /* free the current packet */
1963         if (pkt->data)
1964             av_free_packet(pkt);
1965         memset(pkt_temp, 0, sizeof(*pkt_temp));
1966
1967         if (is->paused || is->audioq.abort_request) {
1968             return -1;
1969         }
1970
1971         /* read next packet */
1972         if ((new_packet = packet_queue_get(&is->audioq, pkt, 1)) < 0)
1973             return -1;
1974
1975         if (pkt->data == flush_pkt.data) {
1976             avcodec_flush_buffers(dec);
1977             flush_complete = 0;
1978         }
1979
1980         *pkt_temp = *pkt;
1981
1982         /* update the audio clock with the packet pts, if available */
1983         if (pkt->pts != AV_NOPTS_VALUE) {
1984             is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
1985         }
1986     }
1987 }
1988
1989 /* prepare a new audio buffer */
1990 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
1991 {
1992     VideoState *is = opaque;
1993     int audio_size, len1;
1994     double pts;
1995
1996     audio_callback_time = av_gettime();
1997
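     /* SDL calls this from its own audio thread and expects exactly 'len'
      * bytes to be written into 'stream'; keep decoding (or output silence
      * on error) until the request is satisfied. */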
1998     while (len > 0) {
1999         if (is->audio_buf_index >= is->audio_buf_size) {
2000             audio_size = audio_decode_frame(is, &pts);
2001             if (audio_size < 0) {
2002                 /* if error, just output silence */
2003                 is->audio_buf      = is->silence_buf;
2004                 is->audio_buf_size = sizeof(is->silence_buf);
2005             } else {
2006                 if (is->show_audio)
2007                     update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2008                 audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
2009                                                pts);
2010                 is->audio_buf_size = audio_size;
2011             }
2012             is->audio_buf_index = 0;
2013         }
2014         len1 = is->audio_buf_size - is->audio_buf_index;
2015         if (len1 > len)
2016             len1 = len;
2017         memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2018         len -= len1;
2019         stream += len1;
2020         is->audio_buf_index += len1;
2021     }
2022 }
2023
2024 /* open a given stream. Return 0 if OK */
2025 static int stream_component_open(VideoState *is, int stream_index)
2026 {
2027     AVFormatContext *ic = is->ic;
2028     AVCodecContext *avctx;
2029     AVCodec *codec;
2030     SDL_AudioSpec wanted_spec, spec;
2031     AVDictionary *opts;
2032     AVDictionaryEntry *t = NULL;
2033
2034     if (stream_index < 0 || stream_index >= ic->nb_streams)
2035         return -1;
2036     avctx = ic->streams[stream_index]->codec;
2037
2038     opts = filter_codec_opts(codec_opts, avctx->codec_id, ic, ic->streams[stream_index], NULL);
2039
2040     codec = avcodec_find_decoder(avctx->codec_id);
2041     avctx->debug_mv          = debug_mv;
2042     avctx->workaround_bugs   = workaround_bugs;
2043     avctx->idct_algo         = idct;
2044     avctx->skip_frame        = skip_frame;
2045     avctx->skip_idct         = skip_idct;
2046     avctx->skip_loop_filter  = skip_loop_filter;
2047     avctx->error_concealment = error_concealment;
2048
2049     if (fast)   avctx->flags2 |= CODEC_FLAG2_FAST;
2050
2051     if (!av_dict_get(opts, "threads", NULL, 0))
2052         av_dict_set(&opts, "threads", "auto", 0);
2053     if (avctx->codec_type == AVMEDIA_TYPE_VIDEO)
2054         av_dict_set(&opts, "refcounted_frames", "1", 0);
2055     if (!codec ||
2056         avcodec_open2(avctx, codec, &opts) < 0)
2057         return -1;
2058     if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2059         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2060         return AVERROR_OPTION_NOT_FOUND;
2061     }
2062
2063     /* prepare audio output */
2064     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2065         is->sdl_sample_rate = avctx->sample_rate;
2066
2067         if (!avctx->channel_layout)
2068             avctx->channel_layout = av_get_default_channel_layout(avctx->channels);
2069         if (!avctx->channel_layout) {
2070             fprintf(stderr, "unable to guess channel layout\n");
2071             return -1;
2072         }
2073         if (avctx->channels == 1)
2074             is->sdl_channel_layout = AV_CH_LAYOUT_MONO;
2075         else
2076             is->sdl_channel_layout = AV_CH_LAYOUT_STEREO;
2077         is->sdl_channels = av_get_channel_layout_nb_channels(is->sdl_channel_layout);
2078
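         /* SDL output is always interleaved signed 16-bit, mono or stereo;
          * anything else produced by the decoder is converted with
          * libavresample in audio_decode_frame(). */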
2079         wanted_spec.format = AUDIO_S16SYS;
2080         wanted_spec.freq = is->sdl_sample_rate;
2081         wanted_spec.channels = is->sdl_channels;
2082         wanted_spec.silence = 0;
2083         wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
2084         wanted_spec.callback = sdl_audio_callback;
2085         wanted_spec.userdata = is;
2086         if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2087             fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
2088             return -1;
2089         }
2090         is->audio_hw_buf_size = spec.size;
2091         is->sdl_sample_fmt          = AV_SAMPLE_FMT_S16;
2092         is->resample_sample_fmt     = is->sdl_sample_fmt;
2093         is->resample_channel_layout = avctx->channel_layout;
2094         is->resample_sample_rate    = avctx->sample_rate;
2095     }
2096
2097     ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2098     switch (avctx->codec_type) {
2099     case AVMEDIA_TYPE_AUDIO:
2100         is->audio_stream = stream_index;
2101         is->audio_st = ic->streams[stream_index];
2102         is->audio_buf_size  = 0;
2103         is->audio_buf_index = 0;
2104
2105         /* init averaging filter */
2106         is->audio_diff_avg_coef  = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2107         is->audio_diff_avg_count = 0;
2108         /* since we do not have a precise enough audio FIFO fullness measure,
2109            we only correct audio sync when the error exceeds this threshold */
2110         is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / avctx->sample_rate;
2111
2112         memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
2113         packet_queue_init(&is->audioq);
2114         SDL_PauseAudio(0);
2115         break;
2116     case AVMEDIA_TYPE_VIDEO:
2117         is->video_stream = stream_index;
2118         is->video_st = ic->streams[stream_index];
2119
2120         packet_queue_init(&is->videoq);
2121         is->video_tid = SDL_CreateThread(video_thread, is);
2122         break;
2123     case AVMEDIA_TYPE_SUBTITLE:
2124         is->subtitle_stream = stream_index;
2125         is->subtitle_st = ic->streams[stream_index];
2126         packet_queue_init(&is->subtitleq);
2127
2128         is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
2129         break;
2130     default:
2131         break;
2132     }
2133     return 0;
2134 }
2135
2136 static void stream_component_close(VideoState *is, int stream_index)
2137 {
2138     AVFormatContext *ic = is->ic;
2139     AVCodecContext *avctx;
2140
2141     if (stream_index < 0 || stream_index >= ic->nb_streams)
2142         return;
2143     avctx = ic->streams[stream_index]->codec;
2144
2145     switch (avctx->codec_type) {
2146     case AVMEDIA_TYPE_AUDIO:
2147         packet_queue_abort(&is->audioq);
2148
2149         SDL_CloseAudio();
2150
2151         packet_queue_end(&is->audioq);
2152         av_free_packet(&is->audio_pkt);
2153         if (is->avr)
2154             avresample_free(&is->avr);
2155         av_freep(&is->audio_buf1);
2156         is->audio_buf = NULL;
2157         avcodec_free_frame(&is->frame);
2158
2159         if (is->rdft) {
2160             av_rdft_end(is->rdft);
2161             av_freep(&is->rdft_data);
2162             is->rdft = NULL;
2163             is->rdft_bits = 0;
2164         }
2165         break;
2166     case AVMEDIA_TYPE_VIDEO:
2167         packet_queue_abort(&is->videoq);
2168
2169         /* note: we also signal this mutex to make sure we deblock the
2170            video thread in all cases */
2171         SDL_LockMutex(is->pictq_mutex);
2172         SDL_CondSignal(is->pictq_cond);
2173         SDL_UnlockMutex(is->pictq_mutex);
2174
2175         SDL_WaitThread(is->video_tid, NULL);
2176
2177         packet_queue_end(&is->videoq);
2178         break;
2179     case AVMEDIA_TYPE_SUBTITLE:
2180         packet_queue_abort(&is->subtitleq);
2181
2182         /* note: we also signal this mutex to make sure we deblock the
2183            subtitle thread in all cases */
2184         SDL_LockMutex(is->subpq_mutex);
2185         is->subtitle_stream_changed = 1;
2186
2187         SDL_CondSignal(is->subpq_cond);
2188         SDL_UnlockMutex(is->subpq_mutex);
2189
2190         SDL_WaitThread(is->subtitle_tid, NULL);
2191
2192         packet_queue_end(&is->subtitleq);
2193         break;
2194     default:
2195         break;
2196     }
2197
2198     ic->streams[stream_index]->discard = AVDISCARD_ALL;
2199     avcodec_close(avctx);
2200     switch (avctx->codec_type) {
2201     case AVMEDIA_TYPE_AUDIO:
2202         is->audio_st = NULL;
2203         is->audio_stream = -1;
2204         break;
2205     case AVMEDIA_TYPE_VIDEO:
2206         is->video_st = NULL;
2207         is->video_stream = -1;
2208         break;
2209     case AVMEDIA_TYPE_SUBTITLE:
2210         is->subtitle_st = NULL;
2211         is->subtitle_stream = -1;
2212         break;
2213     default:
2214         break;
2215     }
2216 }
2217
2218 /* since we have only one decoding thread, we can use a global
2219    variable instead of a thread local variable */
2220 static VideoState *global_video_state;
2221
2222 static int decode_interrupt_cb(void *ctx)
2223 {
2224     return global_video_state && global_video_state->abort_request;
2225 }
2226
2227 /* this thread gets the stream from the disk or the network */
2228 static int decode_thread(void *arg)
2229 {
2230     VideoState *is = arg;
2231     AVFormatContext *ic = NULL;
2232     int err, i, ret;
2233     int st_index[AVMEDIA_TYPE_NB];
2234     AVPacket pkt1, *pkt = &pkt1;
2235     int eof = 0;
2236     int pkt_in_play_range = 0;
2237     AVDictionaryEntry *t;
2238     AVDictionary **opts;
2239     int orig_nb_streams;
2240
2241     memset(st_index, -1, sizeof(st_index));
2242     is->video_stream = -1;
2243     is->audio_stream = -1;
2244     is->subtitle_stream = -1;
2245
2246     global_video_state = is;
2247
2248     ic = avformat_alloc_context();
2249     ic->interrupt_callback.callback = decode_interrupt_cb;
2250     err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
2251     if (err < 0) {
2252         print_error(is->filename, err);
2253         ret = -1;
2254         goto fail;
2255     }
2256     if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2257         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2258         ret = AVERROR_OPTION_NOT_FOUND;
2259         goto fail;
2260     }
2261     is->ic = ic;
2262
2263     if (genpts)
2264         ic->flags |= AVFMT_FLAG_GENPTS;
2265
2266     opts = setup_find_stream_info_opts(ic, codec_opts);
2267     orig_nb_streams = ic->nb_streams;
2268
2269     err = avformat_find_stream_info(ic, opts);
2270     if (err < 0) {
2271         fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
2272         ret = -1;
2273         goto fail;
2274     }
2275     for (i = 0; i < orig_nb_streams; i++)
2276         av_dict_free(&opts[i]);
2277     av_freep(&opts);
2278
2279     if (ic->pb)
2280         ic->pb->eof_reached = 0; // FIXME hack: avplay should probably not use url_feof() to test for the end
2281
2282     if (seek_by_bytes < 0)
2283         seek_by_bytes = !!(ic->iformat->flags & AVFMT_TS_DISCONT);
2284
2285     /* if seeking requested, we execute it */
2286     if (start_time != AV_NOPTS_VALUE) {
2287         int64_t timestamp;
2288
2289         timestamp = start_time;
2290         /* add the stream start time */
2291         if (ic->start_time != AV_NOPTS_VALUE)
2292             timestamp += ic->start_time;
2293         ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2294         if (ret < 0) {
2295             fprintf(stderr, "%s: could not seek to position %0.3f\n",
2296                     is->filename, (double)timestamp / AV_TIME_BASE);
2297         }
2298     }
2299
2300     for (i = 0; i < ic->nb_streams; i++)
2301         ic->streams[i]->discard = AVDISCARD_ALL;
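     /* All streams start out discarded; av_find_best_stream() below picks the
      * ones to play, honouring the -vst/-ast/-sst overrides, and
      * stream_component_open() re-enables them. */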
2302     if (!video_disable)
2303         st_index[AVMEDIA_TYPE_VIDEO] =
2304             av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2305                                 wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2306     if (!audio_disable)
2307         st_index[AVMEDIA_TYPE_AUDIO] =
2308             av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2309                                 wanted_stream[AVMEDIA_TYPE_AUDIO],
2310                                 st_index[AVMEDIA_TYPE_VIDEO],
2311                                 NULL, 0);
2312     if (!video_disable)
2313         st_index[AVMEDIA_TYPE_SUBTITLE] =
2314             av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2315                                 wanted_stream[AVMEDIA_TYPE_SUBTITLE],
2316                                 (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2317                                  st_index[AVMEDIA_TYPE_AUDIO] :
2318                                  st_index[AVMEDIA_TYPE_VIDEO]),
2319                                 NULL, 0);
2320     if (show_status) {
2321         av_dump_format(ic, 0, is->filename, 0);
2322     }
2323
2324     /* open the streams */
2325     if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2326         stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2327     }
2328
2329     ret = -1;
2330     if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2331         ret = stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2332     }
2333     is->refresh_tid = SDL_CreateThread(refresh_thread, is);
2334     if (ret < 0) {
2335         if (!display_disable)
2336             is->show_audio = 2;
2337     }
2338
2339     if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2340         stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2341     }
2342
2343     if (is->video_stream < 0 && is->audio_stream < 0) {
2344         fprintf(stderr, "%s: could not open codecs\n", is->filename);
2345         ret = -1;
2346         goto fail;
2347     }
2348
2349     for (;;) {
2350         if (is->abort_request)
2351             break;
2352         if (is->paused != is->last_paused) {
2353             is->last_paused = is->paused;
2354             if (is->paused)
2355                 is->read_pause_return = av_read_pause(ic);
2356             else
2357                 av_read_play(ic);
2358         }
2359 #if CONFIG_RTSP_DEMUXER
2360         if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
2361             /* wait 10 ms to avoid trying to get another packet */
2362             /* XXX: horrible */
2363             SDL_Delay(10);
2364             continue;
2365         }
2366 #endif
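         /* A pending seek request: seek on the demuxer, then flush the packet
          * queues and insert a flush packet so the decoder threads reset
          * their codec state. */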
2367         if (is->seek_req) {
2368             int64_t seek_target = is->seek_pos;
2369             int64_t seek_min    = is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2370             int64_t seek_max    = is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2371 // FIXME the +-2 is due to rounding being not done in the correct direction in generation
2372 //      of the seek_pos/seek_rel variables
2373
2374             ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2375             if (ret < 0) {
2376                 fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
2377             } else {
2378                 if (is->audio_stream >= 0) {
2379                     packet_queue_flush(&is->audioq);
2380                     packet_queue_put(&is->audioq, &flush_pkt);
2381                 }
2382                 if (is->subtitle_stream >= 0) {
2383                     packet_queue_flush(&is->subtitleq);
2384                     packet_queue_put(&is->subtitleq, &flush_pkt);
2385                 }
2386                 if (is->video_stream >= 0) {
2387                     packet_queue_flush(&is->videoq);
2388                     packet_queue_put(&is->videoq, &flush_pkt);
2389                 }
2390             }
2391             is->seek_req = 0;
2392             eof = 0;
2393         }
2394
2395         /* if the queues are full, no need to read more */
2396         if (!infinite_buffer &&
2397               (is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2398             || (   (is->audioq   .size  > MIN_AUDIOQ_SIZE || is->audio_stream < 0)
2399                 && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream < 0)
2400                 && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream < 0)))) {
2401             /* wait 10 ms */
2402             SDL_Delay(10);
2403             continue;
2404         }
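         /* At EOF, queue empty packets so decoders with CODEC_CAP_DELAY can
          * flush their remaining frames, then handle -loop / -autoexit once
          * all queues have drained. */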
2405         if (eof) {
2406             if (is->video_stream >= 0) {
2407                 av_init_packet(pkt);
2408                 pkt->data = NULL;
2409                 pkt->size = 0;
2410                 pkt->stream_index = is->video_stream;
2411                 packet_queue_put(&is->videoq, pkt);
2412             }
2413             if (is->audio_stream >= 0 &&
2414                 is->audio_st->codec->codec->capabilities & CODEC_CAP_DELAY) {
2415                 av_init_packet(pkt);
2416                 pkt->data = NULL;
2417                 pkt->size = 0;
2418                 pkt->stream_index = is->audio_stream;
2419                 packet_queue_put(&is->audioq, pkt);
2420             }
2421             SDL_Delay(10);
2422             if (is->audioq.size + is->videoq.size + is->subtitleq.size == 0) {
2423                 if (loop != 1 && (!loop || --loop)) {
2424                     stream_seek(cur_stream, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
2425                 } else if (autoexit) {
2426                     ret = AVERROR_EOF;
2427                     goto fail;
2428                 }
2429             }
2430             continue;
2431         }
2432         ret = av_read_frame(ic, pkt);
2433         if (ret < 0) {
2434             if (ret == AVERROR_EOF || (ic->pb && ic->pb->eof_reached))
2435                 eof = 1;
2436             if (ic->pb && ic->pb->error)
2437                 break;
2438             SDL_Delay(100); /* wait for user event */
2439             continue;
2440         }
2441         /* check if packet is in play range specified by user, then queue, otherwise discard */
2442         pkt_in_play_range = duration == AV_NOPTS_VALUE ||
2443                 (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
2444                 av_q2d(ic->streams[pkt->stream_index]->time_base) -
2445                 (double)(start_time != AV_NOPTS_VALUE ? start_time : 0) / 1000000
2446                 <= ((double)duration / 1000000);
2447         if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
2448             packet_queue_put(&is->audioq, pkt);
2449         } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
2450             packet_queue_put(&is->videoq, pkt);
2451         } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
2452             packet_queue_put(&is->subtitleq, pkt);
2453         } else {
2454             av_free_packet(pkt);
2455         }
2456     }
2457     /* wait until the end */
2458     while (!is->abort_request) {
2459         SDL_Delay(100);
2460     }
2461
2462     ret = 0;
2463  fail:
2464     /* disable interrupting */
2465     global_video_state = NULL;
2466
2467     /* close each stream */
2468     if (is->audio_stream >= 0)
2469         stream_component_close(is, is->audio_stream);
2470     if (is->video_stream >= 0)
2471         stream_component_close(is, is->video_stream);
2472     if (is->subtitle_stream >= 0)
2473         stream_component_close(is, is->subtitle_stream);
2474     if (is->ic) {
2475         avformat_close_input(&is->ic);
2476     }
2477
2478     if (ret != 0) {
2479         SDL_Event event;
2480
2481         event.type = FF_QUIT_EVENT;
2482         event.user.data1 = is;
2483         SDL_PushEvent(&event);
2484     }
2485     return 0;
2486 }
2487
2488 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2489 {
2490     VideoState *is;
2491
2492     is = av_mallocz(sizeof(VideoState));
2493     if (!is)
2494         return NULL;
2495     av_strlcpy(is->filename, filename, sizeof(is->filename));
2496     is->iformat = iformat;
2497     is->ytop    = 0;
2498     is->xleft   = 0;
2499
2500     /* start video display */
2501     is->pictq_mutex = SDL_CreateMutex();
2502     is->pictq_cond  = SDL_CreateCond();
2503
2504     is->subpq_mutex = SDL_CreateMutex();
2505     is->subpq_cond  = SDL_CreateCond();
2506
2507     is->av_sync_type = av_sync_type;
2508     is->parse_tid    = SDL_CreateThread(decode_thread, is);
2509     if (!is->parse_tid) {
2510         av_free(is);
2511         return NULL;
2512     }
2513     return is;
2514 }
2515
2516 static void stream_cycle_channel(VideoState *is, int codec_type)
2517 {
2518     AVFormatContext *ic = is->ic;
2519     int start_index, stream_index;
2520     AVStream *st;
2521
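     /* Switch to the next usable stream of the given type, wrapping around;
      * for subtitles the search may end at -1, i.e. subtitles switched off. */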
2522     if (codec_type == AVMEDIA_TYPE_VIDEO)
2523         start_index = is->video_stream;
2524     else if (codec_type == AVMEDIA_TYPE_AUDIO)
2525         start_index = is->audio_stream;
2526     else
2527         start_index = is->subtitle_stream;
2528     if (start_index < (codec_type == AVMEDIA_TYPE_SUBTITLE ? -1 : 0))
2529         return;
2530     stream_index = start_index;
2531     for (;;) {
2532         if (++stream_index >= is->ic->nb_streams) {
2533             if (codec_type == AVMEDIA_TYPE_SUBTITLE) {
2534                 stream_index = -1;
2535                 goto the_end;
2536             } else {
2537                 stream_index = 0;
2538             }
2539         }
2541         if (stream_index == start_index)
2542             return;
2543         st = ic->streams[stream_index];
2544         if (st->codec->codec_type == codec_type) {
2545             /* check that parameters are OK */
2546             switch (codec_type) {
2547             case AVMEDIA_TYPE_AUDIO:
2548                 if (st->codec->sample_rate != 0 &&
2549                     st->codec->channels != 0)
2550                     goto the_end;
2551                 break;
2552             case AVMEDIA_TYPE_VIDEO:
2553             case AVMEDIA_TYPE_SUBTITLE:
2554                 goto the_end;
2555             default:
2556                 break;
2557             }
2558         }
2559     }
2560  the_end:
2561     stream_component_close(is, start_index);
2562     stream_component_open(is, stream_index);
2563 }
2564
2565
2566 static void toggle_full_screen(void)
2567 {
2568 #if defined(__APPLE__) && SDL_VERSION_ATLEAST(1, 2, 14)
2569     /* OS X needs to empty the picture_queue */
2570     int i;
2571     for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++)
2572         cur_stream->pictq[i].reallocate = 1;
2573 #endif
2574     is_full_screen = !is_full_screen;
2575     video_open(cur_stream);
2576 }
2577
2578 static void toggle_pause(void)
2579 {
2580     if (cur_stream)
2581         stream_pause(cur_stream);
2582     step = 0;
2583 }
2584
2585 static void step_to_next_frame(void)
2586 {
2587     if (cur_stream) {
2588         /* if the stream is paused unpause it, then step */
2589         if (cur_stream->paused)
2590             stream_pause(cur_stream);
2591     }
2592     step = 1;
2593 }
2594
2595 static void toggle_audio_display(void)
2596 {
2597     if (cur_stream) {
2598         int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2599         cur_stream->show_audio = (cur_stream->show_audio + 1) % 3;
2600         fill_rectangle(screen,
2601                        cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height,
2602                        bgcolor);
2603         SDL_UpdateRect(screen, cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height);
2604     }
2605 }
2606
2607 /* handle an event sent by the GUI */
2608 static void event_loop(void)
2609 {
2610     SDL_Event event;
2611     double incr, pos, frac;
2612
2613     for (;;) {
2614         double x;
2615         SDL_WaitEvent(&event);
2616         switch (event.type) {
2617         case SDL_KEYDOWN:
2618             if (exit_on_keydown) {
2619                 do_exit();
2620                 break;
2621             }
2622             switch (event.key.keysym.sym) {
2623             case SDLK_ESCAPE:
2624             case SDLK_q:
2625                 do_exit();
2626                 break;
2627             case SDLK_f:
2628                 toggle_full_screen();
2629                 break;
2630             case SDLK_p:
2631             case SDLK_SPACE:
2632                 toggle_pause();
2633                 break;
2634             case SDLK_s: // S: Step to next frame
2635                 step_to_next_frame();
2636                 break;
2637             case SDLK_a:
2638                 if (cur_stream)
2639                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
2640                 break;
2641             case SDLK_v:
2642                 if (cur_stream)
2643                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
2644                 break;
2645             case SDLK_t:
2646                 if (cur_stream)
2647                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
2648                 break;
2649             case SDLK_w:
2650                 toggle_audio_display();
2651                 break;
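             /* Relative seeks: arrow keys seek by +/-10 seconds or +/-1
              * minute, either by time or, with -bytes, by file position. */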
2652             case SDLK_LEFT:
2653                 incr = -10.0;
2654                 goto do_seek;
2655             case SDLK_RIGHT:
2656                 incr = 10.0;
2657                 goto do_seek;
2658             case SDLK_UP:
2659                 incr = 60.0;
2660                 goto do_seek;
2661             case SDLK_DOWN:
2662                 incr = -60.0;
2663             do_seek:
2664                 if (cur_stream) {
2665                     if (seek_by_bytes) {
2666                         if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos >= 0) {
2667                             pos = cur_stream->video_current_pos;
2668                         } else if (cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos >= 0) {
2669                             pos = cur_stream->audio_pkt.pos;
2670                         } else
2671                             pos = avio_tell(cur_stream->ic->pb);
2672                         if (cur_stream->ic->bit_rate)
2673                             incr *= cur_stream->ic->bit_rate / 8.0;
2674                         else
2675                             incr *= 180000.0;
2676                         pos += incr;
2677                         stream_seek(cur_stream, pos, incr, 1);
2678                     } else {
2679                         pos = get_master_clock(cur_stream);
2680                         pos += incr;
2681                         stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
2682                     }
2683                 }
2684                 break;
2685             default:
2686                 break;
2687             }
2688             break;
2689         case SDL_MOUSEBUTTONDOWN:
2690             if (exit_on_mousedown) {
2691                 do_exit();
2692                 break;
2693             }
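             /* fall through: a mouse click is handled like a mouse drag */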
2694         case SDL_MOUSEMOTION:
2695             if (event.type == SDL_MOUSEBUTTONDOWN) {
2696                 x = event.button.x;
2697             } else {
2698                 if (event.motion.state != SDL_PRESSED)
2699                     break;
2700                 x = event.motion.x;
2701             }
2702             if (cur_stream) {
2703                 if (seek_by_bytes || cur_stream->ic->duration <= 0) {
2704                     uint64_t size =  avio_size(cur_stream->ic->pb);
2705                     stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
2706                 } else {
2707                     int64_t ts;
2708                     int ns, hh, mm, ss;
2709                     int tns, thh, tmm, tss;
2710                     tns  = cur_stream->ic->duration / 1000000LL;
2711                     thh  = tns / 3600;
2712                     tmm  = (tns % 3600) / 60;
2713                     tss  = (tns % 60);
2714                     frac = x / cur_stream->width;
2715                     ns   = frac * tns;
2716                     hh   = ns / 3600;
2717                     mm   = (ns % 3600) / 60;
2718                     ss   = (ns % 60);
2719                     fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
2720                             hh, mm, ss, thh, tmm, tss);
2721                     ts = frac * cur_stream->ic->duration;
2722                     if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
2723                         ts += cur_stream->ic->start_time;
2724                     stream_seek(cur_stream, ts, 0, 0);
2725                 }
2726             }
2727             break;
2728         case SDL_VIDEORESIZE:
2729             if (cur_stream) {
2730                 screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
2731                                           SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
2732                 screen_width  = cur_stream->width  = event.resize.w;
2733                 screen_height = cur_stream->height = event.resize.h;
2734             }
2735             break;
2736         case SDL_QUIT:
2737         case FF_QUIT_EVENT:
2738             do_exit();
2739             break;
2740         case FF_ALLOC_EVENT:
2741             video_open(event.user.data1);
2742             alloc_picture(event.user.data1);
2743             break;
2744         case FF_REFRESH_EVENT:
2745             video_refresh_timer(event.user.data1);
2746             cur_stream->refresh = 0;
2747             break;
2748         default:
2749             break;
2750         }
2751     }
2752 }
2753
2754 static int opt_frame_size(void *optctx, const char *opt, const char *arg)
2755 {
2756     av_log(NULL, AV_LOG_ERROR,
2757            "Option '%s' has been removed, use private format options instead\n", opt);
2758     return AVERROR(EINVAL);
2759 }
2760
2761 static int opt_width(void *optctx, const char *opt, const char *arg)
2762 {
2763     screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2764     return 0;
2765 }
2766
2767 static int opt_height(void *optctx, const char *opt, const char *arg)
2768 {
2769     screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2770     return 0;
2771 }
2772
2773 static int opt_format(void *optctx, const char *opt, const char *arg)
2774 {
2775     file_iformat = av_find_input_format(arg);
2776     if (!file_iformat) {
2777         fprintf(stderr, "Unknown input format: %s\n", arg);
2778         return AVERROR(EINVAL);
2779     }
2780     return 0;
2781 }
2782
2783 static int opt_frame_pix_fmt(void *optctx, const char *opt, const char *arg)
2784 {
2785     av_log(NULL, AV_LOG_ERROR,
2786            "Option '%s' has been removed, use private format options instead\n", opt);
2787     return AVERROR(EINVAL);
2788 }
2789
2790 static int opt_sync(void *optctx, const char *opt, const char *arg)
2791 {
2792     if (!strcmp(arg, "audio"))
2793         av_sync_type = AV_SYNC_AUDIO_MASTER;
2794     else if (!strcmp(arg, "video"))
2795         av_sync_type = AV_SYNC_VIDEO_MASTER;
2796     else if (!strcmp(arg, "ext"))
2797         av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2798     else {
2799         fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
2800         exit(1);
2801     }
2802     return 0;
2803 }
2804
2805 static int opt_seek(void *optctx, const char *opt, const char *arg)
2806 {
2807     start_time = parse_time_or_die(opt, arg, 1);
2808     return 0;
2809 }
2810
2811 static int opt_duration(void *optctx, const char *opt, const char *arg)
2812 {
2813     duration = parse_time_or_die(opt, arg, 1);
2814     return 0;
2815 }
2816
2817 static int opt_vismv(void *optctx, const char *opt, const char *arg)
2818 {
2819     debug_mv = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
2820     return 0;
2821 }
2822
2823 static const OptionDef options[] = {
2824 #include "cmdutils_common_opts.h"
2825     { "x", HAS_ARG, { .func_arg = opt_width }, "force displayed width", "width" },
2826     { "y", HAS_ARG, { .func_arg = opt_height }, "force displayed height", "height" },
2827     { "s", HAS_ARG | OPT_VIDEO, { .func_arg = opt_frame_size }, "set frame size (WxH or abbreviation)", "size" },
2828     { "fs", OPT_BOOL, { &is_full_screen }, "force full screen" },
2829     { "an", OPT_BOOL, { &audio_disable }, "disable audio" },
2830     { "vn", OPT_BOOL, { &video_disable }, "disable video" },
2831     { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_AUDIO] }, "select desired audio stream", "stream_number" },
2832     { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_VIDEO] }, "select desired video stream", "stream_number" },
2833     { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_SUBTITLE] }, "select desired subtitle stream", "stream_number" },
2834     { "ss", HAS_ARG, { .func_arg = opt_seek }, "seek to a given position in seconds", "pos" },
2835     { "t", HAS_ARG, { .func_arg = opt_duration }, "play \"duration\" seconds of audio/video", "duration" },
2836     { "bytes", OPT_INT | HAS_ARG, { &seek_by_bytes }, "seek by bytes 0=off 1=on -1=auto", "val" },
2837     { "nodisp", OPT_BOOL, { &display_disable }, "disable graphical display" },
2838     { "f", HAS_ARG, { .func_arg = opt_format }, "force format", "fmt" },
2839     { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, { .func_arg = opt_frame_pix_fmt }, "set pixel format", "format" },
2840     { "stats", OPT_BOOL | OPT_EXPERT, { &show_status }, "show status", "" },
2841     { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, { &workaround_bugs }, "workaround bugs", "" },
2842     { "vismv", HAS_ARG | OPT_EXPERT, { .func_arg = opt_vismv }, "visualize motion vectors", "" },
2843     { "fast", OPT_BOOL | OPT_EXPERT, { &fast }, "non spec compliant optimizations", "" },
2844     { "genpts", OPT_BOOL | OPT_EXPERT, { &genpts }, "generate pts", "" },
2845     { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, { &decoder_reorder_pts }, "let decoder reorder pts 0=off 1=on -1=auto", ""},
2846     { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, { &skip_loop_filter }, "", "" },
2847     { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, { &skip_frame }, "", "" },
2848     { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, { &skip_idct }, "", "" },
2849     { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, { &idct }, "set idct algo",  "algo" },
2850     { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, { &error_concealment }, "set error concealment options",  "bit_mask" },
2851     { "sync", HAS_ARG | OPT_EXPERT, { .func_arg = opt_sync }, "set audio-video sync. type (type=audio/video/ext)", "type" },
2852     { "autoexit", OPT_BOOL | OPT_EXPERT, { &autoexit }, "exit at the end", "" },
2853     { "exitonkeydown", OPT_BOOL | OPT_EXPERT, { &exit_on_keydown }, "exit on key down", "" },
2854     { "exitonmousedown", OPT_BOOL | OPT_EXPERT, { &exit_on_mousedown }, "exit on mouse down", "" },
2855     { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, { &loop }, "set number of times the playback shall be looped", "loop count" },
2856     { "framedrop", OPT_BOOL | OPT_EXPERT, { &framedrop }, "drop frames when cpu is too slow", "" },
2857     { "infbuf", OPT_BOOL | OPT_EXPERT, { &infinite_buffer }, "don't limit the input buffer size (useful with realtime streams)", "" },
2858     { "window_title", OPT_STRING | HAS_ARG, { &window_title }, "set window title", "window title" },
2859 #if CONFIG_AVFILTER
2860     { "vf", OPT_STRING | HAS_ARG, { &vfilters }, "video filters", "filter list" },
2861 #endif
2862     { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, { &rdftspeed }, "rdft speed", "msecs" },
2863     { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, { opt_default }, "generic catch all option", "" },
2864     { "i", 0, { NULL }, "avconv compatibility dummy option", ""},
2865     { NULL, },
2866 };
2867
2868 static void show_usage(void)
2869 {
2870     printf("Simple media player\n");
2871     printf("usage: %s [options] input_file\n", program_name);
2872     printf("\n");
2873 }
2874
2875 void show_help_default(const char *opt, const char *arg)
2876 {
2877     av_log_set_callback(log_callback_help);
2878     show_usage();
2879     show_help_options(options, "Main options:", 0, OPT_EXPERT, 0);
2880     show_help_options(options, "Advanced options:", OPT_EXPERT, 0, 0);
2881     printf("\n");
2882     show_help_children(avcodec_get_class(), AV_OPT_FLAG_DECODING_PARAM);
2883     show_help_children(avformat_get_class(), AV_OPT_FLAG_DECODING_PARAM);
2884 #if !CONFIG_AVFILTER
2885     show_help_children(sws_get_class(), AV_OPT_FLAG_ENCODING_PARAM);
2886 #endif
2887     printf("\nWhile playing:\n"
2888            "q, ESC              quit\n"
2889            "f                   toggle full screen\n"
2890            "p, SPC              pause\n"
2891            "a                   cycle audio channel\n"
2892            "v                   cycle video channel\n"
2893            "t                   cycle subtitle channel\n"
2894            "w                   show audio waves\n"
2895            "s                   activate frame-step mode\n"
2896            "left/right          seek backward/forward 10 seconds\n"
2897            "down/up             seek backward/forward 1 minute\n"
2898            "mouse click         seek to percentage in file corresponding to fraction of width\n"
2899            );
2900 }
2901
2902 static void opt_input_file(void *optctx, const char *filename)
2903 {
2904     if (input_filename) {
2905         fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
2906                 filename, input_filename);
2907         exit(1);
2908     }
2909     if (!strcmp(filename, "-"))
2910         filename = "pipe:";
2911     input_filename = filename;
2912 }
2913
2914 /* program entry point */
2915 int main(int argc, char **argv)
2916 {
2917     int flags;
2918
2919     av_log_set_flags(AV_LOG_SKIP_REPEATED);
2920     parse_loglevel(argc, argv, options);
2921
2922     /* register all codecs, demuxers and protocols */
2923     avcodec_register_all();
2924 #if CONFIG_AVDEVICE
2925     avdevice_register_all();
2926 #endif
2927 #if CONFIG_AVFILTER
2928     avfilter_register_all();
2929 #endif
2930     av_register_all();
2931     avformat_network_init();
2932
2933     init_opts();
2934
2935     show_banner();
2936
2937     parse_options(NULL, argc, argv, options, opt_input_file);
2938
2939     if (!input_filename) {
2940         show_usage();
2941         fprintf(stderr, "An input file must be specified\n");
2942         fprintf(stderr, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
2943         exit(1);
2944     }
2945
2946     if (display_disable) {
2947         video_disable = 1;
2948     }
2949     flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
2950 #if !defined(__MINGW32__) && !defined(__APPLE__)
2951     flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
2952 #endif
2953     if (SDL_Init (flags)) {
2954         fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
2955         exit(1);
2956     }
2957
2958     if (!display_disable) {
2959         const SDL_VideoInfo *vi = SDL_GetVideoInfo();
2960         fs_screen_width = vi->current_w;
2961         fs_screen_height = vi->current_h;
2962     }
2963
2964     SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
2965     SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
2966     SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
2967
2968     av_init_packet(&flush_pkt);
2969     flush_pkt.data = (uint8_t *)"FLUSH";
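     /* flush_pkt is a sentinel packet: when a decoder thread dequeues it,
      * it calls avcodec_flush_buffers() instead of decoding it. */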
2970
2971     cur_stream = stream_open(input_filename, file_iformat);
2972
2973     event_loop();
2974
2975     /* never returns */
2976
2977     return 0;
2978 }