mpjpeg: Cope with multipart lacking the initial CRLF
[ffmpeg.git] / avplay.c
1 /*
2  * avplay : Simple Media Player based on the Libav libraries
3  * Copyright (c) 2003 Fabrice Bellard
4  *
5  * This file is part of Libav.
6  *
7  * Libav is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * Libav is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with Libav; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21
22 #include "config.h"
23 #include <inttypes.h>
24 #include <math.h>
25 #include <limits.h>
26 #include <stdint.h>
27
28 #include "libavutil/avstring.h"
29 #include "libavutil/colorspace.h"
30 #include "libavutil/display.h"
31 #include "libavutil/mathematics.h"
32 #include "libavutil/pixdesc.h"
33 #include "libavutil/imgutils.h"
34 #include "libavutil/dict.h"
35 #include "libavutil/parseutils.h"
36 #include "libavutil/samplefmt.h"
37 #include "libavutil/time.h"
38 #include "libavformat/avformat.h"
39 #include "libavdevice/avdevice.h"
40 #include "libswscale/swscale.h"
41 #include "libavresample/avresample.h"
42 #include "libavutil/opt.h"
43 #include "libavcodec/avfft.h"
44
45 #if CONFIG_AVFILTER
46 # include "libavfilter/avfilter.h"
47 # include "libavfilter/buffersink.h"
48 # include "libavfilter/buffersrc.h"
49 #endif
50
51 #include "cmdutils.h"
52
53 #include <SDL.h>
54 #include <SDL_thread.h>
55
56 #ifdef __MINGW32__
57 #undef main /* We don't want SDL to override our main() */
58 #endif
59
60 #include <assert.h>
61
62 const char program_name[] = "avplay";
63 const int program_birth_year = 2003;
64
65 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
66 #define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
67 #define MIN_FRAMES 5
68
69 /* SDL audio buffer size, in samples. Should be small to have precise
70    A/V sync as SDL does not have hardware buffer fullness info. */
71 #define SDL_AUDIO_BUFFER_SIZE 1024
72
73 /* no AV sync correction is done if below the AV sync threshold */
74 #define AV_SYNC_THRESHOLD 0.01
75 /* no AV correction is done if too big error */
76 #define AV_NOSYNC_THRESHOLD 10.0
77
78 #define FRAME_SKIP_FACTOR 0.05
79
80 /* maximum audio speed change to get correct sync */
81 #define SAMPLE_CORRECTION_PERCENT_MAX 10
82
83 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
84 #define AUDIO_DIFF_AVG_NB   20
85
86 /* NOTE: the size must be big enough to compensate the hardware audio buffersize size */
87 #define SAMPLE_ARRAY_SIZE (2 * 65536)
88
89 static int64_t sws_flags = SWS_BICUBIC;
90
91 typedef struct PacketQueue {
92     AVPacketList *first_pkt, *last_pkt;
93     int nb_packets;
94     int size;
95     int abort_request;
96     SDL_mutex *mutex;
97     SDL_cond *cond;
98 } PacketQueue;
99
100 #define VIDEO_PICTURE_QUEUE_SIZE 2
101 #define SUBPICTURE_QUEUE_SIZE 4
102
103 typedef struct VideoPicture {
104     double pts;             // presentation timestamp for this picture
105     double target_clock;    // av_gettime_relative() time at which this should be displayed ideally
106     int64_t pos;            // byte position in file
107     SDL_Overlay *bmp;
108     int width, height; /* source height & width */
109     int allocated;
110     int reallocate;
111     enum AVPixelFormat pix_fmt;
112
113     AVRational sar;
114 } VideoPicture;
115
116 typedef struct SubPicture {
117     double pts; /* presentation time stamp for this picture */
118     AVSubtitle sub;
119 } SubPicture;
120
121 enum {
122     AV_SYNC_AUDIO_MASTER, /* default choice */
123     AV_SYNC_VIDEO_MASTER,
124     AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
125 };
126
127 typedef struct VideoState {
128     SDL_Thread *parse_tid;
129     SDL_Thread *video_tid;
130     SDL_Thread *refresh_tid;
131     AVInputFormat *iformat;
132     int no_background;
133     int abort_request;
134     int paused;
135     int last_paused;
136     int seek_req;
137     int seek_flags;
138     int64_t seek_pos;
139     int64_t seek_rel;
140     int read_pause_return;
141     AVFormatContext *ic;
142
143     int audio_stream;
144
145     int av_sync_type;
146     double external_clock; /* external clock base */
147     int64_t external_clock_time;
148
149     double audio_clock;
150     double audio_diff_cum; /* used for AV difference average computation */
151     double audio_diff_avg_coef;
152     double audio_diff_threshold;
153     int audio_diff_avg_count;
154     AVStream *audio_st;
155     PacketQueue audioq;
156     int audio_hw_buf_size;
157     uint8_t silence_buf[SDL_AUDIO_BUFFER_SIZE];
158     uint8_t *audio_buf;
159     uint8_t *audio_buf1;
160     unsigned int audio_buf_size; /* in bytes */
161     int audio_buf_index; /* in bytes */
162     AVPacket audio_pkt_temp;
163     AVPacket audio_pkt;
164     enum AVSampleFormat sdl_sample_fmt;
165     uint64_t sdl_channel_layout;
166     int sdl_channels;
167     int sdl_sample_rate;
168     enum AVSampleFormat resample_sample_fmt;
169     uint64_t resample_channel_layout;
170     int resample_sample_rate;
171     AVAudioResampleContext *avr;
172     AVFrame *frame;
173
174     int show_audio; /* if true, display audio samples */
175     int16_t sample_array[SAMPLE_ARRAY_SIZE];
176     int sample_array_index;
177     int last_i_start;
178     RDFTContext *rdft;
179     int rdft_bits;
180     FFTSample *rdft_data;
181     int xpos;
182
183     SDL_Thread *subtitle_tid;
184     int subtitle_stream;
185     int subtitle_stream_changed;
186     AVStream *subtitle_st;
187     PacketQueue subtitleq;
188     SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
189     int subpq_size, subpq_rindex, subpq_windex;
190     SDL_mutex *subpq_mutex;
191     SDL_cond *subpq_cond;
192
193     double frame_timer;
194     double frame_last_pts;
195     double frame_last_delay;
196     double video_clock;             // pts of last decoded frame / predicted pts of next decoded frame
197     int video_stream;
198     AVStream *video_st;
199     PacketQueue videoq;
200     double video_current_pts;       // current displayed pts (different from video_clock if frame fifos are used)
201     double video_current_pts_drift; // video_current_pts - time (av_gettime_relative) at which we updated video_current_pts - used to have running video pts
202     int64_t video_current_pos;      // current displayed file pos
203     VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
204     int pictq_size, pictq_rindex, pictq_windex;
205     SDL_mutex *pictq_mutex;
206     SDL_cond *pictq_cond;
207 #if !CONFIG_AVFILTER
208     struct SwsContext *img_convert_ctx;
209 #endif
210
211     //    QETimer *video_timer;
212     char filename[1024];
213     int width, height, xleft, ytop;
214
215     PtsCorrectionContext pts_ctx;
216
217 #if CONFIG_AVFILTER
218     AVFilterContext *in_video_filter;   // the first filter in the video chain
219     AVFilterContext *out_video_filter;  // the last filter in the video chain
220 #endif
221
222     float skip_frames;
223     float skip_frames_index;
224     int refresh;
225 } VideoState;
226
227 /* options specified by the user */
228 static AVInputFormat *file_iformat;
229 static const char *input_filename;
230 static const char *window_title;
231 static int fs_screen_width;
232 static int fs_screen_height;
233 static int screen_width  = 0;
234 static int screen_height = 0;
235 static int audio_disable;
236 static int video_disable;
237 static int wanted_stream[AVMEDIA_TYPE_NB] = {
238     [AVMEDIA_TYPE_AUDIO]    = -1,
239     [AVMEDIA_TYPE_VIDEO]    = -1,
240     [AVMEDIA_TYPE_SUBTITLE] = -1,
241 };
242 static int seek_by_bytes = -1;
243 static int display_disable;
244 static int show_status = 1;
245 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
246 static int64_t start_time = AV_NOPTS_VALUE;
247 static int64_t duration = AV_NOPTS_VALUE;
248 static int step = 0;
249 static int workaround_bugs = 1;
250 static int fast = 0;
251 static int genpts = 0;
252 static int idct = FF_IDCT_AUTO;
253 static enum AVDiscard skip_frame       = AVDISCARD_DEFAULT;
254 static enum AVDiscard skip_idct        = AVDISCARD_DEFAULT;
255 static enum AVDiscard skip_loop_filter = AVDISCARD_DEFAULT;
256 static int error_concealment = 3;
257 static int decoder_reorder_pts = -1;
258 static int noautoexit;
259 static int exit_on_keydown;
260 static int exit_on_mousedown;
261 static int loop = 1;
262 static int framedrop = 1;
263 static int infinite_buffer = 0;
264
265 static int rdftspeed = 20;
266 #if CONFIG_AVFILTER
267 static char *vfilters = NULL;
268 #endif
269 static int autorotate = 1;
270
271 /* current context */
272 static int is_full_screen;
273 static VideoState *cur_stream;
274 static int64_t audio_callback_time;
275
276 static AVPacket flush_pkt;
277
278 #define FF_ALLOC_EVENT   (SDL_USEREVENT)
279 #define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
280 #define FF_QUIT_EVENT    (SDL_USEREVENT + 2)
281
282 static SDL_Surface *screen;
283
284 static int packet_queue_put(PacketQueue *q, AVPacket *pkt);
285
286 /* packet queue handling */
287 static void packet_queue_init(PacketQueue *q)
288 {
289     memset(q, 0, sizeof(PacketQueue));
290     q->mutex = SDL_CreateMutex();
291     q->cond = SDL_CreateCond();
292     packet_queue_put(q, &flush_pkt);
293 }
294
295 static void packet_queue_flush(PacketQueue *q)
296 {
297     AVPacketList *pkt, *pkt1;
298
299     SDL_LockMutex(q->mutex);
300     for (pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
301         pkt1 = pkt->next;
302         av_free_packet(&pkt->pkt);
303         av_freep(&pkt);
304     }
305     q->last_pkt = NULL;
306     q->first_pkt = NULL;
307     q->nb_packets = 0;
308     q->size = 0;
309     SDL_UnlockMutex(q->mutex);
310 }
311
312 static void packet_queue_end(PacketQueue *q)
313 {
314     packet_queue_flush(q);
315     SDL_DestroyMutex(q->mutex);
316     SDL_DestroyCond(q->cond);
317 }
318
319 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
320 {
321     AVPacketList *pkt1;
322
323     /* duplicate the packet */
324     if (pkt != &flush_pkt && av_dup_packet(pkt) < 0)
325         return -1;
326
327     pkt1 = av_malloc(sizeof(AVPacketList));
328     if (!pkt1)
329         return -1;
330     pkt1->pkt = *pkt;
331     pkt1->next = NULL;
332
333
334     SDL_LockMutex(q->mutex);
335
336     if (!q->last_pkt)
337
338         q->first_pkt = pkt1;
339     else
340         q->last_pkt->next = pkt1;
341     q->last_pkt = pkt1;
342     q->nb_packets++;
343     q->size += pkt1->pkt.size + sizeof(*pkt1);
344     /* XXX: should duplicate packet data in DV case */
345     SDL_CondSignal(q->cond);
346
347     SDL_UnlockMutex(q->mutex);
348     return 0;
349 }
350
351 static void packet_queue_abort(PacketQueue *q)
352 {
353     SDL_LockMutex(q->mutex);
354
355     q->abort_request = 1;
356
357     SDL_CondSignal(q->cond);
358
359     SDL_UnlockMutex(q->mutex);
360 }
361
362 /* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
363 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
364 {
365     AVPacketList *pkt1;
366     int ret;
367
368     SDL_LockMutex(q->mutex);
369
370     for (;;) {
371         if (q->abort_request) {
372             ret = -1;
373             break;
374         }
375
376         pkt1 = q->first_pkt;
377         if (pkt1) {
378             q->first_pkt = pkt1->next;
379             if (!q->first_pkt)
380                 q->last_pkt = NULL;
381             q->nb_packets--;
382             q->size -= pkt1->pkt.size + sizeof(*pkt1);
383             *pkt = pkt1->pkt;
384             av_free(pkt1);
385             ret = 1;
386             break;
387         } else if (!block) {
388             ret = 0;
389             break;
390         } else {
391             SDL_CondWait(q->cond, q->mutex);
392         }
393     }
394     SDL_UnlockMutex(q->mutex);
395     return ret;
396 }
397
398 static inline void fill_rectangle(SDL_Surface *screen,
399                                   int x, int y, int w, int h, int color)
400 {
401     SDL_Rect rect;
402     rect.x = x;
403     rect.y = y;
404     rect.w = w;
405     rect.h = h;
406     SDL_FillRect(screen, &rect, color);
407 }
408
409 #define ALPHA_BLEND(a, oldp, newp, s)\
410 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
411
412 #define RGBA_IN(r, g, b, a, s)\
413 {\
414     unsigned int v = ((const uint32_t *)(s))[0];\
415     a = (v >> 24) & 0xff;\
416     r = (v >> 16) & 0xff;\
417     g = (v >> 8) & 0xff;\
418     b = v & 0xff;\
419 }
420
421 #define YUVA_IN(y, u, v, a, s, pal)\
422 {\
423     unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
424     a = (val >> 24) & 0xff;\
425     y = (val >> 16) & 0xff;\
426     u = (val >> 8) & 0xff;\
427     v = val & 0xff;\
428 }
429
430 #define YUVA_OUT(d, y, u, v, a)\
431 {\
432     ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
433 }
434
435
436 #define BPP 1
437
438 static void blend_subrect(uint8_t *dst[4], uint16_t dst_linesize[4],
439                           const AVSubtitleRect *rect, int imgw, int imgh)
440 {
441     int wrap, wrap3, width2, skip2;
442     int y, u, v, a, u1, v1, a1, w, h;
443     uint8_t *lum, *cb, *cr;
444     const uint8_t *p;
445     const uint32_t *pal;
446     int dstx, dsty, dstw, dsth;
447
448     dstw = av_clip(rect->w, 0, imgw);
449     dsth = av_clip(rect->h, 0, imgh);
450     dstx = av_clip(rect->x, 0, imgw - dstw);
451     dsty = av_clip(rect->y, 0, imgh - dsth);
452     /* sdl has U and V inverted */
453     lum = dst[0] +  dsty       * dst_linesize[0];
454     cb  = dst[2] + (dsty >> 1) * dst_linesize[2];
455     cr  = dst[1] + (dsty >> 1) * dst_linesize[1];
456
457     width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
458     skip2 = dstx >> 1;
459     wrap = dst_linesize[0];
460     wrap3 = rect->linesize[0];
461     p = rect->data[0];
462     pal = (const uint32_t *)rect->data[1];  /* Now in YCrCb! */
463
464     if (dsty & 1) {
465         lum += dstx;
466         cb += skip2;
467         cr += skip2;
468
469         if (dstx & 1) {
470             YUVA_IN(y, u, v, a, p, pal);
471             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
472             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
473             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
474             cb++;
475             cr++;
476             lum++;
477             p += BPP;
478         }
479         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
480             YUVA_IN(y, u, v, a, p, pal);
481             u1 = u;
482             v1 = v;
483             a1 = a;
484             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
485
486             YUVA_IN(y, u, v, a, p + BPP, pal);
487             u1 += u;
488             v1 += v;
489             a1 += a;
490             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
491             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
492             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
493             cb++;
494             cr++;
495             p += 2 * BPP;
496             lum += 2;
497         }
498         if (w) {
499             YUVA_IN(y, u, v, a, p, pal);
500             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
501             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
502             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
503             p++;
504             lum++;
505         }
506         p += wrap3 - dstw * BPP;
507         lum += wrap - dstw - dstx;
508         cb += dst_linesize[2] - width2 - skip2;
509         cr += dst_linesize[1] - width2 - skip2;
510     }
511     for (h = dsth - (dsty & 1); h >= 2; h -= 2) {
512         lum += dstx;
513         cb += skip2;
514         cr += skip2;
515
516         if (dstx & 1) {
517             YUVA_IN(y, u, v, a, p, pal);
518             u1 = u;
519             v1 = v;
520             a1 = a;
521             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
522             p += wrap3;
523             lum += wrap;
524             YUVA_IN(y, u, v, a, p, pal);
525             u1 += u;
526             v1 += v;
527             a1 += a;
528             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
529             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
530             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
531             cb++;
532             cr++;
533             p += -wrap3 + BPP;
534             lum += -wrap + 1;
535         }
536         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
537             YUVA_IN(y, u, v, a, p, pal);
538             u1 = u;
539             v1 = v;
540             a1 = a;
541             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
542
543             YUVA_IN(y, u, v, a, p + BPP, pal);
544             u1 += u;
545             v1 += v;
546             a1 += a;
547             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
548             p += wrap3;
549             lum += wrap;
550
551             YUVA_IN(y, u, v, a, p, pal);
552             u1 += u;
553             v1 += v;
554             a1 += a;
555             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
556
557             YUVA_IN(y, u, v, a, p + BPP, pal);
558             u1 += u;
559             v1 += v;
560             a1 += a;
561             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
562
563             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
564             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
565
566             cb++;
567             cr++;
568             p += -wrap3 + 2 * BPP;
569             lum += -wrap + 2;
570         }
571         if (w) {
572             YUVA_IN(y, u, v, a, p, pal);
573             u1 = u;
574             v1 = v;
575             a1 = a;
576             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
577             p += wrap3;
578             lum += wrap;
579             YUVA_IN(y, u, v, a, p, pal);
580             u1 += u;
581             v1 += v;
582             a1 += a;
583             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
584             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
585             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
586             cb++;
587             cr++;
588             p += -wrap3 + BPP;
589             lum += -wrap + 1;
590         }
591         p += wrap3 + (wrap3 - dstw * BPP);
592         lum += wrap + (wrap - dstw - dstx);
593         cb += dst_linesize[2] - width2 - skip2;
594         cr += dst_linesize[1] - width2 - skip2;
595     }
596     /* handle odd height */
597     if (h) {
598         lum += dstx;
599         cb += skip2;
600         cr += skip2;
601
602         if (dstx & 1) {
603             YUVA_IN(y, u, v, a, p, pal);
604             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
605             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
606             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
607             cb++;
608             cr++;
609             lum++;
610             p += BPP;
611         }
612         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
613             YUVA_IN(y, u, v, a, p, pal);
614             u1 = u;
615             v1 = v;
616             a1 = a;
617             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
618
619             YUVA_IN(y, u, v, a, p + BPP, pal);
620             u1 += u;
621             v1 += v;
622             a1 += a;
623             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
624             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
625             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
626             cb++;
627             cr++;
628             p += 2 * BPP;
629             lum += 2;
630         }
631         if (w) {
632             YUVA_IN(y, u, v, a, p, pal);
633             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
634             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
635             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
636         }
637     }
638 }
639
640 static void free_subpicture(SubPicture *sp)
641 {
642     avsubtitle_free(&sp->sub);
643 }
644
645 static void video_image_display(VideoState *is)
646 {
647     VideoPicture *vp;
648     SubPicture *sp;
649     float aspect_ratio;
650     int width, height, x, y;
651     SDL_Rect rect;
652     int i;
653
654     vp = &is->pictq[is->pictq_rindex];
655     if (vp->bmp) {
656 #if CONFIG_AVFILTER
657          if (!vp->sar.num)
658              aspect_ratio = 0;
659          else
660              aspect_ratio = av_q2d(vp->sar);
661 #else
662
663         /* XXX: use variable in the frame */
664         if (is->video_st->sample_aspect_ratio.num)
665             aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
666         else if (is->video_st->codec->sample_aspect_ratio.num)
667             aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
668         else
669             aspect_ratio = 0;
670 #endif
671         if (aspect_ratio <= 0.0)
672             aspect_ratio = 1.0;
673         aspect_ratio *= (float)vp->width / (float)vp->height;
674
675         if (is->subtitle_st)
676         {
677             if (is->subpq_size > 0)
678             {
679                 sp = &is->subpq[is->subpq_rindex];
680
681                 if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
682                 {
683                     SDL_LockYUVOverlay (vp->bmp);
684
685                     for (i = 0; i < sp->sub.num_rects; i++)
686                         blend_subrect(vp->bmp->pixels, vp->bmp->pitches,
687                                       sp->sub.rects[i], vp->bmp->w, vp->bmp->h);
688
689                     SDL_UnlockYUVOverlay (vp->bmp);
690                 }
691             }
692         }
693
694
695         /* XXX: we suppose the screen has a 1.0 pixel ratio */
696         height = is->height;
697         width = ((int)rint(height * aspect_ratio)) & ~1;
698         if (width > is->width) {
699             width = is->width;
700             height = ((int)rint(width / aspect_ratio)) & ~1;
701         }
702         x = (is->width - width) / 2;
703         y = (is->height - height) / 2;
704         is->no_background = 0;
705         rect.x = is->xleft + x;
706         rect.y = is->ytop  + y;
707         rect.w = width;
708         rect.h = height;
709         SDL_DisplayYUVOverlay(vp->bmp, &rect);
710     }
711 }
712
713 /* get the current audio output buffer size, in samples. With SDL, we
714    cannot have a precise information */
715 static int audio_write_get_buf_size(VideoState *is)
716 {
717     return is->audio_buf_size - is->audio_buf_index;
718 }
719
720 static inline int compute_mod(int a, int b)
721 {
722     a = a % b;
723     if (a >= 0)
724         return a;
725     else
726         return a + b;
727 }
728
729 static void video_audio_display(VideoState *s)
730 {
731     int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
732     int ch, channels, h, h2, bgcolor, fgcolor;
733     int16_t time_diff;
734     int rdft_bits, nb_freq;
735
736     for (rdft_bits = 1; (1 << rdft_bits) < 2 * s->height; rdft_bits++)
737         ;
738     nb_freq = 1 << (rdft_bits - 1);
739
740     /* compute display index : center on currently output samples */
741     channels = s->sdl_channels;
742     nb_display_channels = channels;
743     if (!s->paused) {
744         int data_used = s->show_audio == 1 ? s->width : (2 * nb_freq);
745         n = 2 * channels;
746         delay = audio_write_get_buf_size(s);
747         delay /= n;
748
749         /* to be more precise, we take into account the time spent since
750            the last buffer computation */
751         if (audio_callback_time) {
752             time_diff = av_gettime_relative() - audio_callback_time;
753             delay -= (time_diff * s->sdl_sample_rate) / 1000000;
754         }
755
756         delay += 2 * data_used;
757         if (delay < data_used)
758             delay = data_used;
759
760         i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
761         if (s->show_audio == 1) {
762             h = INT_MIN;
763             for (i = 0; i < 1000; i += channels) {
764                 int idx = (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
765                 int a = s->sample_array[idx];
766                 int b = s->sample_array[(idx + 4 * channels) % SAMPLE_ARRAY_SIZE];
767                 int c = s->sample_array[(idx + 5 * channels) % SAMPLE_ARRAY_SIZE];
768                 int d = s->sample_array[(idx + 9 * channels) % SAMPLE_ARRAY_SIZE];
769                 int score = a - d;
770                 if (h < score && (b ^ c) < 0) {
771                     h = score;
772                     i_start = idx;
773                 }
774             }
775         }
776
777         s->last_i_start = i_start;
778     } else {
779         i_start = s->last_i_start;
780     }
781
782     bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
783     if (s->show_audio == 1) {
784         fill_rectangle(screen,
785                        s->xleft, s->ytop, s->width, s->height,
786                        bgcolor);
787
788         fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
789
790         /* total height for one channel */
791         h = s->height / nb_display_channels;
792         /* graph height / 2 */
793         h2 = (h * 9) / 20;
794         for (ch = 0; ch < nb_display_channels; ch++) {
795             i = i_start + ch;
796             y1 = s->ytop + ch * h + (h / 2); /* position of center line */
797             for (x = 0; x < s->width; x++) {
798                 y = (s->sample_array[i] * h2) >> 15;
799                 if (y < 0) {
800                     y = -y;
801                     ys = y1 - y;
802                 } else {
803                     ys = y1;
804                 }
805                 fill_rectangle(screen,
806                                s->xleft + x, ys, 1, y,
807                                fgcolor);
808                 i += channels;
809                 if (i >= SAMPLE_ARRAY_SIZE)
810                     i -= SAMPLE_ARRAY_SIZE;
811             }
812         }
813
814         fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
815
816         for (ch = 1; ch < nb_display_channels; ch++) {
817             y = s->ytop + ch * h;
818             fill_rectangle(screen,
819                            s->xleft, y, s->width, 1,
820                            fgcolor);
821         }
822         SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
823     } else {
824         nb_display_channels= FFMIN(nb_display_channels, 2);
825         if (rdft_bits != s->rdft_bits) {
826             av_rdft_end(s->rdft);
827             av_free(s->rdft_data);
828             s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
829             s->rdft_bits = rdft_bits;
830             s->rdft_data = av_malloc(4 * nb_freq * sizeof(*s->rdft_data));
831         }
832         {
833             FFTSample *data[2];
834             for (ch = 0; ch < nb_display_channels; ch++) {
835                 data[ch] = s->rdft_data + 2 * nb_freq * ch;
836                 i = i_start + ch;
837                 for (x = 0; x < 2 * nb_freq; x++) {
838                     double w = (x-nb_freq) * (1.0 / nb_freq);
839                     data[ch][x] = s->sample_array[i] * (1.0 - w * w);
840                     i += channels;
841                     if (i >= SAMPLE_ARRAY_SIZE)
842                         i -= SAMPLE_ARRAY_SIZE;
843                 }
844                 av_rdft_calc(s->rdft, data[ch]);
845             }
846             /* Least efficient way to do this, we should of course
847              * directly access it but it is more than fast enough. */
848             for (y = 0; y < s->height; y++) {
849                 double w = 1 / sqrt(nb_freq);
850                 int a = sqrt(w * sqrt(data[0][2 * y + 0] * data[0][2 * y + 0] + data[0][2 * y + 1] * data[0][2 * y + 1]));
851                 int b = (nb_display_channels == 2 ) ? sqrt(w * sqrt(data[1][2 * y + 0] * data[1][2 * y + 0]
852                        + data[1][2 * y + 1] * data[1][2 * y + 1])) : a;
853                 a = FFMIN(a, 255);
854                 b = FFMIN(b, 255);
855                 fgcolor = SDL_MapRGB(screen->format, a, b, (a + b) / 2);
856
857                 fill_rectangle(screen,
858                             s->xpos, s->height-y, 1, 1,
859                             fgcolor);
860             }
861         }
862         SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
863         s->xpos++;
864         if (s->xpos >= s->width)
865             s->xpos= s->xleft;
866     }
867 }
868
869 static int video_open(VideoState *is)
870 {
871     int flags = SDL_HWSURFACE | SDL_ASYNCBLIT | SDL_HWACCEL;
872     int w,h;
873
874     if (is_full_screen) flags |= SDL_FULLSCREEN;
875     else                flags |= SDL_RESIZABLE;
876
877     if (is_full_screen && fs_screen_width) {
878         w = fs_screen_width;
879         h = fs_screen_height;
880     } else if (!is_full_screen && screen_width) {
881         w = screen_width;
882         h = screen_height;
883 #if CONFIG_AVFILTER
884     } else if (is->out_video_filter && is->out_video_filter->inputs[0]) {
885         w = is->out_video_filter->inputs[0]->w;
886         h = is->out_video_filter->inputs[0]->h;
887 #else
888     } else if (is->video_st && is->video_st->codec->width) {
889         w = is->video_st->codec->width;
890         h = is->video_st->codec->height;
891 #endif
892     } else {
893         w = 640;
894         h = 480;
895     }
896     if (screen && is->width == screen->w && screen->w == w
897        && is->height== screen->h && screen->h == h)
898         return 0;
899
900 #if defined(__APPLE__) && !SDL_VERSION_ATLEAST(1, 2, 14)
901     /* setting bits_per_pixel = 0 or 32 causes blank video on OS X and older SDL */
902     screen = SDL_SetVideoMode(w, h, 24, flags);
903 #else
904     screen = SDL_SetVideoMode(w, h, 0, flags);
905 #endif
906     if (!screen) {
907         fprintf(stderr, "SDL: could not set video mode - exiting\n");
908         return -1;
909     }
910     if (!window_title)
911         window_title = input_filename;
912     SDL_WM_SetCaption(window_title, window_title);
913
914     is->width  = screen->w;
915     is->height = screen->h;
916
917     return 0;
918 }
919
920 /* display the current picture, if any */
921 static void video_display(VideoState *is)
922 {
923     if (!screen)
924         video_open(cur_stream);
925     if (is->audio_st && is->show_audio)
926         video_audio_display(is);
927     else if (is->video_st)
928         video_image_display(is);
929 }
930
931 static int refresh_thread(void *opaque)
932 {
933     VideoState *is= opaque;
934     while (!is->abort_request) {
935         SDL_Event event;
936         event.type = FF_REFRESH_EVENT;
937         event.user.data1 = opaque;
938         if (!is->refresh) {
939             is->refresh = 1;
940             SDL_PushEvent(&event);
941         }
942         av_usleep(is->audio_st && is->show_audio ? rdftspeed * 1000 : 5000); // FIXME ideally we should wait the correct time but SDLs event passing is so slow it would be silly
943     }
944     return 0;
945 }
946
947 /* get the current audio clock value */
948 static double get_audio_clock(VideoState *is)
949 {
950     double pts;
951     int hw_buf_size, bytes_per_sec;
952     pts = is->audio_clock;
953     hw_buf_size = audio_write_get_buf_size(is);
954     bytes_per_sec = 0;
955     if (is->audio_st) {
956         bytes_per_sec = is->sdl_sample_rate * is->sdl_channels *
957                         av_get_bytes_per_sample(is->sdl_sample_fmt);
958     }
959     if (bytes_per_sec)
960         pts -= (double)hw_buf_size / bytes_per_sec;
961     return pts;
962 }
963
964 /* get the current video clock value */
965 static double get_video_clock(VideoState *is)
966 {
967     if (is->paused) {
968         return is->video_current_pts;
969     } else {
970         return is->video_current_pts_drift + av_gettime_relative() / 1000000.0;
971     }
972 }
973
974 /* get the current external clock value */
975 static double get_external_clock(VideoState *is)
976 {
977     int64_t ti;
978     ti = av_gettime_relative();
979     return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
980 }
981
982 /* get the current master clock value */
983 static double get_master_clock(VideoState *is)
984 {
985     double val;
986
987     if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
988         if (is->video_st)
989             val = get_video_clock(is);
990         else
991             val = get_audio_clock(is);
992     } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
993         if (is->audio_st)
994             val = get_audio_clock(is);
995         else
996             val = get_video_clock(is);
997     } else {
998         val = get_external_clock(is);
999     }
1000     return val;
1001 }
1002
1003 /* seek in the stream */
1004 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1005 {
1006     if (!is->seek_req) {
1007         is->seek_pos = pos;
1008         is->seek_rel = rel;
1009         is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1010         if (seek_by_bytes)
1011             is->seek_flags |= AVSEEK_FLAG_BYTE;
1012         is->seek_req = 1;
1013     }
1014 }
1015
1016 /* pause or resume the video */
1017 static void stream_pause(VideoState *is)
1018 {
1019     if (is->paused) {
1020         is->frame_timer += av_gettime_relative() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
1021         if (is->read_pause_return != AVERROR(ENOSYS)) {
1022             is->video_current_pts = is->video_current_pts_drift + av_gettime_relative() / 1000000.0;
1023         }
1024         is->video_current_pts_drift = is->video_current_pts - av_gettime_relative() / 1000000.0;
1025     }
1026     is->paused = !is->paused;
1027 }
1028
1029 static double compute_target_time(double frame_current_pts, VideoState *is)
1030 {
1031     double delay, sync_threshold, diff = 0;
1032
1033     /* compute nominal delay */
1034     delay = frame_current_pts - is->frame_last_pts;
1035     if (delay <= 0 || delay >= 10.0) {
1036         /* if incorrect delay, use previous one */
1037         delay = is->frame_last_delay;
1038     } else {
1039         is->frame_last_delay = delay;
1040     }
1041     is->frame_last_pts = frame_current_pts;
1042
1043     /* update delay to follow master synchronisation source */
1044     if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1045          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1046         /* if video is slave, we try to correct big delays by
1047            duplicating or deleting a frame */
1048         diff = get_video_clock(is) - get_master_clock(is);
1049
1050         /* skip or repeat frame. We take into account the
1051            delay to compute the threshold. I still don't know
1052            if it is the best guess */
1053         sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1054         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1055             if (diff <= -sync_threshold)
1056                 delay = 0;
1057             else if (diff >= sync_threshold)
1058                 delay = 2 * delay;
1059         }
1060     }
1061     is->frame_timer += delay;
1062
1063     av_log(NULL, AV_LOG_TRACE, "video: delay=%0.3f pts=%0.3f A-V=%f\n",
1064             delay, frame_current_pts, -diff);
1065
1066     return is->frame_timer;
1067 }
1068
1069 /* called to display each frame */
1070 static void video_refresh_timer(void *opaque)
1071 {
1072     VideoState *is = opaque;
1073     VideoPicture *vp;
1074
1075     SubPicture *sp, *sp2;
1076
1077     if (is->video_st) {
1078 retry:
1079         if (is->pictq_size == 0) {
1080             // nothing to do, no picture to display in the que
1081         } else {
1082             double time = av_gettime_relative() / 1000000.0;
1083             double next_target;
1084             /* dequeue the picture */
1085             vp = &is->pictq[is->pictq_rindex];
1086
1087             if (time < vp->target_clock)
1088                 return;
1089             /* update current video pts */
1090             is->video_current_pts = vp->pts;
1091             is->video_current_pts_drift = is->video_current_pts - time;
1092             is->video_current_pos = vp->pos;
1093             if (is->pictq_size > 1) {
1094                 VideoPicture *nextvp = &is->pictq[(is->pictq_rindex + 1) % VIDEO_PICTURE_QUEUE_SIZE];
1095                 assert(nextvp->target_clock >= vp->target_clock);
1096                 next_target= nextvp->target_clock;
1097             } else {
1098                 next_target = vp->target_clock + is->video_clock - vp->pts; // FIXME pass durations cleanly
1099             }
1100             if (framedrop && time > next_target) {
1101                 is->skip_frames *= 1.0 + FRAME_SKIP_FACTOR;
1102                 if (is->pictq_size > 1 || time > next_target + 0.5) {
1103                     /* update queue size and signal for next picture */
1104                     if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1105                         is->pictq_rindex = 0;
1106
1107                     SDL_LockMutex(is->pictq_mutex);
1108                     is->pictq_size--;
1109                     SDL_CondSignal(is->pictq_cond);
1110                     SDL_UnlockMutex(is->pictq_mutex);
1111                     goto retry;
1112                 }
1113             }
1114
1115             if (is->subtitle_st) {
1116                 if (is->subtitle_stream_changed) {
1117                     SDL_LockMutex(is->subpq_mutex);
1118
1119                     while (is->subpq_size) {
1120                         free_subpicture(&is->subpq[is->subpq_rindex]);
1121
1122                         /* update queue size and signal for next picture */
1123                         if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1124                             is->subpq_rindex = 0;
1125
1126                         is->subpq_size--;
1127                     }
1128                     is->subtitle_stream_changed = 0;
1129
1130                     SDL_CondSignal(is->subpq_cond);
1131                     SDL_UnlockMutex(is->subpq_mutex);
1132                 } else {
1133                     if (is->subpq_size > 0) {
1134                         sp = &is->subpq[is->subpq_rindex];
1135
1136                         if (is->subpq_size > 1)
1137                             sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1138                         else
1139                             sp2 = NULL;
1140
1141                         if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1142                                 || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1143                         {
1144                             free_subpicture(sp);
1145
1146                             /* update queue size and signal for next picture */
1147                             if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1148                                 is->subpq_rindex = 0;
1149
1150                             SDL_LockMutex(is->subpq_mutex);
1151                             is->subpq_size--;
1152                             SDL_CondSignal(is->subpq_cond);
1153                             SDL_UnlockMutex(is->subpq_mutex);
1154                         }
1155                     }
1156                 }
1157             }
1158
1159             /* display picture */
1160             if (!display_disable)
1161                 video_display(is);
1162
1163             /* update queue size and signal for next picture */
1164             if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1165                 is->pictq_rindex = 0;
1166
1167             SDL_LockMutex(is->pictq_mutex);
1168             is->pictq_size--;
1169             SDL_CondSignal(is->pictq_cond);
1170             SDL_UnlockMutex(is->pictq_mutex);
1171         }
1172     } else if (is->audio_st) {
1173         /* draw the next audio frame */
1174
1175         /* if only audio stream, then display the audio bars (better
1176            than nothing, just to test the implementation */
1177
1178         /* display picture */
1179         if (!display_disable)
1180             video_display(is);
1181     }
1182     if (show_status) {
1183         static int64_t last_time;
1184         int64_t cur_time;
1185         int aqsize, vqsize, sqsize;
1186         double av_diff;
1187
1188         cur_time = av_gettime_relative();
1189         if (!last_time || (cur_time - last_time) >= 30000) {
1190             aqsize = 0;
1191             vqsize = 0;
1192             sqsize = 0;
1193             if (is->audio_st)
1194                 aqsize = is->audioq.size;
1195             if (is->video_st)
1196                 vqsize = is->videoq.size;
1197             if (is->subtitle_st)
1198                 sqsize = is->subtitleq.size;
1199             av_diff = 0;
1200             if (is->audio_st && is->video_st)
1201                 av_diff = get_audio_clock(is) - get_video_clock(is);
1202             printf("%7.2f A-V:%7.3f s:%3.1f aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
1203                    get_master_clock(is), av_diff, FFMAX(is->skip_frames - 1, 0), aqsize / 1024,
1204                    vqsize / 1024, sqsize, is->pts_ctx.num_faulty_dts, is->pts_ctx.num_faulty_pts);
1205             fflush(stdout);
1206             last_time = cur_time;
1207         }
1208     }
1209 }
1210
1211 static void stream_close(VideoState *is)
1212 {
1213     VideoPicture *vp;
1214     int i;
1215     /* XXX: use a special url_shutdown call to abort parse cleanly */
1216     is->abort_request = 1;
1217     SDL_WaitThread(is->parse_tid, NULL);
1218     SDL_WaitThread(is->refresh_tid, NULL);
1219
1220     /* free all pictures */
1221     for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
1222         vp = &is->pictq[i];
1223         if (vp->bmp) {
1224             SDL_FreeYUVOverlay(vp->bmp);
1225             vp->bmp = NULL;
1226         }
1227     }
1228     SDL_DestroyMutex(is->pictq_mutex);
1229     SDL_DestroyCond(is->pictq_cond);
1230     SDL_DestroyMutex(is->subpq_mutex);
1231     SDL_DestroyCond(is->subpq_cond);
1232 #if !CONFIG_AVFILTER
1233     if (is->img_convert_ctx)
1234         sws_freeContext(is->img_convert_ctx);
1235 #endif
1236     av_free(is);
1237 }
1238
1239 static void do_exit(void)
1240 {
1241     if (cur_stream) {
1242         stream_close(cur_stream);
1243         cur_stream = NULL;
1244     }
1245     uninit_opts();
1246     avformat_network_deinit();
1247     if (show_status)
1248         printf("\n");
1249     SDL_Quit();
1250     av_log(NULL, AV_LOG_QUIET, "");
1251     exit(0);
1252 }
1253
1254 /* allocate a picture (needs to do that in main thread to avoid
1255    potential locking problems */
1256 static void alloc_picture(void *opaque)
1257 {
1258     VideoState *is = opaque;
1259     VideoPicture *vp;
1260
1261     vp = &is->pictq[is->pictq_windex];
1262
1263     if (vp->bmp)
1264         SDL_FreeYUVOverlay(vp->bmp);
1265
1266 #if CONFIG_AVFILTER
1267     vp->width   = is->out_video_filter->inputs[0]->w;
1268     vp->height  = is->out_video_filter->inputs[0]->h;
1269     vp->pix_fmt = is->out_video_filter->inputs[0]->format;
1270 #else
1271     vp->width   = is->video_st->codec->width;
1272     vp->height  = is->video_st->codec->height;
1273     vp->pix_fmt = is->video_st->codec->pix_fmt;
1274 #endif
1275
1276     vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
1277                                    SDL_YV12_OVERLAY,
1278                                    screen);
1279     if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
1280         /* SDL allocates a buffer smaller than requested if the video
1281          * overlay hardware is unable to support the requested size. */
1282         fprintf(stderr, "Error: the video system does not support an image\n"
1283                         "size of %dx%d pixels. Try using -vf \"scale=w:h\"\n"
1284                         "to reduce the image size.\n", vp->width, vp->height );
1285         do_exit();
1286     }
1287
1288     SDL_LockMutex(is->pictq_mutex);
1289     vp->allocated = 1;
1290     SDL_CondSignal(is->pictq_cond);
1291     SDL_UnlockMutex(is->pictq_mutex);
1292 }
1293
1294 /* The 'pts' parameter is the dts of the packet / pts of the frame and
1295  * guessed if not known. */
1296 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, int64_t pos)
1297 {
1298     VideoPicture *vp;
1299 #if !CONFIG_AVFILTER
1300     int dst_pix_fmt = AV_PIX_FMT_YUV420P;
1301 #endif
1302     /* wait until we have space to put a new picture */
1303     SDL_LockMutex(is->pictq_mutex);
1304
1305     if (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE && !is->refresh)
1306         is->skip_frames = FFMAX(1.0 - FRAME_SKIP_FACTOR, is->skip_frames * (1.0 - FRAME_SKIP_FACTOR));
1307
1308     while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1309            !is->videoq.abort_request) {
1310         SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1311     }
1312     SDL_UnlockMutex(is->pictq_mutex);
1313
1314     if (is->videoq.abort_request)
1315         return -1;
1316
1317     vp = &is->pictq[is->pictq_windex];
1318
1319     vp->sar = src_frame->sample_aspect_ratio;
1320
1321     /* alloc or resize hardware picture buffer */
1322     if (!vp->bmp || vp->reallocate ||
1323 #if CONFIG_AVFILTER
1324         vp->width  != is->out_video_filter->inputs[0]->w ||
1325         vp->height != is->out_video_filter->inputs[0]->h) {
1326 #else
1327         vp->width != is->video_st->codec->width ||
1328         vp->height != is->video_st->codec->height) {
1329 #endif
1330         SDL_Event event;
1331
1332         vp->allocated  = 0;
1333         vp->reallocate = 0;
1334
1335         /* the allocation must be done in the main thread to avoid
1336            locking problems */
1337         event.type = FF_ALLOC_EVENT;
1338         event.user.data1 = is;
1339         SDL_PushEvent(&event);
1340
1341         /* wait until the picture is allocated */
1342         SDL_LockMutex(is->pictq_mutex);
1343         while (!vp->allocated && !is->videoq.abort_request) {
1344             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1345         }
1346         SDL_UnlockMutex(is->pictq_mutex);
1347
1348         if (is->videoq.abort_request)
1349             return -1;
1350     }
1351
1352     /* if the frame is not skipped, then display it */
1353     if (vp->bmp) {
1354         uint8_t *data[4];
1355         int linesize[4];
1356
1357         /* get a pointer on the bitmap */
1358         SDL_LockYUVOverlay (vp->bmp);
1359
1360         data[0] = vp->bmp->pixels[0];
1361         data[1] = vp->bmp->pixels[2];
1362         data[2] = vp->bmp->pixels[1];
1363
1364         linesize[0] = vp->bmp->pitches[0];
1365         linesize[1] = vp->bmp->pitches[2];
1366         linesize[2] = vp->bmp->pitches[1];
1367
1368 #if CONFIG_AVFILTER
1369         // FIXME use direct rendering
1370         av_image_copy(data, linesize, src_frame->data, src_frame->linesize,
1371                       vp->pix_fmt, vp->width, vp->height);
1372 #else
1373         av_opt_get_int(sws_opts, "sws_flags", 0, &sws_flags);
1374         is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
1375             vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
1376             dst_pix_fmt, sws_flags, NULL, NULL, NULL);
1377         if (!is->img_convert_ctx) {
1378             fprintf(stderr, "Cannot initialize the conversion context\n");
1379             exit(1);
1380         }
1381         sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1382                   0, vp->height, data, linesize);
1383 #endif
1384         /* update the bitmap content */
1385         SDL_UnlockYUVOverlay(vp->bmp);
1386
1387         vp->pts = pts;
1388         vp->pos = pos;
1389
1390         /* now we can update the picture count */
1391         if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1392             is->pictq_windex = 0;
1393         SDL_LockMutex(is->pictq_mutex);
1394         vp->target_clock = compute_target_time(vp->pts, is);
1395
1396         is->pictq_size++;
1397         SDL_UnlockMutex(is->pictq_mutex);
1398     }
1399     return 0;
1400 }
1401
1402 /* Compute the exact PTS for the picture if it is omitted in the stream.
1403  * The 'pts1' parameter is the dts of the packet / pts of the frame. */
1404 static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
1405 {
1406     double frame_delay, pts;
1407     int ret;
1408
1409     pts = pts1;
1410
1411     if (pts != 0) {
1412         /* update video clock with pts, if present */
1413         is->video_clock = pts;
1414     } else {
1415         pts = is->video_clock;
1416     }
1417     /* update video clock for next frame */
1418     frame_delay = av_q2d(is->video_st->codec->time_base);
1419     /* for MPEG2, the frame can be repeated, so we update the
1420        clock accordingly */
1421     frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1422     is->video_clock += frame_delay;
1423
1424     ret = queue_picture(is, src_frame, pts, pos);
1425     av_frame_unref(src_frame);
1426     return ret;
1427 }
1428
1429 static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
1430 {
1431     int got_picture, i;
1432
1433     if (packet_queue_get(&is->videoq, pkt, 1) < 0)
1434         return -1;
1435
1436     if (pkt->data == flush_pkt.data) {
1437         avcodec_flush_buffers(is->video_st->codec);
1438
1439         SDL_LockMutex(is->pictq_mutex);
1440         // Make sure there are no long delay timers (ideally we should just flush the que but thats harder)
1441         for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
1442             is->pictq[i].target_clock= 0;
1443         }
1444         while (is->pictq_size && !is->videoq.abort_request) {
1445             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1446         }
1447         is->video_current_pos = -1;
1448         SDL_UnlockMutex(is->pictq_mutex);
1449
1450         init_pts_correction(&is->pts_ctx);
1451         is->frame_last_pts = AV_NOPTS_VALUE;
1452         is->frame_last_delay = 0;
1453         is->frame_timer = (double)av_gettime_relative() / 1000000.0;
1454         is->skip_frames = 1;
1455         is->skip_frames_index = 0;
1456         return 0;
1457     }
1458
1459     avcodec_decode_video2(is->video_st->codec, frame, &got_picture, pkt);
1460
1461     if (got_picture) {
1462         if (decoder_reorder_pts == -1) {
1463             *pts = guess_correct_pts(&is->pts_ctx, frame->pkt_pts, frame->pkt_dts);
1464         } else if (decoder_reorder_pts) {
1465             *pts = frame->pkt_pts;
1466         } else {
1467             *pts = frame->pkt_dts;
1468         }
1469
1470         if (*pts == AV_NOPTS_VALUE) {
1471             *pts = 0;
1472         }
1473         if (is->video_st->sample_aspect_ratio.num) {
1474             frame->sample_aspect_ratio = is->video_st->sample_aspect_ratio;
1475         }
1476
1477         is->skip_frames_index += 1;
1478         if (is->skip_frames_index >= is->skip_frames) {
1479             is->skip_frames_index -= FFMAX(is->skip_frames, 1.0);
1480             return 1;
1481         }
1482         av_frame_unref(frame);
1483     }
1484     return 0;
1485 }
1486
1487 #if CONFIG_AVFILTER
1488 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters)
1489 {
1490     char sws_flags_str[128];
1491     char buffersrc_args[256];
1492     int ret;
1493     AVFilterContext *filt_src = NULL, *filt_out = NULL, *last_filter;
1494     AVCodecContext *codec = is->video_st->codec;
1495
1496     snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%"PRId64, sws_flags);
1497     graph->scale_sws_opts = av_strdup(sws_flags_str);
1498
1499     snprintf(buffersrc_args, sizeof(buffersrc_args), "%d:%d:%d:%d:%d:%d:%d",
1500              codec->width, codec->height, codec->pix_fmt,
1501              is->video_st->time_base.num, is->video_st->time_base.den,
1502              codec->sample_aspect_ratio.num, codec->sample_aspect_ratio.den);
1503
1504
1505     if ((ret = avfilter_graph_create_filter(&filt_src,
1506                                             avfilter_get_by_name("buffer"),
1507                                             "src", buffersrc_args, NULL,
1508                                             graph)) < 0)
1509         return ret;
1510     if ((ret = avfilter_graph_create_filter(&filt_out,
1511                                             avfilter_get_by_name("buffersink"),
1512                                             "out", NULL, NULL, graph)) < 0)
1513         return ret;
1514
1515     last_filter = filt_out;
1516
1517 /* Note: this macro adds a filter before the lastly added filter, so the
1518  * processing order of the filters is in reverse */
1519 #define INSERT_FILT(name, arg) do {                                          \
1520     AVFilterContext *filt_ctx;                                               \
1521                                                                              \
1522     ret = avfilter_graph_create_filter(&filt_ctx,                            \
1523                                        avfilter_get_by_name(name),           \
1524                                        "avplay_" name, arg, NULL, graph);    \
1525     if (ret < 0)                                                             \
1526         return ret;                                                          \
1527                                                                              \
1528     ret = avfilter_link(filt_ctx, 0, last_filter, 0);                        \
1529     if (ret < 0)                                                             \
1530         return ret;                                                          \
1531                                                                              \
1532     last_filter = filt_ctx;                                                  \
1533 } while (0)
1534
1535     INSERT_FILT("format", "yuv420p");
1536
1537     if (autorotate) {
1538         uint8_t* displaymatrix = av_stream_get_side_data(is->video_st,
1539                                                          AV_PKT_DATA_DISPLAYMATRIX, NULL);
1540         if (displaymatrix) {
1541             double rot = av_display_rotation_get((int32_t*) displaymatrix);
1542             if (rot < -135 || rot > 135) {
1543                 INSERT_FILT("vflip", NULL);
1544                 INSERT_FILT("hflip", NULL);
1545             } else if (rot < -45) {
1546                 INSERT_FILT("transpose", "dir=clock");
1547             } else if (rot > 45) {
1548                 INSERT_FILT("transpose", "dir=cclock");
1549             }
1550         }
1551     }
1552
1553     if (vfilters) {
1554         AVFilterInOut *outputs = avfilter_inout_alloc();
1555         AVFilterInOut *inputs  = avfilter_inout_alloc();
1556
1557         outputs->name    = av_strdup("in");
1558         outputs->filter_ctx = filt_src;
1559         outputs->pad_idx = 0;
1560         outputs->next    = NULL;
1561
1562         inputs->name    = av_strdup("out");
1563         inputs->filter_ctx = last_filter;
1564         inputs->pad_idx = 0;
1565         inputs->next    = NULL;
1566
1567         if ((ret = avfilter_graph_parse(graph, vfilters, inputs, outputs, NULL)) < 0)
1568             return ret;
1569     } else {
1570         if ((ret = avfilter_link(filt_src, 0, last_filter, 0)) < 0)
1571             return ret;
1572     }
1573
1574     if ((ret = avfilter_graph_config(graph, NULL)) < 0)
1575         return ret;
1576
1577     is->in_video_filter  = filt_src;
1578     is->out_video_filter = filt_out;
1579
1580     return ret;
1581 }
1582
1583 #endif  /* CONFIG_AVFILTER */
1584
1585 static int video_thread(void *arg)
1586 {
1587     AVPacket pkt = { 0 };
1588     VideoState *is = arg;
1589     AVFrame *frame = av_frame_alloc();
1590     int64_t pts_int;
1591     double pts;
1592     int ret;
1593
1594 #if CONFIG_AVFILTER
1595     AVFilterGraph *graph = avfilter_graph_alloc();
1596     AVFilterContext *filt_out = NULL, *filt_in = NULL;
1597     int last_w = is->video_st->codec->width;
1598     int last_h = is->video_st->codec->height;
1599     if (!graph) {
1600         av_frame_free(&frame);
1601         return AVERROR(ENOMEM);
1602     }
1603
1604     if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
1605         goto the_end;
1606     filt_in  = is->in_video_filter;
1607     filt_out = is->out_video_filter;
1608 #endif
1609
1610     if (!frame) {
1611 #if CONFIG_AVFILTER
1612         avfilter_graph_free(&graph);
1613 #endif
1614         return AVERROR(ENOMEM);
1615     }
1616
1617     for (;;) {
1618 #if CONFIG_AVFILTER
1619         AVRational tb;
1620 #endif
1621         while (is->paused && !is->videoq.abort_request)
1622             SDL_Delay(10);
1623
1624         av_free_packet(&pkt);
1625
1626         ret = get_video_frame(is, frame, &pts_int, &pkt);
1627         if (ret < 0)
1628             goto the_end;
1629
1630         if (!ret)
1631             continue;
1632
1633 #if CONFIG_AVFILTER
1634         if (   last_w != is->video_st->codec->width
1635             || last_h != is->video_st->codec->height) {
1636             av_log(NULL, AV_LOG_TRACE, "Changing size %dx%d -> %dx%d\n", last_w, last_h,
1637                     is->video_st->codec->width, is->video_st->codec->height);
1638             avfilter_graph_free(&graph);
1639             graph = avfilter_graph_alloc();
1640             if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
1641                 goto the_end;
1642             filt_in  = is->in_video_filter;
1643             filt_out = is->out_video_filter;
1644             last_w = is->video_st->codec->width;
1645             last_h = is->video_st->codec->height;
1646         }
1647
1648         frame->pts = pts_int;
1649         ret = av_buffersrc_add_frame(filt_in, frame);
1650         if (ret < 0)
1651             goto the_end;
1652
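             /* pull every frame the filter graph has ready; an error from
                av_buffersink_get_frame() (including EAGAIN and EOF) simply
                ends the loop */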
1653         while (ret >= 0) {
1654             ret = av_buffersink_get_frame(filt_out, frame);
1655             if (ret < 0) {
1656                 ret = 0;
1657                 break;
1658             }
1659
1660             pts_int = frame->pts;
1661             tb      = filt_out->inputs[0]->time_base;
1662             if (av_cmp_q(tb, is->video_st->time_base)) {
1663                 av_unused int64_t pts1 = pts_int;
1664                 pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
1665                 av_log(NULL, AV_LOG_TRACE, "video_thread(): "
1666                         "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
1667                         tb.num, tb.den, pts1,
1668                         is->video_st->time_base.num, is->video_st->time_base.den, pts_int);
1669             }
1670             pts = pts_int * av_q2d(is->video_st->time_base);
1671             ret = output_picture2(is, frame, pts, 0);
1672         }
1673 #else
1674         pts = pts_int * av_q2d(is->video_st->time_base);
1675         ret = output_picture2(is, frame, pts,  pkt.pos);
1676 #endif
1677
1678         if (ret < 0)
1679             goto the_end;
1680
1681
1682         if (step)
1683             if (cur_stream)
1684                 stream_pause(cur_stream);
1685     }
1686  the_end:
1687 #if CONFIG_AVFILTER
1688     av_freep(&vfilters);
1689     avfilter_graph_free(&graph);
1690 #endif
1691     av_free_packet(&pkt);
1692     av_frame_free(&frame);
1693     return 0;
1694 }
1695
1696 static int subtitle_thread(void *arg)
1697 {
1698     VideoState *is = arg;
1699     SubPicture *sp;
1700     AVPacket pkt1, *pkt = &pkt1;
1701     int got_subtitle;
1702     double pts;
1703     int i, j;
1704     int r, g, b, y, u, v, a;
1705
1706     for (;;) {
1707         while (is->paused && !is->subtitleq.abort_request) {
1708             SDL_Delay(10);
1709         }
1710         if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
1711             break;
1712
1713         if (pkt->data == flush_pkt.data) {
1714             avcodec_flush_buffers(is->subtitle_st->codec);
1715             continue;
1716         }
1717         SDL_LockMutex(is->subpq_mutex);
1718         while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
1719                !is->subtitleq.abort_request) {
1720             SDL_CondWait(is->subpq_cond, is->subpq_mutex);
1721         }
1722         SDL_UnlockMutex(is->subpq_mutex);
1723
1724         if (is->subtitleq.abort_request)
1725             return 0;
1726
1727         sp = &is->subpq[is->subpq_windex];
1728
1729         /* NOTE: pts is the PTS of the _first_ subtitle beginning in
1730            this packet, if any */
1731         pts = 0;
1732         if (pkt->pts != AV_NOPTS_VALUE)
1733             pts = av_q2d(is->subtitle_st->time_base) * pkt->pts;
1734
1735         avcodec_decode_subtitle2(is->subtitle_st->codec, &sp->sub,
1736                                  &got_subtitle, pkt);
1737
1738         if (got_subtitle && sp->sub.format == 0) {
1739             sp->pts = pts;
1740
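                 /* convert each palette entry of every bitmap rect from RGBA
                    to CCIR Y/U/V in place so later blending can work directly
                    in YUV */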
1741             for (i = 0; i < sp->sub.num_rects; i++)
1742             {
1743                 for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
1744                 {
1745                     RGBA_IN(r, g, b, a, (uint32_t *)sp->sub.rects[i]->data[1] + j);
1746                     y = RGB_TO_Y_CCIR(r, g, b);
1747                     u = RGB_TO_U_CCIR(r, g, b, 0);
1748                     v = RGB_TO_V_CCIR(r, g, b, 0);
1749                     YUVA_OUT((uint32_t *)sp->sub.rects[i]->data[1] + j, y, u, v, a);
1750                 }
1751             }
1752
1753             /* now we can update the picture count */
1754             if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
1755                 is->subpq_windex = 0;
1756             SDL_LockMutex(is->subpq_mutex);
1757             is->subpq_size++;
1758             SDL_UnlockMutex(is->subpq_mutex);
1759         }
1760         av_free_packet(pkt);
1761     }
1762     return 0;
1763 }
1764
1765 /* copy decoded samples into the ring buffer used by the audio visualization (waveform/spectrum) display */
1766 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1767 {
1768     int size, len;
1769
1770     size = samples_size / sizeof(short);
1771     while (size > 0) {
1772         len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1773         if (len > size)
1774             len = size;
1775         memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1776         samples += len;
1777         is->sample_array_index += len;
1778         if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1779             is->sample_array_index = 0;
1780         size -= len;
1781     }
1782 }
1783
1784 /* return the new audio buffer size (samples may be added or dropped
1785    to improve sync when video or the external clock is the master) */
1786 static int synchronize_audio(VideoState *is, short *samples,
1787                              int samples_size1, double pts)
1788 {
1789     int n, samples_size;
1790     double ref_clock;
1791
1792     n = is->sdl_channels * av_get_bytes_per_sample(is->sdl_sample_fmt);
1793     samples_size = samples_size1;
1794
1795     /* if not master, then we try to remove or add samples to correct the clock */
1796     if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1797          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1798         double diff, avg_diff;
1799         int wanted_size, min_size, max_size, nb_samples;
1800
1801         ref_clock = get_master_clock(is);
1802         diff = get_audio_clock(is) - ref_clock;
1803
1804         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
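                 /* exponentially weighted accumulator: with
                    coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB) the last
                    AUDIO_DIFF_AVG_NB differences carry about 99% of the
                    weight, and multiplying by (1 - coef) below turns the
                    sum into a moving average */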
1805             is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1806             if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1807                 /* not enough measures to have a correct estimate */
1808                 is->audio_diff_avg_count++;
1809             } else {
1810                 /* estimate the A-V difference */
1811                 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1812
1813                 if (fabs(avg_diff) >= is->audio_diff_threshold) {
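                         /* diff seconds of drift correspond to
                            diff * sample_rate frames, i.e. diff * sample_rate * n
                            bytes; the adjustment is then clamped to
                            +/- SAMPLE_CORRECTION_PERCENT_MAX percent of the buffer */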
1814                     wanted_size = samples_size + ((int)(diff * is->sdl_sample_rate) * n);
1815                     nb_samples = samples_size / n;
1816
1817                     min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1818                     max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1819                     if (wanted_size < min_size)
1820                         wanted_size = min_size;
1821                     else if (wanted_size > max_size)
1822                         wanted_size = max_size;
1823
1824                     /* add or remove samples to correct the synchronization */
1825                     if (wanted_size < samples_size) {
1826                         /* remove samples */
1827                         samples_size = wanted_size;
1828                     } else if (wanted_size > samples_size) {
1829                         uint8_t *samples_end, *q;
1830                         int nb;
1831
1832                         /* add samples */
1833                         nb = (samples_size - wanted_size);
1834                         samples_end = (uint8_t *)samples + samples_size - n;
1835                         q = samples_end + n;
1836                         while (nb > 0) {
1837                             memcpy(q, samples_end, n);
1838                             q += n;
1839                             nb -= n;
1840                         }
1841                         samples_size = wanted_size;
1842                     }
1843                 }
1844                 av_log(NULL, AV_LOG_TRACE, "diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
1845                         diff, avg_diff, samples_size - samples_size1,
1846                         is->audio_clock, is->video_clock, is->audio_diff_threshold);
1847             }
1848         } else {
1849             /* the difference is too large: probably initial PTS errors,
1850                so reset the A-V filter */
1851             is->audio_diff_avg_count = 0;
1852             is->audio_diff_cum       = 0;
1853         }
1854     }
1855
1856     return samples_size;
1857 }
1858
1859 /* decode one audio frame and return its uncompressed size */
1860 static int audio_decode_frame(VideoState *is, double *pts_ptr)
1861 {
1862     AVPacket *pkt_temp = &is->audio_pkt_temp;
1863     AVPacket *pkt = &is->audio_pkt;
1864     AVCodecContext *dec = is->audio_st->codec;
1865     int n, len1, data_size, got_frame;
1866     double pts;
1867     int new_packet = 0;
1868     int flush_complete = 0;
1869
1870     for (;;) {
1871         /* NOTE: the audio packet can contain several frames */
1872         while (pkt_temp->size > 0 || (!pkt_temp->data && new_packet)) {
1873             int resample_changed, audio_resample;
1874
1875             if (!is->frame) {
1876                 if (!(is->frame = av_frame_alloc()))
1877                     return AVERROR(ENOMEM);
1878             }
1879
1880             if (flush_complete)
1881                 break;
1882             new_packet = 0;
1883             len1 = avcodec_decode_audio4(dec, is->frame, &got_frame, pkt_temp);
1884             if (len1 < 0) {
1885                 /* on error, skip the rest of the packet */
1886                 pkt_temp->size = 0;
1887                 break;
1888             }
1889
1890             pkt_temp->data += len1;
1891             pkt_temp->size -= len1;
1892
1893             if (!got_frame) {
1894                 /* stop sending empty packets if the decoder is finished */
1895                 if (!pkt_temp->data && (dec->codec->capabilities & AV_CODEC_CAP_DELAY))
1896                     flush_complete = 1;
1897                 continue;
1898             }
1899             data_size = av_samples_get_buffer_size(NULL, dec->channels,
1900                                                    is->frame->nb_samples,
1901                                                    is->frame->format, 1);
1902
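                 /* audio_resample: the decoded format differs from what SDL
                    is being fed; resample_changed: the decoded format differs
                    from what the resampler was last configured for, so it has
                    to be rebuilt */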
1903             audio_resample = is->frame->format         != is->sdl_sample_fmt     ||
1904                              is->frame->channel_layout != is->sdl_channel_layout ||
1905                              is->frame->sample_rate    != is->sdl_sample_rate;
1906
1907             resample_changed = is->frame->format         != is->resample_sample_fmt     ||
1908                                is->frame->channel_layout != is->resample_channel_layout ||
1909                                is->frame->sample_rate    != is->resample_sample_rate;
1910
1911             if ((!is->avr && audio_resample) || resample_changed) {
1912                 int ret;
1913                 if (is->avr)
1914                     avresample_close(is->avr);
1915                 else if (audio_resample) {
1916                     is->avr = avresample_alloc_context();
1917                     if (!is->avr) {
1918                         fprintf(stderr, "error allocating AVAudioResampleContext\n");
1919                         break;
1920                     }
1921                 }
1922                 if (audio_resample) {
1923                     av_opt_set_int(is->avr, "in_channel_layout",  is->frame->channel_layout, 0);
1924                     av_opt_set_int(is->avr, "in_sample_fmt",      is->frame->format,         0);
1925                     av_opt_set_int(is->avr, "in_sample_rate",     is->frame->sample_rate,    0);
1926                     av_opt_set_int(is->avr, "out_channel_layout", is->sdl_channel_layout,    0);
1927                     av_opt_set_int(is->avr, "out_sample_fmt",     is->sdl_sample_fmt,        0);
1928                     av_opt_set_int(is->avr, "out_sample_rate",    is->sdl_sample_rate,       0);
1929
1930                     if ((ret = avresample_open(is->avr)) < 0) {
1931                         fprintf(stderr, "error initializing libavresample\n");
1932                         break;
1933                     }
1934                 }
1935                 is->resample_sample_fmt     = is->frame->format;
1936                 is->resample_channel_layout = is->frame->channel_layout;
1937                 is->resample_sample_rate    = is->frame->sample_rate;
1938             }
1939
1940             if (audio_resample) {
1941                 void *tmp_out;
1942                 int out_samples, out_size, out_linesize;
1943                 int osize      = av_get_bytes_per_sample(is->sdl_sample_fmt);
1944                 int nb_samples = is->frame->nb_samples;
1945
1946                 out_size = av_samples_get_buffer_size(&out_linesize,
1947                                                       is->sdl_channels,
1948                                                       nb_samples,
1949                                                       is->sdl_sample_fmt, 0);
1950                 tmp_out = av_realloc(is->audio_buf1, out_size);
1951                 if (!tmp_out)
1952                     return AVERROR(ENOMEM);
1953                 is->audio_buf1 = tmp_out;
1954
1955                 out_samples = avresample_convert(is->avr,
1956                                                  &is->audio_buf1,
1957                                                  out_linesize, nb_samples,
1958                                                  is->frame->data,
1959                                                  is->frame->linesize[0],
1960                                                  is->frame->nb_samples);
1961                 if (out_samples < 0) {
1962                     fprintf(stderr, "avresample_convert() failed\n");
1963                     break;
1964                 }
1965                 is->audio_buf = is->audio_buf1;
1966                 data_size = out_samples * osize * is->sdl_channels;
1967             } else {
1968                 is->audio_buf = is->frame->data[0];
1969             }
1970
1971             /* take the pts from the running audio clock and advance the clock by the duration of the decoded data */
1972             pts = is->audio_clock;
1973             *pts_ptr = pts;
1974             n = is->sdl_channels * av_get_bytes_per_sample(is->sdl_sample_fmt);
1975             is->audio_clock += (double)data_size /
1976                 (double)(n * is->sdl_sample_rate);
1977 #ifdef DEBUG
1978             {
1979                 static double last_clock;
1980                 printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
1981                        is->audio_clock - last_clock,
1982                        is->audio_clock, pts);
1983                 last_clock = is->audio_clock;
1984             }
1985 #endif
1986             return data_size;
1987         }
1988
1989         /* free the current packet */
1990         if (pkt->data)
1991             av_free_packet(pkt);
1992         memset(pkt_temp, 0, sizeof(*pkt_temp));
1993
1994         if (is->paused || is->audioq.abort_request) {
1995             return -1;
1996         }
1997
1998         /* read next packet */
1999         if ((new_packet = packet_queue_get(&is->audioq, pkt, 1)) < 0)
2000             return -1;
2001
2002         if (pkt->data == flush_pkt.data) {
2003             avcodec_flush_buffers(dec);
2004             flush_complete = 0;
2005         }
2006
2007         *pkt_temp = *pkt;
2008
2009         /* update the audio clock with the packet pts, if present */
2010         if (pkt->pts != AV_NOPTS_VALUE) {
2011             is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
2012         }
2013     }
2014 }
2015
2016 /* prepare a new audio buffer */
2017 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2018 {
2019     VideoState *is = opaque;
2020     int audio_size, len1;
2021     double pts;
2022
2023     audio_callback_time = av_gettime_relative();
2024
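         /* called from SDL's audio thread: fill exactly len bytes, decoding
            more audio whenever the internal buffer runs dry and substituting
            silence on errors so output never stalls */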
2025     while (len > 0) {
2026         if (is->audio_buf_index >= is->audio_buf_size) {
2027            audio_size = audio_decode_frame(is, &pts);
2028            if (audio_size < 0) {
2029                 /* if error, just output silence */
2030                is->audio_buf      = is->silence_buf;
2031                is->audio_buf_size = sizeof(is->silence_buf);
2032            } else {
2033                if (is->show_audio)
2034                    update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2035                audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
2036                                               pts);
2037                is->audio_buf_size = audio_size;
2038            }
2039            is->audio_buf_index = 0;
2040         }
2041         len1 = is->audio_buf_size - is->audio_buf_index;
2042         if (len1 > len)
2043             len1 = len;
2044         memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2045         len -= len1;
2046         stream += len1;
2047         is->audio_buf_index += len1;
2048     }
2049 }
2050
2051 /* open a given stream. Return 0 if OK */
2052 static int stream_component_open(VideoState *is, int stream_index)
2053 {
2054     AVFormatContext *ic = is->ic;
2055     AVCodecContext *avctx;
2056     AVCodec *codec;
2057     SDL_AudioSpec wanted_spec, spec;
2058     AVDictionary *opts;
2059     AVDictionaryEntry *t = NULL;
2060     int ret = 0;
2061
2062     if (stream_index < 0 || stream_index >= ic->nb_streams)
2063         return -1;
2064     avctx = ic->streams[stream_index]->codec;
2065
2066     opts = filter_codec_opts(codec_opts, avctx->codec_id, ic, ic->streams[stream_index], NULL);
2067
2068     codec = avcodec_find_decoder(avctx->codec_id);
2069     avctx->workaround_bugs   = workaround_bugs;
2070     avctx->idct_algo         = idct;
2071     avctx->skip_frame        = skip_frame;
2072     avctx->skip_idct         = skip_idct;
2073     avctx->skip_loop_filter  = skip_loop_filter;
2074     avctx->error_concealment = error_concealment;
2075
2076     if (fast)
2077         avctx->flags2 |= AV_CODEC_FLAG2_FAST;
2078
2079     if (!av_dict_get(opts, "threads", NULL, 0))
2080         av_dict_set(&opts, "threads", "auto", 0);
2081     if (avctx->codec_type == AVMEDIA_TYPE_VIDEO)
2082         av_dict_set(&opts, "refcounted_frames", "1", 0);
2083     if (!codec ||
2084         (ret = avcodec_open2(avctx, codec, &opts)) < 0) {
2085         goto fail;
2086     }
2087     if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2088         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2089         ret =  AVERROR_OPTION_NOT_FOUND;
2090         goto fail;
2091     }
2092
2093     /* prepare audio output */
2094     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2095         is->sdl_sample_rate = avctx->sample_rate;
2096
2097         if (!avctx->channel_layout)
2098             avctx->channel_layout = av_get_default_channel_layout(avctx->channels);
2099         if (!avctx->channel_layout) {
2100             fprintf(stderr, "unable to guess channel layout\n");
2101             ret = AVERROR_INVALIDDATA;
2102             goto fail;
2103         }
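             /* the audio device is opened as mono or stereo only; any other
                source layout gets converted by libavresample to the chosen
                SDL layout */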
2104         if (avctx->channels == 1)
2105             is->sdl_channel_layout = AV_CH_LAYOUT_MONO;
2106         else
2107             is->sdl_channel_layout = AV_CH_LAYOUT_STEREO;
2108         is->sdl_channels = av_get_channel_layout_nb_channels(is->sdl_channel_layout);
2109
2110         wanted_spec.format = AUDIO_S16SYS;
2111         wanted_spec.freq = is->sdl_sample_rate;
2112         wanted_spec.channels = is->sdl_channels;
2113         wanted_spec.silence = 0;
2114         wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
2115         wanted_spec.callback = sdl_audio_callback;
2116         wanted_spec.userdata = is;
2117         if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2118             fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
2119             ret = AVERROR_UNKNOWN;
2120             goto fail;
2121         }
2122         is->audio_hw_buf_size = spec.size;
2123         is->sdl_sample_fmt          = AV_SAMPLE_FMT_S16;
2124         is->resample_sample_fmt     = is->sdl_sample_fmt;
2125         is->resample_channel_layout = avctx->channel_layout;
2126         is->resample_sample_rate    = avctx->sample_rate;
2127     }
2128
2129     ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2130     switch (avctx->codec_type) {
2131     case AVMEDIA_TYPE_AUDIO:
2132         is->audio_stream = stream_index;
2133         is->audio_st = ic->streams[stream_index];
2134         is->audio_buf_size  = 0;
2135         is->audio_buf_index = 0;
2136
2137         /* init averaging filter */
2138         is->audio_diff_avg_coef  = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2139         is->audio_diff_avg_count = 0;
2140         /* since we do not have precise enough audio FIFO fullness
2141            information, only correct audio sync above this threshold */
2142         is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / avctx->sample_rate;
2143
2144         memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
2145         packet_queue_init(&is->audioq);
2146         SDL_PauseAudio(0);
2147         break;
2148     case AVMEDIA_TYPE_VIDEO:
2149         is->video_stream = stream_index;
2150         is->video_st = ic->streams[stream_index];
2151
2152         packet_queue_init(&is->videoq);
2153         is->video_tid = SDL_CreateThread(video_thread, is);
2154         break;
2155     case AVMEDIA_TYPE_SUBTITLE:
2156         is->subtitle_stream = stream_index;
2157         is->subtitle_st = ic->streams[stream_index];
2158         packet_queue_init(&is->subtitleq);
2159
2160         is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
2161         break;
2162     default:
2163         break;
2164     }
2165
2166 fail:
2167     av_dict_free(&opts);
2168
2169     return ret;
2170 }
2171
2172 static void stream_component_close(VideoState *is, int stream_index)
2173 {
2174     AVFormatContext *ic = is->ic;
2175     AVCodecContext *avctx;
2176
2177     if (stream_index < 0 || stream_index >= ic->nb_streams)
2178         return;
2179     avctx = ic->streams[stream_index]->codec;
2180
2181     switch (avctx->codec_type) {
2182     case AVMEDIA_TYPE_AUDIO:
2183         packet_queue_abort(&is->audioq);
2184
2185         SDL_CloseAudio();
2186
2187         packet_queue_end(&is->audioq);
2188         av_free_packet(&is->audio_pkt);
2189         if (is->avr)
2190             avresample_free(&is->avr);
2191         av_freep(&is->audio_buf1);
2192         is->audio_buf = NULL;
2193         av_frame_free(&is->frame);
2194
2195         if (is->rdft) {
2196             av_rdft_end(is->rdft);
2197             av_freep(&is->rdft_data);
2198             is->rdft = NULL;
2199             is->rdft_bits = 0;
2200         }
2201         break;
2202     case AVMEDIA_TYPE_VIDEO:
2203         packet_queue_abort(&is->videoq);
2204
2205         /* note: we also signal this condition to make sure we unblock the
2206            video thread in all cases */
2207         SDL_LockMutex(is->pictq_mutex);
2208         SDL_CondSignal(is->pictq_cond);
2209         SDL_UnlockMutex(is->pictq_mutex);
2210
2211         SDL_WaitThread(is->video_tid, NULL);
2212
2213         packet_queue_end(&is->videoq);
2214         break;
2215     case AVMEDIA_TYPE_SUBTITLE:
2216         packet_queue_abort(&is->subtitleq);
2217
2218         /* note: we also signal this condition to make sure we unblock the
2219            subtitle thread in all cases */
2220         SDL_LockMutex(is->subpq_mutex);
2221         is->subtitle_stream_changed = 1;
2222
2223         SDL_CondSignal(is->subpq_cond);
2224         SDL_UnlockMutex(is->subpq_mutex);
2225
2226         SDL_WaitThread(is->subtitle_tid, NULL);
2227
2228         packet_queue_end(&is->subtitleq);
2229         break;
2230     default:
2231         break;
2232     }
2233
2234     ic->streams[stream_index]->discard = AVDISCARD_ALL;
2235     avcodec_close(avctx);
2236     switch (avctx->codec_type) {
2237     case AVMEDIA_TYPE_AUDIO:
2238         is->audio_st = NULL;
2239         is->audio_stream = -1;
2240         break;
2241     case AVMEDIA_TYPE_VIDEO:
2242         is->video_st = NULL;
2243         is->video_stream = -1;
2244         break;
2245     case AVMEDIA_TYPE_SUBTITLE:
2246         is->subtitle_st = NULL;
2247         is->subtitle_stream = -1;
2248         break;
2249     default:
2250         break;
2251     }
2252 }
2253
2254 /* since we have only one decoding thread, we can use a global
2255    variable instead of a thread local variable */
2256 static VideoState *global_video_state;
2257
2258 static int decode_interrupt_cb(void *ctx)
2259 {
2260     return global_video_state && global_video_state->abort_request;
2261 }
2262
2263 /* this thread gets the stream from the disk or the network */
2264 static int decode_thread(void *arg)
2265 {
2266     VideoState *is = arg;
2267     AVFormatContext *ic = NULL;
2268     int err, i, ret;
2269     int st_index[AVMEDIA_TYPE_NB];
2270     AVPacket pkt1, *pkt = &pkt1;
2271     int eof = 0;
2272     int pkt_in_play_range = 0;
2273     AVDictionaryEntry *t;
2274     AVDictionary **opts;
2275     int orig_nb_streams;
2276
2277     memset(st_index, -1, sizeof(st_index));
2278     is->video_stream = -1;
2279     is->audio_stream = -1;
2280     is->subtitle_stream = -1;
2281
2282     global_video_state = is;
2283
2284     ic = avformat_alloc_context();
2285     if (!ic) {
2286         av_log(NULL, AV_LOG_FATAL, "Could not allocate context.\n");
2287         ret = AVERROR(ENOMEM);
2288         goto fail;
2289     }
2290     ic->interrupt_callback.callback = decode_interrupt_cb;
2291     err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
2292     if (err < 0) {
2293         print_error(is->filename, err);
2294         ret = -1;
2295         goto fail;
2296     }
2297     if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2298         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2299         ret = AVERROR_OPTION_NOT_FOUND;
2300         goto fail;
2301     }
2302     is->ic = ic;
2303
2304     if (genpts)
2305         ic->flags |= AVFMT_FLAG_GENPTS;
2306
2307     opts = setup_find_stream_info_opts(ic, codec_opts);
2308     orig_nb_streams = ic->nb_streams;
2309
2310     err = avformat_find_stream_info(ic, opts);
2311
2312     for (i = 0; i < orig_nb_streams; i++)
2313         av_dict_free(&opts[i]);
2314     av_freep(&opts);
2315
2316     if (err < 0) {
2317         fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
2318         ret = -1;
2319         goto fail;
2320     }
2321
2322     if (ic->pb)
2323         ic->pb->eof_reached = 0; // FIXME hack, avplay maybe should not use url_feof() to test for the end
2324
2325     if (seek_by_bytes < 0)
2326         seek_by_bytes = !!(ic->iformat->flags & AVFMT_TS_DISCONT);
2327
2328     /* if seeking was requested, execute it now */
2329     if (start_time != AV_NOPTS_VALUE) {
2330         int64_t timestamp;
2331
2332         timestamp = start_time;
2333         /* add the stream start time */
2334         if (ic->start_time != AV_NOPTS_VALUE)
2335             timestamp += ic->start_time;
2336         ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2337         if (ret < 0) {
2338             fprintf(stderr, "%s: could not seek to position %0.3f\n",
2339                     is->filename, (double)timestamp / AV_TIME_BASE);
2340         }
2341     }
2342
2343     for (i = 0; i < ic->nb_streams; i++)
2344         ic->streams[i]->discard = AVDISCARD_ALL;
2345     if (!video_disable)
2346         st_index[AVMEDIA_TYPE_VIDEO] =
2347             av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2348                                 wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2349     if (!audio_disable)
2350         st_index[AVMEDIA_TYPE_AUDIO] =
2351             av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2352                                 wanted_stream[AVMEDIA_TYPE_AUDIO],
2353                                 st_index[AVMEDIA_TYPE_VIDEO],
2354                                 NULL, 0);
2355     if (!video_disable)
2356         st_index[AVMEDIA_TYPE_SUBTITLE] =
2357             av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2358                                 wanted_stream[AVMEDIA_TYPE_SUBTITLE],
2359                                 (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2360                                  st_index[AVMEDIA_TYPE_AUDIO] :
2361                                  st_index[AVMEDIA_TYPE_VIDEO]),
2362                                 NULL, 0);
2363     if (show_status) {
2364         av_dump_format(ic, 0, is->filename, 0);
2365     }
2366
2367     /* open the streams */
2368     if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2369         stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2370     }
2371
2372     ret = -1;
2373     if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2374         ret = stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2375     }
2376     is->refresh_tid = SDL_CreateThread(refresh_thread, is);
2377     if (ret < 0) {
2378         if (!display_disable)
2379             is->show_audio = 2;
2380     }
2381
2382     if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2383         stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2384     }
2385
2386     if (is->video_stream < 0 && is->audio_stream < 0) {
2387         fprintf(stderr, "%s: could not open codecs\n", is->filename);
2388         ret = -1;
2389         goto fail;
2390     }
2391
2392     for (;;) {
2393         if (is->abort_request)
2394             break;
2395         if (is->paused != is->last_paused) {
2396             is->last_paused = is->paused;
2397             if (is->paused)
2398                 is->read_pause_return = av_read_pause(ic);
2399             else
2400                 av_read_play(ic);
2401         }
2402 #if CONFIG_RTSP_DEMUXER
2403         if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
2404             /* wait 10 ms to avoid trying to get another packet */
2405             /* XXX: horrible */
2406             SDL_Delay(10);
2407             continue;
2408         }
2409 #endif
2410         if (is->seek_req) {
2411             int64_t seek_target = is->seek_pos;
2412             int64_t seek_min    = is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2413             int64_t seek_max    = is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2414 // FIXME the +-2 is due to rounding being not done in the correct direction in generation
2415 //      of the seek_pos/seek_rel variables
2416
2417             ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2418             if (ret < 0) {
2419                 fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
2420             } else {
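                     /* on a successful seek, flush each packet queue and push
                        the flush_pkt sentinel so the decoder threads reset
                        their codec state */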
2421                 if (is->audio_stream >= 0) {
2422                     packet_queue_flush(&is->audioq);
2423                     packet_queue_put(&is->audioq, &flush_pkt);
2424                 }
2425                 if (is->subtitle_stream >= 0) {
2426                     packet_queue_flush(&is->subtitleq);
2427                     packet_queue_put(&is->subtitleq, &flush_pkt);
2428                 }
2429                 if (is->video_stream >= 0) {
2430                     packet_queue_flush(&is->videoq);
2431                     packet_queue_put(&is->videoq, &flush_pkt);
2432                 }
2433             }
2434             is->seek_req = 0;
2435             eof = 0;
2436         }
2437
2438         /* if the queues are full, no need to read more */
2439         if (!infinite_buffer &&
2440               (is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2441             || (   (is->audioq   .size  > MIN_AUDIOQ_SIZE || is->audio_stream < 0)
2442                 && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream < 0)
2443                 && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream < 0)))) {
2444             /* wait 10 ms */
2445             SDL_Delay(10);
2446             continue;
2447         }
2448         if (eof) {
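                 /* at end of stream, queue empty packets to drain decoders
                    that buffer frames, then either restart playback (loop) or
                    quit once all queues have emptied */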
2449             if (is->video_stream >= 0) {
2450                 av_init_packet(pkt);
2451                 pkt->data = NULL;
2452                 pkt->size = 0;
2453                 pkt->stream_index = is->video_stream;
2454                 packet_queue_put(&is->videoq, pkt);
2455             }
2456             if (is->audio_stream >= 0 &&
2457                 (is->audio_st->codec->codec->capabilities & AV_CODEC_CAP_DELAY)) {
2458                 av_init_packet(pkt);
2459                 pkt->data = NULL;
2460                 pkt->size = 0;
2461                 pkt->stream_index = is->audio_stream;
2462                 packet_queue_put(&is->audioq, pkt);
2463             }
2464             SDL_Delay(10);
2465             if (is->audioq.size + is->videoq.size + is->subtitleq.size == 0) {
2466                 if (loop != 1 && (!loop || --loop)) {
2467                     stream_seek(cur_stream, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
2468                 } else if (!noautoexit) {
2469                     ret = AVERROR_EOF;
2470                     goto fail;
2471                 }
2472             }
2473             continue;
2474         }
2475         ret = av_read_frame(ic, pkt);
2476         if (ret < 0) {
2477             if (ret == AVERROR_EOF || (ic->pb && ic->pb->eof_reached))
2478                 eof = 1;
2479             if (ic->pb && ic->pb->error)
2480                 break;
2481             SDL_Delay(100); /* wait for user event */
2482             continue;
2483         }
2484         /* check if packet is in play range specified by user, then queue, otherwise discard */
2485         pkt_in_play_range = duration == AV_NOPTS_VALUE ||
2486                 (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
2487                 av_q2d(ic->streams[pkt->stream_index]->time_base) -
2488                 (double)(start_time != AV_NOPTS_VALUE ? start_time : 0) / 1000000
2489                 <= ((double)duration / 1000000);
2490         if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
2491             packet_queue_put(&is->audioq, pkt);
2492         } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
2493             packet_queue_put(&is->videoq, pkt);
2494         } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
2495             packet_queue_put(&is->subtitleq, pkt);
2496         } else {
2497             av_free_packet(pkt);
2498         }
2499     }
2500     /* wait until the end */
2501     while (!is->abort_request) {
2502         SDL_Delay(100);
2503     }
2504
2505     ret = 0;
2506  fail:
2507     /* disable interrupting */
2508     global_video_state = NULL;
2509
2510     /* close each stream */
2511     if (is->audio_stream >= 0)
2512         stream_component_close(is, is->audio_stream);
2513     if (is->video_stream >= 0)
2514         stream_component_close(is, is->video_stream);
2515     if (is->subtitle_stream >= 0)
2516         stream_component_close(is, is->subtitle_stream);
2517     if (is->ic) {
2518         avformat_close_input(&is->ic);
2519     }
2520
2521     if (ret != 0) {
2522         SDL_Event event;
2523
2524         event.type = FF_QUIT_EVENT;
2525         event.user.data1 = is;
2526         SDL_PushEvent(&event);
2527     }
2528     return 0;
2529 }
2530
2531 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2532 {
2533     VideoState *is;
2534
2535     is = av_mallocz(sizeof(VideoState));
2536     if (!is)
2537         return NULL;
2538     av_strlcpy(is->filename, filename, sizeof(is->filename));
2539     is->iformat = iformat;
2540     is->ytop    = 0;
2541     is->xleft   = 0;
2542
2543     /* start video display */
2544     is->pictq_mutex = SDL_CreateMutex();
2545     is->pictq_cond  = SDL_CreateCond();
2546
2547     is->subpq_mutex = SDL_CreateMutex();
2548     is->subpq_cond  = SDL_CreateCond();
2549
2550     is->av_sync_type = av_sync_type;
2551     is->parse_tid    = SDL_CreateThread(decode_thread, is);
2552     if (!is->parse_tid) {
2553         av_free(is);
2554         return NULL;
2555     }
2556     return is;
2557 }
2558
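     /* switch to the next stream of the given type, scanning circularly
        through the file's streams; for subtitles, cycling past the last
        stream disables subtitle display */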
2559 static void stream_cycle_channel(VideoState *is, int codec_type)
2560 {
2561     AVFormatContext *ic = is->ic;
2562     int start_index, stream_index;
2563     AVStream *st;
2564
2565     if (codec_type == AVMEDIA_TYPE_VIDEO)
2566         start_index = is->video_stream;
2567     else if (codec_type == AVMEDIA_TYPE_AUDIO)
2568         start_index = is->audio_stream;
2569     else
2570         start_index = is->subtitle_stream;
2571     if (start_index < (codec_type == AVMEDIA_TYPE_SUBTITLE ? -1 : 0))
2572         return;
2573     stream_index = start_index;
2574     for (;;) {
2575         if (++stream_index >= is->ic->nb_streams)
2576         {
2577             if (codec_type == AVMEDIA_TYPE_SUBTITLE)
2578             {
2579                 stream_index = -1;
2580                 goto the_end;
2581             } else
2582                 stream_index = 0;
2583         }
2584         if (stream_index == start_index)
2585             return;
2586         st = ic->streams[stream_index];
2587         if (st->codec->codec_type == codec_type) {
2588             /* check that parameters are OK */
2589             switch (codec_type) {
2590             case AVMEDIA_TYPE_AUDIO:
2591                 if (st->codec->sample_rate != 0 &&
2592                     st->codec->channels != 0)
2593                     goto the_end;
2594                 break;
2595             case AVMEDIA_TYPE_VIDEO:
2596             case AVMEDIA_TYPE_SUBTITLE:
2597                 goto the_end;
2598             default:
2599                 break;
2600             }
2601         }
2602     }
2603  the_end:
2604     stream_component_close(is, start_index);
2605     stream_component_open(is, stream_index);
2606 }
2607
2608
2609 static void toggle_full_screen(void)
2610 {
2611 #if defined(__APPLE__) && SDL_VERSION_ATLEAST(1, 2, 14)
2612     /* OS X needs to empty the picture_queue */
2613     int i;
2614     for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++)
2615         cur_stream->pictq[i].reallocate = 1;
2616 #endif
2617     is_full_screen = !is_full_screen;
2618     video_open(cur_stream);
2619 }
2620
2621 static void toggle_pause(void)
2622 {
2623     if (cur_stream)
2624         stream_pause(cur_stream);
2625     step = 0;
2626 }
2627
2628 static void step_to_next_frame(void)
2629 {
2630     if (cur_stream) {
2631         /* if the stream is paused, unpause it, then step */
2632         if (cur_stream->paused)
2633             stream_pause(cur_stream);
2634     }
2635     step = 1;
2636 }
2637
2638 static void toggle_audio_display(void)
2639 {
2640     if (cur_stream) {
2641         int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2642         cur_stream->show_audio = (cur_stream->show_audio + 1) % 3;
2643         fill_rectangle(screen,
2644                        cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height,
2645                        bgcolor);
2646         SDL_UpdateRect(screen, cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height);
2647     }
2648 }
2649
2650 static void seek_chapter(VideoState *is, int incr)
2651 {
2652     int64_t pos = get_master_clock(is) * AV_TIME_BASE;
2653     int i;
2654
2655     if (!is->ic->nb_chapters)
2656         return;
2657
2658     /* find the current chapter */
2659     for (i = 0; i < is->ic->nb_chapters; i++) {
2660         AVChapter *ch = is->ic->chapters[i];
2661         if (av_compare_ts(pos, AV_TIME_BASE_Q, ch->start, ch->time_base) < 0) {
2662             i--;
2663             break;
2664         }
2665     }
2666
2667     i += incr;
2668     i = FFMAX(i, 0);
2669     if (i >= is->ic->nb_chapters)
2670         return;
2671
2672     av_log(NULL, AV_LOG_VERBOSE, "Seeking to chapter %d.\n", i);
2673     stream_seek(is, av_rescale_q(is->ic->chapters[i]->start, is->ic->chapters[i]->time_base,
2674                                  AV_TIME_BASE_Q), 0, 0);
2675 }
2676
2677 /* handle an event sent by the GUI */
2678 static void event_loop(void)
2679 {
2680     SDL_Event event;
2681     double incr, pos, frac;
2682
2683     for (;;) {
2684         double x;
2685         SDL_WaitEvent(&event);
2686         switch (event.type) {
2687         case SDL_KEYDOWN:
2688             if (exit_on_keydown) {
2689                 do_exit();
2690                 break;
2691             }
2692             switch (event.key.keysym.sym) {
2693             case SDLK_ESCAPE:
2694             case SDLK_q:
2695                 do_exit();
2696                 break;
2697             case SDLK_f:
2698                 toggle_full_screen();
2699                 break;
2700             case SDLK_p:
2701             case SDLK_SPACE:
2702                 toggle_pause();
2703                 break;
2704             case SDLK_s: // S: Step to next frame
2705                 step_to_next_frame();
2706                 break;
2707             case SDLK_a:
2708                 if (cur_stream)
2709                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
2710                 break;
2711             case SDLK_v:
2712                 if (cur_stream)
2713                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
2714                 break;
2715             case SDLK_t:
2716                 if (cur_stream)
2717                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
2718                 break;
2719             case SDLK_w:
2720                 toggle_audio_display();
2721                 break;
2722             case SDLK_PAGEUP:
2723                 seek_chapter(cur_stream, 1);
2724                 break;
2725             case SDLK_PAGEDOWN:
2726                 seek_chapter(cur_stream, -1);
2727                 break;
2728             case SDLK_LEFT:
2729                 incr = -10.0;
2730                 goto do_seek;
2731             case SDLK_RIGHT:
2732                 incr = 10.0;
2733                 goto do_seek;
2734             case SDLK_UP:
2735                 incr = 60.0;
2736                 goto do_seek;
2737             case SDLK_DOWN:
2738                 incr = -60.0;
2739             do_seek:
2740                 if (cur_stream) {
2741                     if (seek_by_bytes) {
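                             /* convert the +/- seconds increment into an
                                approximate byte offset using the stream bit
                                rate, falling back to 180000 bytes/s when the
                                bit rate is unknown */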
2742                         if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos >= 0) {
2743                             pos = cur_stream->video_current_pos;
2744                         } else if (cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos >= 0) {
2745                             pos = cur_stream->audio_pkt.pos;
2746                         } else
2747                             pos = avio_tell(cur_stream->ic->pb);
2748                         if (cur_stream->ic->bit_rate)
2749                             incr *= cur_stream->ic->bit_rate / 8.0;
2750                         else
2751                             incr *= 180000.0;
2752                         pos += incr;
2753                         stream_seek(cur_stream, pos, incr, 1);
2754                     } else {
2755                         pos = get_master_clock(cur_stream);
2756                         pos += incr;
2757                         stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
2758                     }
2759                 }
2760                 break;
2761             default:
2762                 break;
2763             }
2764             break;
2765         case SDL_MOUSEBUTTONDOWN:
2766             if (exit_on_mousedown) {
2767                 do_exit();
2768                 break;
2769             }
2770         case SDL_MOUSEMOTION:
2771             if (event.type == SDL_MOUSEBUTTONDOWN) {
2772                 x = event.button.x;
2773             } else {
2774                 if (event.motion.state != SDL_PRESSED)
2775                     break;
2776                 x = event.motion.x;
2777             }
2778             if (cur_stream) {
2779                 if (seek_by_bytes || cur_stream->ic->duration <= 0) {
2780                     uint64_t size =  avio_size(cur_stream->ic->pb);
2781                     stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
2782                 } else {
2783                     int64_t ts;
2784                     int ns, hh, mm, ss;
2785                     int tns, thh, tmm, tss;
2786                     tns  = cur_stream->ic->duration / 1000000LL;
2787                     thh  = tns / 3600;
2788                     tmm  = (tns % 3600) / 60;
2789                     tss  = (tns % 60);
2790                     frac = x / cur_stream->width;
2791                     ns   = frac * tns;
2792                     hh   = ns / 3600;
2793                     mm   = (ns % 3600) / 60;
2794                     ss   = (ns % 60);
2795                     fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
2796                             hh, mm, ss, thh, tmm, tss);
2797                     ts = frac * cur_stream->ic->duration;
2798                     if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
2799                         ts += cur_stream->ic->start_time;
2800                     stream_seek(cur_stream, ts, 0, 0);
2801                 }
2802             }
2803             break;
2804         case SDL_VIDEORESIZE:
2805             if (cur_stream) {
2806                 screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
2807                                           SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
2808                 screen_width  = cur_stream->width  = event.resize.w;
2809                 screen_height = cur_stream->height = event.resize.h;
2810             }
2811             break;
2812         case SDL_QUIT:
2813         case FF_QUIT_EVENT:
2814             do_exit();
2815             break;
2816         case FF_ALLOC_EVENT:
2817             video_open(event.user.data1);
2818             alloc_picture(event.user.data1);
2819             break;
2820         case FF_REFRESH_EVENT:
2821             video_refresh_timer(event.user.data1);
2822             cur_stream->refresh = 0;
2823             break;
2824         default:
2825             break;
2826         }
2827     }
2828 }
2829
2830 static int opt_frame_size(void *optctx, const char *opt, const char *arg)
2831 {
2832     av_log(NULL, AV_LOG_ERROR,
2833            "Option '%s' has been removed, use private format options instead\n", opt);
2834     return AVERROR(EINVAL);
2835 }
2836
2837 static int opt_width(void *optctx, const char *opt, const char *arg)
2838 {
2839     screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2840     return 0;
2841 }
2842
2843 static int opt_height(void *optctx, const char *opt, const char *arg)
2844 {
2845     screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2846     return 0;
2847 }
2848
2849 static int opt_format(void *optctx, const char *opt, const char *arg)
2850 {
2851     file_iformat = av_find_input_format(arg);
2852     if (!file_iformat) {
2853         fprintf(stderr, "Unknown input format: %s\n", arg);
2854         return AVERROR(EINVAL);
2855     }
2856     return 0;
2857 }
2858
2859 static int opt_frame_pix_fmt(void *optctx, const char *opt, const char *arg)
2860 {
2861     av_log(NULL, AV_LOG_ERROR,
2862            "Option '%s' has been removed, use private format options instead\n", opt);
2863     return AVERROR(EINVAL);
2864 }
2865
2866 static int opt_sync(void *optctx, const char *opt, const char *arg)
2867 {
2868     if (!strcmp(arg, "audio"))
2869         av_sync_type = AV_SYNC_AUDIO_MASTER;
2870     else if (!strcmp(arg, "video"))
2871         av_sync_type = AV_SYNC_VIDEO_MASTER;
2872     else if (!strcmp(arg, "ext"))
2873         av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2874     else {
2875         fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
2876         exit(1);
2877     }
2878     return 0;
2879 }
2880
2881 static int opt_seek(void *optctx, const char *opt, const char *arg)
2882 {
2883     start_time = parse_time_or_die(opt, arg, 1);
2884     return 0;
2885 }
2886
2887 static int opt_duration(void *optctx, const char *opt, const char *arg)
2888 {
2889     duration = parse_time_or_die(opt, arg, 1);
2890     return 0;
2891 }
2892
2893 static const OptionDef options[] = {
2894 #include "cmdutils_common_opts.h"
2895     { "x", HAS_ARG, { .func_arg = opt_width }, "force displayed width", "width" },
2896     { "y", HAS_ARG, { .func_arg = opt_height }, "force displayed height", "height" },
2897     { "s", HAS_ARG | OPT_VIDEO, { .func_arg = opt_frame_size }, "set frame size (WxH or abbreviation)", "size" },
2898     { "fs", OPT_BOOL, { &is_full_screen }, "force full screen" },
2899     { "an", OPT_BOOL, { &audio_disable }, "disable audio" },
2900     { "vn", OPT_BOOL, { &video_disable }, "disable video" },
2901     { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_AUDIO] }, "select desired audio stream", "stream_number" },
2902     { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_VIDEO] }, "select desired video stream", "stream_number" },
2903     { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_SUBTITLE] }, "select desired subtitle stream", "stream_number" },
2904     { "ss", HAS_ARG, { .func_arg = opt_seek }, "seek to a given position in seconds", "pos" },
2905     { "t", HAS_ARG, { .func_arg = opt_duration }, "play \"duration\" seconds of audio/video", "duration" },
2906     { "bytes", OPT_INT | HAS_ARG, { &seek_by_bytes }, "seek by bytes 0=off 1=on -1=auto", "val" },
2907     { "nodisp", OPT_BOOL, { &display_disable }, "disable graphical display" },
2908     { "f", HAS_ARG, { .func_arg = opt_format }, "force format", "fmt" },
2909     { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, { .func_arg = opt_frame_pix_fmt }, "set pixel format", "format" },
2910     { "stats", OPT_BOOL | OPT_EXPERT, { &show_status }, "show status", "" },
2911     { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, { &workaround_bugs }, "workaround bugs", "" },
2912     { "fast", OPT_BOOL | OPT_EXPERT, { &fast }, "non spec compliant optimizations", "" },
2913     { "genpts", OPT_BOOL | OPT_EXPERT, { &genpts }, "generate pts", "" },
2914     { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, { &decoder_reorder_pts }, "let decoder reorder pts 0=off 1=on -1=auto", ""},
2915     { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, { &skip_loop_filter }, "", "" },
2916     { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, { &skip_frame }, "", "" },
2917     { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, { &skip_idct }, "", "" },
2918     { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, { &idct }, "set idct algo",  "algo" },
2919     { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, { &error_concealment }, "set error concealment options",  "bit_mask" },
2920     { "sync", HAS_ARG | OPT_EXPERT, { .func_arg = opt_sync }, "set audio-video sync. type (type=audio/video/ext)", "type" },
2921     { "noautoexit", OPT_BOOL | OPT_EXPERT, { &noautoexit }, "Do not exit at the end of playback", "" },
2922     { "exitonkeydown", OPT_BOOL | OPT_EXPERT, { &exit_on_keydown }, "exit on key down", "" },
2923     { "exitonmousedown", OPT_BOOL | OPT_EXPERT, { &exit_on_mousedown }, "exit on mouse down", "" },
2924     { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, { &loop }, "set number of times the playback shall be looped", "loop count" },
2925     { "framedrop", OPT_BOOL | OPT_EXPERT, { &framedrop }, "drop frames when cpu is too slow", "" },
2926     { "infbuf", OPT_BOOL | OPT_EXPERT, { &infinite_buffer }, "don't limit the input buffer size (useful with realtime streams)", "" },
2927     { "window_title", OPT_STRING | HAS_ARG, { &window_title }, "set window title", "window title" },
2928 #if CONFIG_AVFILTER
2929     { "vf", OPT_STRING | HAS_ARG, { &vfilters }, "video filters", "filter list" },
2930 #endif
2931     { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, { &rdftspeed }, "rdft speed", "msecs" },
2932     { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, { opt_default }, "generic catch all option", "" },
2933     { "i", 0, { NULL }, "avconv compatibility dummy option", ""},
2934     { "autorotate", OPT_BOOL, { &autorotate }, "automatically rotate video", "" },
2935     { NULL, },
2936 };
2937
2938 static void show_usage(void)
2939 {
2940     printf("Simple media player\n");
2941     printf("usage: %s [options] input_file\n", program_name);
2942     printf("\n");
2943 }
2944
2945 void show_help_default(const char *opt, const char *arg)
2946 {
2947     av_log_set_callback(log_callback_help);
2948     show_usage();
2949     show_help_options(options, "Main options:", 0, OPT_EXPERT, 0);
2950     show_help_options(options, "Advanced options:", OPT_EXPERT, 0, 0);
2951     printf("\n");
2952     show_help_children(avcodec_get_class(), AV_OPT_FLAG_DECODING_PARAM);
2953     show_help_children(avformat_get_class(), AV_OPT_FLAG_DECODING_PARAM);
2954 #if !CONFIG_AVFILTER
2955     show_help_children(sws_get_class(), AV_OPT_FLAG_ENCODING_PARAM);
2956 #endif
2957     printf("\nWhile playing:\n"
2958            "q, ESC              quit\n"
2959            "f                   toggle full screen\n"
2960            "p, SPC              pause\n"
2961            "a                   cycle audio channel\n"
2962            "v                   cycle video channel\n"
2963            "t                   cycle subtitle channel\n"
2964            "w                   show audio waves\n"
2965            "s                   activate frame-step mode\n"
2966            "left/right          seek backward/forward 10 seconds\n"
2967            "down/up             seek backward/forward 1 minute\n"
2968            "mouse click         seek to percentage in file corresponding to fraction of width\n"
2969            );
2970 }
2971
2972 static void opt_input_file(void *optctx, const char *filename)
2973 {
2974     if (input_filename) {
2975         fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
2976                 filename, input_filename);
2977         exit(1);
2978     }
2979     if (!strcmp(filename, "-"))
2980         filename = "pipe:";
2981     input_filename = filename;
2982 }
2983
2984 /* program entry point */
2985 int main(int argc, char **argv)
2986 {
2987     int flags;
2988
2989     av_log_set_flags(AV_LOG_SKIP_REPEATED);
2990     parse_loglevel(argc, argv, options);
2991
2992     /* register all codecs, demuxers and protocols */
2993     avcodec_register_all();
2994 #if CONFIG_AVDEVICE
2995     avdevice_register_all();
2996 #endif
2997 #if CONFIG_AVFILTER
2998     avfilter_register_all();
2999 #endif
3000     av_register_all();
3001     avformat_network_init();
3002
3003     init_opts();
3004
3005     show_banner();
3006
3007     parse_options(NULL, argc, argv, options, opt_input_file);
3008
3009     if (!input_filename) {
3010         show_usage();
3011         fprintf(stderr, "An input file must be specified\n");
3012         fprintf(stderr, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
3013         exit(1);
3014     }
3015
3016     if (display_disable) {
3017         video_disable = 1;
3018     }
3019     flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3020 #if !defined(__MINGW32__) && !defined(__APPLE__)
3021     flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
3022 #endif
3023     if (SDL_Init (flags)) {
3024         fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
3025         exit(1);
3026     }
3027
3028     if (!display_disable) {
3029         const SDL_VideoInfo *vi = SDL_GetVideoInfo();
3030         fs_screen_width = vi->current_w;
3031         fs_screen_height = vi->current_h;
3032     }
3033
3034     SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
3035     SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3036     SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3037
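         /* the flush packet points at its own address so queue consumers can
            recognize it by comparing pkt->data with flush_pkt.data */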
3038     av_init_packet(&flush_pkt);
3039     flush_pkt.data = (uint8_t *)&flush_pkt;
3040
3041     cur_stream = stream_open(input_filename, file_iformat);
3042
3043     event_loop();
3044
3045     /* not reached: event_loop() does not return */
3046
3047     return 0;
3048 }