avplay: Factorize code for adding filters to the filter pipeline
1 /*
2  * avplay : Simple Media Player based on the Libav libraries
3  * Copyright (c) 2003 Fabrice Bellard
4  *
5  * This file is part of Libav.
6  *
7  * Libav is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * Libav is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with Libav; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21
22 #include "config.h"
23 #include <inttypes.h>
24 #include <math.h>
25 #include <limits.h>
26 #include <stdint.h>
27
28 #include "libavutil/avstring.h"
29 #include "libavutil/colorspace.h"
30 #include "libavutil/mathematics.h"
31 #include "libavutil/pixdesc.h"
32 #include "libavutil/imgutils.h"
33 #include "libavutil/dict.h"
34 #include "libavutil/parseutils.h"
35 #include "libavutil/samplefmt.h"
36 #include "libavutil/time.h"
37 #include "libavformat/avformat.h"
38 #include "libavdevice/avdevice.h"
39 #include "libswscale/swscale.h"
40 #include "libavresample/avresample.h"
41 #include "libavutil/opt.h"
42 #include "libavcodec/avfft.h"
43
44 #if CONFIG_AVFILTER
45 # include "libavfilter/avfilter.h"
46 # include "libavfilter/buffersink.h"
47 # include "libavfilter/buffersrc.h"
48 #endif
49
50 #include "cmdutils.h"
51
52 #include <SDL.h>
53 #include <SDL_thread.h>
54
55 #ifdef __MINGW32__
56 #undef main /* We don't want SDL to override our main() */
57 #endif
58
59 #include <assert.h>
60
61 const char program_name[] = "avplay";
62 const int program_birth_year = 2003;
63
64 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
65 #define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
66 #define MIN_FRAMES 5
67
68 /* SDL audio buffer size, in samples. Should be small to have precise
69    A/V sync as SDL does not have hardware buffer fullness info. */
70 #define SDL_AUDIO_BUFFER_SIZE 1024
71
72 /* no AV sync correction is done if below the AV sync threshold */
73 #define AV_SYNC_THRESHOLD 0.01
74 /* no AV correction is done if too big error */
75 #define AV_NOSYNC_THRESHOLD 10.0
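/* These two thresholds drive the frame scheduling in compute_target_time()
 * below: while the A-V difference stays within AV_NOSYNC_THRESHOLD, the
 * nominal inter-frame delay is dropped to 0 when video lags the master clock
 * by more than the sync threshold, and doubled when it runs ahead of it.
 * A rough sketch of that decision, mirroring the code further down:
 *
 *     sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
 *     if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
 *         if (diff <= -sync_threshold)     delay = 0;         // video is late
 *         else if (diff >= sync_threshold) delay = 2 * delay; // video is early
 *     }
 */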
76
77 #define FRAME_SKIP_FACTOR 0.05
78
79 /* maximum audio speed change to get correct sync */
80 #define SAMPLE_CORRECTION_PERCENT_MAX 10
81
82 /* we use about AUDIO_DIFF_AVG_NB A-V differences to compute the average */
83 #define AUDIO_DIFF_AVG_NB   20
84
85 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
86 #define SAMPLE_ARRAY_SIZE (2 * 65536)
87
88 static int64_t sws_flags = SWS_BICUBIC;
89
90 typedef struct PacketQueue {
91     AVPacketList *first_pkt, *last_pkt;
92     int nb_packets;
93     int size;
94     int abort_request;
95     SDL_mutex *mutex;
96     SDL_cond *cond;
97 } PacketQueue;
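/* A PacketQueue is a mutex/condition protected FIFO of AVPacketList nodes:
 * the demuxing thread appends packets with packet_queue_put() and the decoder
 * threads drain them with packet_queue_get(), which may block until a packet
 * arrives or packet_queue_abort() is called.  Minimal usage sketch, purely
 * illustrative and not part of avplay itself:
 *
 *     PacketQueue q;
 *     AVPacket pkt;
 *     packet_queue_init(&q);                 // also queues the flush packet
 *     packet_queue_put(&q, &some_pkt);       // producer side
 *     if (packet_queue_get(&q, &pkt, 1) > 0) // consumer side, blocking
 *         decode(&pkt);                      // hypothetical consumer
 *     packet_queue_end(&q);
 */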
98
99 #define VIDEO_PICTURE_QUEUE_SIZE 2
100 #define SUBPICTURE_QUEUE_SIZE 4
101
102 typedef struct VideoPicture {
103     double pts;             // presentation timestamp for this picture
104     double target_clock;    // av_gettime_relative() time at which this should be displayed ideally
105     int64_t pos;            // byte position in file
106     SDL_Overlay *bmp;
107     int width, height; /* source height & width */
108     int allocated;
109     int reallocate;
110     enum AVPixelFormat pix_fmt;
111
112     AVRational sar;
113 } VideoPicture;
114
115 typedef struct SubPicture {
116     double pts; /* presentation time stamp for this picture */
117     AVSubtitle sub;
118 } SubPicture;
119
120 enum {
121     AV_SYNC_AUDIO_MASTER, /* default choice */
122     AV_SYNC_VIDEO_MASTER,
123     AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
124 };
125
126 typedef struct VideoState {
127     SDL_Thread *parse_tid;
128     SDL_Thread *video_tid;
129     SDL_Thread *refresh_tid;
130     AVInputFormat *iformat;
131     int no_background;
132     int abort_request;
133     int paused;
134     int last_paused;
135     int seek_req;
136     int seek_flags;
137     int64_t seek_pos;
138     int64_t seek_rel;
139     int read_pause_return;
140     AVFormatContext *ic;
141
142     int audio_stream;
143
144     int av_sync_type;
145     double external_clock; /* external clock base */
146     int64_t external_clock_time;
147
148     double audio_clock;
149     double audio_diff_cum; /* used for AV difference average computation */
150     double audio_diff_avg_coef;
151     double audio_diff_threshold;
152     int audio_diff_avg_count;
153     AVStream *audio_st;
154     PacketQueue audioq;
155     int audio_hw_buf_size;
156     uint8_t silence_buf[SDL_AUDIO_BUFFER_SIZE];
157     uint8_t *audio_buf;
158     uint8_t *audio_buf1;
159     unsigned int audio_buf_size; /* in bytes */
160     int audio_buf_index; /* in bytes */
161     AVPacket audio_pkt_temp;
162     AVPacket audio_pkt;
163     enum AVSampleFormat sdl_sample_fmt;
164     uint64_t sdl_channel_layout;
165     int sdl_channels;
166     int sdl_sample_rate;
167     enum AVSampleFormat resample_sample_fmt;
168     uint64_t resample_channel_layout;
169     int resample_sample_rate;
170     AVAudioResampleContext *avr;
171     AVFrame *frame;
172
173     int show_audio; /* if true, display audio samples */
174     int16_t sample_array[SAMPLE_ARRAY_SIZE];
175     int sample_array_index;
176     int last_i_start;
177     RDFTContext *rdft;
178     int rdft_bits;
179     FFTSample *rdft_data;
180     int xpos;
181
182     SDL_Thread *subtitle_tid;
183     int subtitle_stream;
184     int subtitle_stream_changed;
185     AVStream *subtitle_st;
186     PacketQueue subtitleq;
187     SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
188     int subpq_size, subpq_rindex, subpq_windex;
189     SDL_mutex *subpq_mutex;
190     SDL_cond *subpq_cond;
191
192     double frame_timer;
193     double frame_last_pts;
194     double frame_last_delay;
195     double video_clock;             // pts of last decoded frame / predicted pts of next decoded frame
196     int video_stream;
197     AVStream *video_st;
198     PacketQueue videoq;
199     double video_current_pts;       // current displayed pts (different from video_clock if frame fifos are used)
200     double video_current_pts_drift; // video_current_pts - time (av_gettime_relative) at which we updated video_current_pts - used to have running video pts
201     int64_t video_current_pos;      // current displayed file pos
202     VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
203     int pictq_size, pictq_rindex, pictq_windex;
204     SDL_mutex *pictq_mutex;
205     SDL_cond *pictq_cond;
206 #if !CONFIG_AVFILTER
207     struct SwsContext *img_convert_ctx;
208 #endif
209
210     //    QETimer *video_timer;
211     char filename[1024];
212     int width, height, xleft, ytop;
213
214     PtsCorrectionContext pts_ctx;
215
216 #if CONFIG_AVFILTER
217     AVFilterContext *in_video_filter;   // the first filter in the video chain
218     AVFilterContext *out_video_filter;  // the last filter in the video chain
219 #endif
220
221     float skip_frames;
222     float skip_frames_index;
223     int refresh;
224 } VideoState;
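/* Rough threading picture for the code below: the parse thread (parse_tid)
 * demuxes the input file into audioq/videoq/subtitleq, video_tid and
 * subtitle_tid decode packets from those queues into pictq/subpq, refresh_tid
 * only pushes FF_REFRESH_EVENT into the SDL event loop to trigger display,
 * and the SDL audio callback consumes audioq.  All SDL rendering and overlay
 * allocation stays in the main thread (see alloc_picture() and the
 * FF_ALLOC_EVENT handling in queue_picture()). */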
225
226 /* options specified by the user */
227 static AVInputFormat *file_iformat;
228 static const char *input_filename;
229 static const char *window_title;
230 static int fs_screen_width;
231 static int fs_screen_height;
232 static int screen_width  = 0;
233 static int screen_height = 0;
234 static int audio_disable;
235 static int video_disable;
236 static int wanted_stream[AVMEDIA_TYPE_NB] = {
237     [AVMEDIA_TYPE_AUDIO]    = -1,
238     [AVMEDIA_TYPE_VIDEO]    = -1,
239     [AVMEDIA_TYPE_SUBTITLE] = -1,
240 };
241 static int seek_by_bytes = -1;
242 static int display_disable;
243 static int show_status = 1;
244 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
245 static int64_t start_time = AV_NOPTS_VALUE;
246 static int64_t duration = AV_NOPTS_VALUE;
247 static int step = 0;
248 static int workaround_bugs = 1;
249 static int fast = 0;
250 static int genpts = 0;
251 static int idct = FF_IDCT_AUTO;
252 static enum AVDiscard skip_frame       = AVDISCARD_DEFAULT;
253 static enum AVDiscard skip_idct        = AVDISCARD_DEFAULT;
254 static enum AVDiscard skip_loop_filter = AVDISCARD_DEFAULT;
255 static int error_concealment = 3;
256 static int decoder_reorder_pts = -1;
257 static int noautoexit;
258 static int exit_on_keydown;
259 static int exit_on_mousedown;
260 static int loop = 1;
261 static int framedrop = 1;
262 static int infinite_buffer = 0;
263
264 static int rdftspeed = 20;
265 #if CONFIG_AVFILTER
266 static char *vfilters = NULL;
267 #endif
268
269 /* current context */
270 static int is_full_screen;
271 static VideoState *cur_stream;
272 static int64_t audio_callback_time;
273
274 static AVPacket flush_pkt;
275
276 #define FF_ALLOC_EVENT   (SDL_USEREVENT)
277 #define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
278 #define FF_QUIT_EVENT    (SDL_USEREVENT + 2)
279
280 static SDL_Surface *screen;
281
282 static int packet_queue_put(PacketQueue *q, AVPacket *pkt);
283
284 /* packet queue handling */
285 static void packet_queue_init(PacketQueue *q)
286 {
287     memset(q, 0, sizeof(PacketQueue));
288     q->mutex = SDL_CreateMutex();
289     q->cond = SDL_CreateCond();
290     packet_queue_put(q, &flush_pkt);
291 }
292
293 static void packet_queue_flush(PacketQueue *q)
294 {
295     AVPacketList *pkt, *pkt1;
296
297     SDL_LockMutex(q->mutex);
298     for (pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
299         pkt1 = pkt->next;
300         av_free_packet(&pkt->pkt);
301         av_freep(&pkt);
302     }
303     q->last_pkt = NULL;
304     q->first_pkt = NULL;
305     q->nb_packets = 0;
306     q->size = 0;
307     SDL_UnlockMutex(q->mutex);
308 }
309
310 static void packet_queue_end(PacketQueue *q)
311 {
312     packet_queue_flush(q);
313     SDL_DestroyMutex(q->mutex);
314     SDL_DestroyCond(q->cond);
315 }
316
317 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
318 {
319     AVPacketList *pkt1;
320
321     /* duplicate the packet */
322     if (pkt != &flush_pkt && av_dup_packet(pkt) < 0)
323         return -1;
324
325     pkt1 = av_malloc(sizeof(AVPacketList));
326     if (!pkt1)
327         return -1;
328     pkt1->pkt = *pkt;
329     pkt1->next = NULL;
330
331
332     SDL_LockMutex(q->mutex);
333
334     if (!q->last_pkt)
335
336         q->first_pkt = pkt1;
337     else
338         q->last_pkt->next = pkt1;
339     q->last_pkt = pkt1;
340     q->nb_packets++;
341     q->size += pkt1->pkt.size + sizeof(*pkt1);
342     /* XXX: should duplicate packet data in DV case */
343     SDL_CondSignal(q->cond);
344
345     SDL_UnlockMutex(q->mutex);
346     return 0;
347 }
348
349 static void packet_queue_abort(PacketQueue *q)
350 {
351     SDL_LockMutex(q->mutex);
352
353     q->abort_request = 1;
354
355     SDL_CondSignal(q->cond);
356
357     SDL_UnlockMutex(q->mutex);
358 }
359
360 /* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
361 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
362 {
363     AVPacketList *pkt1;
364     int ret;
365
366     SDL_LockMutex(q->mutex);
367
368     for (;;) {
369         if (q->abort_request) {
370             ret = -1;
371             break;
372         }
373
374         pkt1 = q->first_pkt;
375         if (pkt1) {
376             q->first_pkt = pkt1->next;
377             if (!q->first_pkt)
378                 q->last_pkt = NULL;
379             q->nb_packets--;
380             q->size -= pkt1->pkt.size + sizeof(*pkt1);
381             *pkt = pkt1->pkt;
382             av_free(pkt1);
383             ret = 1;
384             break;
385         } else if (!block) {
386             ret = 0;
387             break;
388         } else {
389             SDL_CondWait(q->cond, q->mutex);
390         }
391     }
392     SDL_UnlockMutex(q->mutex);
393     return ret;
394 }
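/* Consumers normally call packet_queue_get() in blocking mode and must handle
 * two special cases, both visible in get_video_frame() and subtitle_thread()
 * below: a negative return value means the queue was aborted and the thread
 * should exit, and a packet whose data pointer equals flush_pkt.data signals
 * a seek, i.e. the codec buffers should be flushed instead of the packet
 * being decoded. */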
395
396 static inline void fill_rectangle(SDL_Surface *screen,
397                                   int x, int y, int w, int h, int color)
398 {
399     SDL_Rect rect;
400     rect.x = x;
401     rect.y = y;
402     rect.w = w;
403     rect.h = h;
404     SDL_FillRect(screen, &rect, color);
405 }
406
407 #define ALPHA_BLEND(a, oldp, newp, s)\
408 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
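/* ALPHA_BLEND() mixes an existing sample 'oldp' with a new one 'newp' using
 * the 8-bit alpha 'a'; the shift 's' lets sums of several accumulated chroma
 * samples (u1/v1 below) be blended without dividing them first.  A few worked
 * values with s = 0:
 *
 *     ALPHA_BLEND(255, oldp, newp, 0) == newp   // fully opaque
 *     ALPHA_BLEND(0,   oldp, newp, 0) == oldp   // fully transparent
 *     ALPHA_BLEND(128, 0, 255, 0)     == (0 * 127 + 255 * 128) / 255 == 128
 */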
409
410 #define RGBA_IN(r, g, b, a, s)\
411 {\
412     unsigned int v = ((const uint32_t *)(s))[0];\
413     a = (v >> 24) & 0xff;\
414     r = (v >> 16) & 0xff;\
415     g = (v >> 8) & 0xff;\
416     b = v & 0xff;\
417 }
418
419 #define YUVA_IN(y, u, v, a, s, pal)\
420 {\
421     unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
422     a = (val >> 24) & 0xff;\
423     y = (val >> 16) & 0xff;\
424     u = (val >> 8) & 0xff;\
425     v = val & 0xff;\
426 }
427
428 #define YUVA_OUT(d, y, u, v, a)\
429 {\
430     ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
431 }
432
433
434 #define BPP 1
435
436 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
437 {
438     int wrap, wrap3, width2, skip2;
439     int y, u, v, a, u1, v1, a1, w, h;
440     uint8_t *lum, *cb, *cr;
441     const uint8_t *p;
442     const uint32_t *pal;
443     int dstx, dsty, dstw, dsth;
444
445     dstw = av_clip(rect->w, 0, imgw);
446     dsth = av_clip(rect->h, 0, imgh);
447     dstx = av_clip(rect->x, 0, imgw - dstw);
448     dsty = av_clip(rect->y, 0, imgh - dsth);
449     lum = dst->data[0] + dsty * dst->linesize[0];
450     cb  = dst->data[1] + (dsty >> 1) * dst->linesize[1];
451     cr  = dst->data[2] + (dsty >> 1) * dst->linesize[2];
452
453     width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
454     skip2 = dstx >> 1;
455     wrap = dst->linesize[0];
456     wrap3 = rect->pict.linesize[0];
457     p = rect->pict.data[0];
458     pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */
459
460     if (dsty & 1) {
461         lum += dstx;
462         cb += skip2;
463         cr += skip2;
464
465         if (dstx & 1) {
466             YUVA_IN(y, u, v, a, p, pal);
467             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
468             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
469             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
470             cb++;
471             cr++;
472             lum++;
473             p += BPP;
474         }
475         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
476             YUVA_IN(y, u, v, a, p, pal);
477             u1 = u;
478             v1 = v;
479             a1 = a;
480             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
481
482             YUVA_IN(y, u, v, a, p + BPP, pal);
483             u1 += u;
484             v1 += v;
485             a1 += a;
486             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
487             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
488             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
489             cb++;
490             cr++;
491             p += 2 * BPP;
492             lum += 2;
493         }
494         if (w) {
495             YUVA_IN(y, u, v, a, p, pal);
496             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
497             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
498             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
499             p++;
500             lum++;
501         }
502         p += wrap3 - dstw * BPP;
503         lum += wrap - dstw - dstx;
504         cb += dst->linesize[1] - width2 - skip2;
505         cr += dst->linesize[2] - width2 - skip2;
506     }
507     for (h = dsth - (dsty & 1); h >= 2; h -= 2) {
508         lum += dstx;
509         cb += skip2;
510         cr += skip2;
511
512         if (dstx & 1) {
513             YUVA_IN(y, u, v, a, p, pal);
514             u1 = u;
515             v1 = v;
516             a1 = a;
517             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
518             p += wrap3;
519             lum += wrap;
520             YUVA_IN(y, u, v, a, p, pal);
521             u1 += u;
522             v1 += v;
523             a1 += a;
524             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
525             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
526             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
527             cb++;
528             cr++;
529             p += -wrap3 + BPP;
530             lum += -wrap + 1;
531         }
532         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
533             YUVA_IN(y, u, v, a, p, pal);
534             u1 = u;
535             v1 = v;
536             a1 = a;
537             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
538
539             YUVA_IN(y, u, v, a, p + BPP, pal);
540             u1 += u;
541             v1 += v;
542             a1 += a;
543             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
544             p += wrap3;
545             lum += wrap;
546
547             YUVA_IN(y, u, v, a, p, pal);
548             u1 += u;
549             v1 += v;
550             a1 += a;
551             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
552
553             YUVA_IN(y, u, v, a, p + BPP, pal);
554             u1 += u;
555             v1 += v;
556             a1 += a;
557             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
558
559             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
560             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
561
562             cb++;
563             cr++;
564             p += -wrap3 + 2 * BPP;
565             lum += -wrap + 2;
566         }
567         if (w) {
568             YUVA_IN(y, u, v, a, p, pal);
569             u1 = u;
570             v1 = v;
571             a1 = a;
572             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
573             p += wrap3;
574             lum += wrap;
575             YUVA_IN(y, u, v, a, p, pal);
576             u1 += u;
577             v1 += v;
578             a1 += a;
579             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
580             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
581             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
582             cb++;
583             cr++;
584             p += -wrap3 + BPP;
585             lum += -wrap + 1;
586         }
587         p += wrap3 + (wrap3 - dstw * BPP);
588         lum += wrap + (wrap - dstw - dstx);
589         cb += dst->linesize[1] - width2 - skip2;
590         cr += dst->linesize[2] - width2 - skip2;
591     }
592     /* handle odd height */
593     if (h) {
594         lum += dstx;
595         cb += skip2;
596         cr += skip2;
597
598         if (dstx & 1) {
599             YUVA_IN(y, u, v, a, p, pal);
600             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
601             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
602             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
603             cb++;
604             cr++;
605             lum++;
606             p += BPP;
607         }
608         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
609             YUVA_IN(y, u, v, a, p, pal);
610             u1 = u;
611             v1 = v;
612             a1 = a;
613             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
614
615             YUVA_IN(y, u, v, a, p + BPP, pal);
616             u1 += u;
617             v1 += v;
618             a1 += a;
619             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
620             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
621             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
622             cb++;
623             cr++;
624             p += 2 * BPP;
625             lum += 2;
626         }
627         if (w) {
628             YUVA_IN(y, u, v, a, p, pal);
629             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
630             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
631             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
632         }
633     }
634 }
635
636 static void free_subpicture(SubPicture *sp)
637 {
638     avsubtitle_free(&sp->sub);
639 }
640
641 static void video_image_display(VideoState *is)
642 {
643     VideoPicture *vp;
644     SubPicture *sp;
645     AVPicture pict;
646     float aspect_ratio;
647     int width, height, x, y;
648     SDL_Rect rect;
649     int i;
650
651     vp = &is->pictq[is->pictq_rindex];
652     if (vp->bmp) {
653 #if CONFIG_AVFILTER
654          if (!vp->sar.num)
655              aspect_ratio = 0;
656          else
657              aspect_ratio = av_q2d(vp->sar);
658 #else
659
660         /* XXX: use variable in the frame */
661         if (is->video_st->sample_aspect_ratio.num)
662             aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
663         else if (is->video_st->codec->sample_aspect_ratio.num)
664             aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
665         else
666             aspect_ratio = 0;
667 #endif
668         if (aspect_ratio <= 0.0)
669             aspect_ratio = 1.0;
670         aspect_ratio *= (float)vp->width / (float)vp->height;
671
672         if (is->subtitle_st)
673         {
674             if (is->subpq_size > 0)
675             {
676                 sp = &is->subpq[is->subpq_rindex];
677
678                 if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
679                 {
680                     SDL_LockYUVOverlay (vp->bmp);
681
682                     pict.data[0] = vp->bmp->pixels[0];
683                     pict.data[1] = vp->bmp->pixels[2];
684                     pict.data[2] = vp->bmp->pixels[1];
685
686                     pict.linesize[0] = vp->bmp->pitches[0];
687                     pict.linesize[1] = vp->bmp->pitches[2];
688                     pict.linesize[2] = vp->bmp->pitches[1];
689
690                     for (i = 0; i < sp->sub.num_rects; i++)
691                         blend_subrect(&pict, sp->sub.rects[i],
692                                       vp->bmp->w, vp->bmp->h);
693
694                     SDL_UnlockYUVOverlay (vp->bmp);
695                 }
696             }
697         }
698
699
700         /* XXX: we assume the screen has a 1:1 pixel aspect ratio */
701         height = is->height;
702         width = ((int)rint(height * aspect_ratio)) & ~1;
703         if (width > is->width) {
704             width = is->width;
705             height = ((int)rint(width / aspect_ratio)) & ~1;
706         }
707         x = (is->width - width) / 2;
708         y = (is->height - height) / 2;
709         is->no_background = 0;
710         rect.x = is->xleft + x;
711         rect.y = is->ytop  + y;
712         rect.w = width;
713         rect.h = height;
714         SDL_DisplayYUVOverlay(vp->bmp, &rect);
715     }
716 }
717
718 /* get the current audio output buffer size, in bytes. With SDL, we
719    cannot get precise information */
720 static int audio_write_get_buf_size(VideoState *is)
721 {
722     return is->audio_buf_size - is->audio_buf_index;
723 }
724
725 static inline int compute_mod(int a, int b)
726 {
727     a = a % b;
728     if (a >= 0)
729         return a;
730     else
731         return a + b;
732 }
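/* compute_mod() is the mathematically non-negative modulo, unlike the C '%'
 * operator for negative operands: compute_mod(-3, 10) returns 7, whereas
 * -3 % 10 yields -3.  It is used below to wrap indices into the circular
 * sample_array. */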
733
734 static void video_audio_display(VideoState *s)
735 {
736     int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
737     int ch, channels, h, h2, bgcolor, fgcolor;
738     int64_t time_diff;
739     int rdft_bits, nb_freq;
740
741     for (rdft_bits = 1; (1 << rdft_bits) < 2 * s->height; rdft_bits++)
742         ;
743     nb_freq = 1 << (rdft_bits - 1);
744
745     /* compute display index : center on currently output samples */
746     channels = s->sdl_channels;
747     nb_display_channels = channels;
748     if (!s->paused) {
749         int data_used = s->show_audio == 1 ? s->width : (2 * nb_freq);
750         n = 2 * channels;
751         delay = audio_write_get_buf_size(s);
752         delay /= n;
753
754         /* to be more precise, we take into account the time spent since
755            the last buffer computation */
756         if (audio_callback_time) {
757             time_diff = av_gettime_relative() - audio_callback_time;
758             delay -= (time_diff * s->sdl_sample_rate) / 1000000;
759         }
760
761         delay += 2 * data_used;
762         if (delay < data_used)
763             delay = data_used;
764
765         i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
766         if (s->show_audio == 1) {
767             h = INT_MIN;
768             for (i = 0; i < 1000; i += channels) {
769                 int idx = (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
770                 int a = s->sample_array[idx];
771                 int b = s->sample_array[(idx + 4 * channels) % SAMPLE_ARRAY_SIZE];
772                 int c = s->sample_array[(idx + 5 * channels) % SAMPLE_ARRAY_SIZE];
773                 int d = s->sample_array[(idx + 9 * channels) % SAMPLE_ARRAY_SIZE];
774                 int score = a - d;
775                 if (h < score && (b ^ c) < 0) {
776                     h = score;
777                     i_start = idx;
778                 }
779             }
780         }
781
782         s->last_i_start = i_start;
783     } else {
784         i_start = s->last_i_start;
785     }
786
787     bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
788     if (s->show_audio == 1) {
789         fill_rectangle(screen,
790                        s->xleft, s->ytop, s->width, s->height,
791                        bgcolor);
792
793         fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
794
795         /* total height for one channel */
796         h = s->height / nb_display_channels;
797         /* graph height / 2 */
798         h2 = (h * 9) / 20;
799         for (ch = 0; ch < nb_display_channels; ch++) {
800             i = i_start + ch;
801             y1 = s->ytop + ch * h + (h / 2); /* position of center line */
802             for (x = 0; x < s->width; x++) {
803                 y = (s->sample_array[i] * h2) >> 15;
804                 if (y < 0) {
805                     y = -y;
806                     ys = y1 - y;
807                 } else {
808                     ys = y1;
809                 }
810                 fill_rectangle(screen,
811                                s->xleft + x, ys, 1, y,
812                                fgcolor);
813                 i += channels;
814                 if (i >= SAMPLE_ARRAY_SIZE)
815                     i -= SAMPLE_ARRAY_SIZE;
816             }
817         }
818
819         fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
820
821         for (ch = 1; ch < nb_display_channels; ch++) {
822             y = s->ytop + ch * h;
823             fill_rectangle(screen,
824                            s->xleft, y, s->width, 1,
825                            fgcolor);
826         }
827         SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
828     } else {
829         nb_display_channels= FFMIN(nb_display_channels, 2);
830         if (rdft_bits != s->rdft_bits) {
831             av_rdft_end(s->rdft);
832             av_free(s->rdft_data);
833             s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
834             s->rdft_bits = rdft_bits;
835             s->rdft_data = av_malloc(4 * nb_freq * sizeof(*s->rdft_data));
836         }
837         {
838             FFTSample *data[2];
839             for (ch = 0; ch < nb_display_channels; ch++) {
840                 data[ch] = s->rdft_data + 2 * nb_freq * ch;
841                 i = i_start + ch;
842                 for (x = 0; x < 2 * nb_freq; x++) {
843                     double w = (x-nb_freq) * (1.0 / nb_freq);
844                     data[ch][x] = s->sample_array[i] * (1.0 - w * w);
845                     i += channels;
846                     if (i >= SAMPLE_ARRAY_SIZE)
847                         i -= SAMPLE_ARRAY_SIZE;
848                 }
849                 av_rdft_calc(s->rdft, data[ch]);
850             }
851             /* Least efficient way to do this; we should of course
852              * access it directly, but it is more than fast enough. */
853             for (y = 0; y < s->height; y++) {
854                 double w = 1 / sqrt(nb_freq);
855                 int a = sqrt(w * sqrt(data[0][2 * y + 0] * data[0][2 * y + 0] + data[0][2 * y + 1] * data[0][2 * y + 1]));
856                 int b = (nb_display_channels == 2 ) ? sqrt(w * sqrt(data[1][2 * y + 0] * data[1][2 * y + 0]
857                        + data[1][2 * y + 1] * data[1][2 * y + 1])) : a;
858                 a = FFMIN(a, 255);
859                 b = FFMIN(b, 255);
860                 fgcolor = SDL_MapRGB(screen->format, a, b, (a + b) / 2);
861
862                 fill_rectangle(screen,
863                             s->xpos, s->height-y, 1, 1,
864                             fgcolor);
865             }
866         }
867         SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
868         s->xpos++;
869         if (s->xpos >= s->width)
870             s->xpos= s->xleft;
871     }
872 }
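/* Notes on the RDFT spectrum branch above (the non-waveform display mode):
 * rdft_bits is chosen so the transform length 1 << rdft_bits is the smallest
 * power of two that is at least twice the window height, each channel's
 * samples are shaped with the parabolic window (1 - w*w) before
 * av_rdft_calc(), and one column of pixels per refresh is drawn at s->xpos
 * from the resulting magnitudes, so the spectrogram scrolls horizontally over
 * time. */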
873
874 static int video_open(VideoState *is)
875 {
876     int flags = SDL_HWSURFACE | SDL_ASYNCBLIT | SDL_HWACCEL;
877     int w,h;
878
879     if (is_full_screen) flags |= SDL_FULLSCREEN;
880     else                flags |= SDL_RESIZABLE;
881
882     if (is_full_screen && fs_screen_width) {
883         w = fs_screen_width;
884         h = fs_screen_height;
885     } else if (!is_full_screen && screen_width) {
886         w = screen_width;
887         h = screen_height;
888 #if CONFIG_AVFILTER
889     } else if (is->out_video_filter && is->out_video_filter->inputs[0]) {
890         w = is->out_video_filter->inputs[0]->w;
891         h = is->out_video_filter->inputs[0]->h;
892 #else
893     } else if (is->video_st && is->video_st->codec->width) {
894         w = is->video_st->codec->width;
895         h = is->video_st->codec->height;
896 #endif
897     } else {
898         w = 640;
899         h = 480;
900     }
901     if (screen && is->width == screen->w && screen->w == w
902        && is->height== screen->h && screen->h == h)
903         return 0;
904
905 #if defined(__APPLE__) && !SDL_VERSION_ATLEAST(1, 2, 14)
906     /* setting bits_per_pixel = 0 or 32 causes blank video on OS X and older SDL */
907     screen = SDL_SetVideoMode(w, h, 24, flags);
908 #else
909     screen = SDL_SetVideoMode(w, h, 0, flags);
910 #endif
911     if (!screen) {
912         fprintf(stderr, "SDL: could not set video mode - exiting\n");
913         return -1;
914     }
915     if (!window_title)
916         window_title = input_filename;
917     SDL_WM_SetCaption(window_title, window_title);
918
919     is->width  = screen->w;
920     is->height = screen->h;
921
922     return 0;
923 }
924
925 /* display the current picture, if any */
926 static void video_display(VideoState *is)
927 {
928     if (!screen)
929         video_open(cur_stream);
930     if (is->audio_st && is->show_audio)
931         video_audio_display(is);
932     else if (is->video_st)
933         video_image_display(is);
934 }
935
936 static int refresh_thread(void *opaque)
937 {
938     VideoState *is= opaque;
939     while (!is->abort_request) {
940         SDL_Event event;
941         event.type = FF_REFRESH_EVENT;
942         event.user.data1 = opaque;
943         if (!is->refresh) {
944             is->refresh = 1;
945             SDL_PushEvent(&event);
946         }
947         av_usleep(is->audio_st && is->show_audio ? rdftspeed * 1000 : 5000); // FIXME: ideally we should wait for the correct time, but SDL's event passing is so slow it would be silly
948     }
949     return 0;
950 }
951
952 /* get the current audio clock value */
953 static double get_audio_clock(VideoState *is)
954 {
955     double pts;
956     int hw_buf_size, bytes_per_sec;
957     pts = is->audio_clock;
958     hw_buf_size = audio_write_get_buf_size(is);
959     bytes_per_sec = 0;
960     if (is->audio_st) {
961         bytes_per_sec = is->sdl_sample_rate * is->sdl_channels *
962                         av_get_bytes_per_sample(is->sdl_sample_fmt);
963     }
964     if (bytes_per_sec)
965         pts -= (double)hw_buf_size / bytes_per_sec;
966     return pts;
967 }
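/* Illustrative example of the correction above: with 48 kHz stereo S16
 * output, bytes_per_sec = 48000 * 2 * 2 = 192000; if 8192 bytes are still
 * pending in the audio buffer, the returned clock is audio_clock minus
 * 8192.0 / 192000, i.e. about 43 ms earlier than the end of the buffered
 * data. */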
968
969 /* get the current video clock value */
970 static double get_video_clock(VideoState *is)
971 {
972     if (is->paused) {
973         return is->video_current_pts;
974     } else {
975         return is->video_current_pts_drift + av_gettime_relative() / 1000000.0;
976     }
977 }
978
979 /* get the current external clock value */
980 static double get_external_clock(VideoState *is)
981 {
982     int64_t ti;
983     ti = av_gettime_relative();
984     return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
985 }
986
987 /* get the current master clock value */
988 static double get_master_clock(VideoState *is)
989 {
990     double val;
991
992     if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
993         if (is->video_st)
994             val = get_video_clock(is);
995         else
996             val = get_audio_clock(is);
997     } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
998         if (is->audio_st)
999             val = get_audio_clock(is);
1000         else
1001             val = get_video_clock(is);
1002     } else {
1003         val = get_external_clock(is);
1004     }
1005     return val;
1006 }
1007
1008 /* seek in the stream */
1009 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1010 {
1011     if (!is->seek_req) {
1012         is->seek_pos = pos;
1013         is->seek_rel = rel;
1014         is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1015         if (seek_by_bytes)
1016             is->seek_flags |= AVSEEK_FLAG_BYTE;
1017         is->seek_req = 1;
1018     }
1019 }
1020
1021 /* pause or resume the video */
1022 static void stream_pause(VideoState *is)
1023 {
1024     if (is->paused) {
1025         is->frame_timer += av_gettime_relative() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
1026         if (is->read_pause_return != AVERROR(ENOSYS)) {
1027             is->video_current_pts = is->video_current_pts_drift + av_gettime_relative() / 1000000.0;
1028         }
1029         is->video_current_pts_drift = is->video_current_pts - av_gettime_relative() / 1000000.0;
1030     }
1031     is->paused = !is->paused;
1032 }
1033
1034 static double compute_target_time(double frame_current_pts, VideoState *is)
1035 {
1036     double delay, sync_threshold, diff = 0;
1037
1038     /* compute nominal delay */
1039     delay = frame_current_pts - is->frame_last_pts;
1040     if (delay <= 0 || delay >= 10.0) {
1041         /* if incorrect delay, use previous one */
1042         delay = is->frame_last_delay;
1043     } else {
1044         is->frame_last_delay = delay;
1045     }
1046     is->frame_last_pts = frame_current_pts;
1047
1048     /* update delay to follow master synchronisation source */
1049     if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1050          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1051         /* if video is slave, we try to correct big delays by
1052            duplicating or deleting a frame */
1053         diff = get_video_clock(is) - get_master_clock(is);
1054
1055         /* skip or repeat frame. We take into account the
1056            delay to compute the threshold. I still don't know
1057            if it is the best guess */
1058         sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1059         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1060             if (diff <= -sync_threshold)
1061                 delay = 0;
1062             else if (diff >= sync_threshold)
1063                 delay = 2 * delay;
1064         }
1065     }
1066     is->frame_timer += delay;
1067
1068     av_log(NULL, AV_LOG_TRACE, "video: delay=%0.3f pts=%0.3f A-V=%f\n",
1069             delay, frame_current_pts, -diff);
1070
1071     return is->frame_timer;
1072 }
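/* Worked example of the logic above, with made-up numbers: if the nominal
 * frame delay is 40 ms and the video clock is 100 ms behind the master clock
 * (diff = -0.1), then sync_threshold = FFMAX(0.01, 0.04) = 0.04 and
 * diff <= -sync_threshold, so delay is forced to 0 and the frame is scheduled
 * immediately.  Had the video been 100 ms ahead instead, the delay would have
 * been doubled to 80 ms to let the master clock catch up. */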
1073
1074 /* called to display each frame */
1075 static void video_refresh_timer(void *opaque)
1076 {
1077     VideoState *is = opaque;
1078     VideoPicture *vp;
1079
1080     SubPicture *sp, *sp2;
1081
1082     if (is->video_st) {
1083 retry:
1084         if (is->pictq_size == 0) {
1085             // nothing to do, no picture to display in the queue
1086         } else {
1087             double time = av_gettime_relative() / 1000000.0;
1088             double next_target;
1089             /* dequeue the picture */
1090             vp = &is->pictq[is->pictq_rindex];
1091
1092             if (time < vp->target_clock)
1093                 return;
1094             /* update current video pts */
1095             is->video_current_pts = vp->pts;
1096             is->video_current_pts_drift = is->video_current_pts - time;
1097             is->video_current_pos = vp->pos;
1098             if (is->pictq_size > 1) {
1099                 VideoPicture *nextvp = &is->pictq[(is->pictq_rindex + 1) % VIDEO_PICTURE_QUEUE_SIZE];
1100                 assert(nextvp->target_clock >= vp->target_clock);
1101                 next_target= nextvp->target_clock;
1102             } else {
1103                 next_target = vp->target_clock + is->video_clock - vp->pts; // FIXME pass durations cleanly
1104             }
1105             if (framedrop && time > next_target) {
1106                 is->skip_frames *= 1.0 + FRAME_SKIP_FACTOR;
1107                 if (is->pictq_size > 1 || time > next_target + 0.5) {
1108                     /* update queue size and signal for next picture */
1109                     if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1110                         is->pictq_rindex = 0;
1111
1112                     SDL_LockMutex(is->pictq_mutex);
1113                     is->pictq_size--;
1114                     SDL_CondSignal(is->pictq_cond);
1115                     SDL_UnlockMutex(is->pictq_mutex);
1116                     goto retry;
1117                 }
1118             }
1119
1120             if (is->subtitle_st) {
1121                 if (is->subtitle_stream_changed) {
1122                     SDL_LockMutex(is->subpq_mutex);
1123
1124                     while (is->subpq_size) {
1125                         free_subpicture(&is->subpq[is->subpq_rindex]);
1126
1127                         /* update queue size and signal for next picture */
1128                         if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1129                             is->subpq_rindex = 0;
1130
1131                         is->subpq_size--;
1132                     }
1133                     is->subtitle_stream_changed = 0;
1134
1135                     SDL_CondSignal(is->subpq_cond);
1136                     SDL_UnlockMutex(is->subpq_mutex);
1137                 } else {
1138                     if (is->subpq_size > 0) {
1139                         sp = &is->subpq[is->subpq_rindex];
1140
1141                         if (is->subpq_size > 1)
1142                             sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1143                         else
1144                             sp2 = NULL;
1145
1146                         if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1147                                 || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1148                         {
1149                             free_subpicture(sp);
1150
1151                             /* update queue size and signal for next picture */
1152                             if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1153                                 is->subpq_rindex = 0;
1154
1155                             SDL_LockMutex(is->subpq_mutex);
1156                             is->subpq_size--;
1157                             SDL_CondSignal(is->subpq_cond);
1158                             SDL_UnlockMutex(is->subpq_mutex);
1159                         }
1160                     }
1161                 }
1162             }
1163
1164             /* display picture */
1165             if (!display_disable)
1166                 video_display(is);
1167
1168             /* update queue size and signal for next picture */
1169             if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1170                 is->pictq_rindex = 0;
1171
1172             SDL_LockMutex(is->pictq_mutex);
1173             is->pictq_size--;
1174             SDL_CondSignal(is->pictq_cond);
1175             SDL_UnlockMutex(is->pictq_mutex);
1176         }
1177     } else if (is->audio_st) {
1178         /* draw the next audio frame */
1179
1180         /* if there is only an audio stream, display the audio bars (better
1181            than nothing, just to test the implementation) */
1182
1183         /* display picture */
1184         if (!display_disable)
1185             video_display(is);
1186     }
1187     if (show_status) {
1188         static int64_t last_time;
1189         int64_t cur_time;
1190         int aqsize, vqsize, sqsize;
1191         double av_diff;
1192
1193         cur_time = av_gettime_relative();
1194         if (!last_time || (cur_time - last_time) >= 30000) {
1195             aqsize = 0;
1196             vqsize = 0;
1197             sqsize = 0;
1198             if (is->audio_st)
1199                 aqsize = is->audioq.size;
1200             if (is->video_st)
1201                 vqsize = is->videoq.size;
1202             if (is->subtitle_st)
1203                 sqsize = is->subtitleq.size;
1204             av_diff = 0;
1205             if (is->audio_st && is->video_st)
1206                 av_diff = get_audio_clock(is) - get_video_clock(is);
1207             printf("%7.2f A-V:%7.3f s:%3.1f aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
1208                    get_master_clock(is), av_diff, FFMAX(is->skip_frames - 1, 0), aqsize / 1024,
1209                    vqsize / 1024, sqsize, is->pts_ctx.num_faulty_dts, is->pts_ctx.num_faulty_pts);
1210             fflush(stdout);
1211             last_time = cur_time;
1212         }
1213     }
1214 }
1215
1216 static void stream_close(VideoState *is)
1217 {
1218     VideoPicture *vp;
1219     int i;
1220     /* XXX: use a special url_shutdown call to abort parse cleanly */
1221     is->abort_request = 1;
1222     SDL_WaitThread(is->parse_tid, NULL);
1223     SDL_WaitThread(is->refresh_tid, NULL);
1224
1225     /* free all pictures */
1226     for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
1227         vp = &is->pictq[i];
1228         if (vp->bmp) {
1229             SDL_FreeYUVOverlay(vp->bmp);
1230             vp->bmp = NULL;
1231         }
1232     }
1233     SDL_DestroyMutex(is->pictq_mutex);
1234     SDL_DestroyCond(is->pictq_cond);
1235     SDL_DestroyMutex(is->subpq_mutex);
1236     SDL_DestroyCond(is->subpq_cond);
1237 #if !CONFIG_AVFILTER
1238     if (is->img_convert_ctx)
1239         sws_freeContext(is->img_convert_ctx);
1240 #endif
1241     av_free(is);
1242 }
1243
1244 static void do_exit(void)
1245 {
1246     if (cur_stream) {
1247         stream_close(cur_stream);
1248         cur_stream = NULL;
1249     }
1250     uninit_opts();
1251     avformat_network_deinit();
1252     if (show_status)
1253         printf("\n");
1254     SDL_Quit();
1255     av_log(NULL, AV_LOG_QUIET, "");
1256     exit(0);
1257 }
1258
1259 /* allocate a picture (this needs to be done in the main thread to avoid
1260    potential locking problems) */
1261 static void alloc_picture(void *opaque)
1262 {
1263     VideoState *is = opaque;
1264     VideoPicture *vp;
1265
1266     vp = &is->pictq[is->pictq_windex];
1267
1268     if (vp->bmp)
1269         SDL_FreeYUVOverlay(vp->bmp);
1270
1271 #if CONFIG_AVFILTER
1272     vp->width   = is->out_video_filter->inputs[0]->w;
1273     vp->height  = is->out_video_filter->inputs[0]->h;
1274     vp->pix_fmt = is->out_video_filter->inputs[0]->format;
1275 #else
1276     vp->width   = is->video_st->codec->width;
1277     vp->height  = is->video_st->codec->height;
1278     vp->pix_fmt = is->video_st->codec->pix_fmt;
1279 #endif
1280
1281     vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
1282                                    SDL_YV12_OVERLAY,
1283                                    screen);
1284     if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
1285         /* SDL allocates a buffer smaller than requested if the video
1286          * overlay hardware is unable to support the requested size. */
1287         fprintf(stderr, "Error: the video system does not support an image\n"
1288                         "size of %dx%d pixels. Try using -vf \"scale=w:h\"\n"
1289                         "to reduce the image size.\n", vp->width, vp->height );
1290         do_exit();
1291     }
1292
1293     SDL_LockMutex(is->pictq_mutex);
1294     vp->allocated = 1;
1295     SDL_CondSignal(is->pictq_cond);
1296     SDL_UnlockMutex(is->pictq_mutex);
1297 }
1298
1299 /* The 'pts' parameter is the dts of the packet / pts of the frame and
1300  * guessed if not known. */
1301 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, int64_t pos)
1302 {
1303     VideoPicture *vp;
1304 #if CONFIG_AVFILTER
1305     AVPicture pict_src;
1306 #else
1307     int dst_pix_fmt = AV_PIX_FMT_YUV420P;
1308 #endif
1309     /* wait until we have space to put a new picture */
1310     SDL_LockMutex(is->pictq_mutex);
1311
1312     if (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE && !is->refresh)
1313         is->skip_frames = FFMAX(1.0 - FRAME_SKIP_FACTOR, is->skip_frames * (1.0 - FRAME_SKIP_FACTOR));
1314
1315     while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1316            !is->videoq.abort_request) {
1317         SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1318     }
1319     SDL_UnlockMutex(is->pictq_mutex);
1320
1321     if (is->videoq.abort_request)
1322         return -1;
1323
1324     vp = &is->pictq[is->pictq_windex];
1325
1326     vp->sar = src_frame->sample_aspect_ratio;
1327
1328     /* alloc or resize hardware picture buffer */
1329     if (!vp->bmp || vp->reallocate ||
1330 #if CONFIG_AVFILTER
1331         vp->width  != is->out_video_filter->inputs[0]->w ||
1332         vp->height != is->out_video_filter->inputs[0]->h) {
1333 #else
1334         vp->width != is->video_st->codec->width ||
1335         vp->height != is->video_st->codec->height) {
1336 #endif
1337         SDL_Event event;
1338
1339         vp->allocated  = 0;
1340         vp->reallocate = 0;
1341
1342         /* the allocation must be done in the main thread to avoid
1343            locking problems */
1344         event.type = FF_ALLOC_EVENT;
1345         event.user.data1 = is;
1346         SDL_PushEvent(&event);
1347
1348         /* wait until the picture is allocated */
1349         SDL_LockMutex(is->pictq_mutex);
1350         while (!vp->allocated && !is->videoq.abort_request) {
1351             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1352         }
1353         SDL_UnlockMutex(is->pictq_mutex);
1354
1355         if (is->videoq.abort_request)
1356             return -1;
1357     }
1358
1359     /* if the frame is not skipped, then display it */
1360     if (vp->bmp) {
1361         AVPicture pict = { { 0 } };
1362
1363         /* get a pointer on the bitmap */
1364         SDL_LockYUVOverlay (vp->bmp);
1365
1366         pict.data[0] = vp->bmp->pixels[0];
1367         pict.data[1] = vp->bmp->pixels[2];
1368         pict.data[2] = vp->bmp->pixels[1];
1369
1370         pict.linesize[0] = vp->bmp->pitches[0];
1371         pict.linesize[1] = vp->bmp->pitches[2];
1372         pict.linesize[2] = vp->bmp->pitches[1];
1373
1374 #if CONFIG_AVFILTER
1375         pict_src.data[0] = src_frame->data[0];
1376         pict_src.data[1] = src_frame->data[1];
1377         pict_src.data[2] = src_frame->data[2];
1378
1379         pict_src.linesize[0] = src_frame->linesize[0];
1380         pict_src.linesize[1] = src_frame->linesize[1];
1381         pict_src.linesize[2] = src_frame->linesize[2];
1382
1383         // FIXME use direct rendering
1384         av_picture_copy(&pict, &pict_src,
1385                         vp->pix_fmt, vp->width, vp->height);
1386 #else
1387         av_opt_get_int(sws_opts, "sws_flags", 0, &sws_flags);
1388         is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
1389             vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
1390             dst_pix_fmt, sws_flags, NULL, NULL, NULL);
1391         if (!is->img_convert_ctx) {
1392             fprintf(stderr, "Cannot initialize the conversion context\n");
1393             exit(1);
1394         }
1395         sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1396                   0, vp->height, pict.data, pict.linesize);
1397 #endif
1398         /* update the bitmap content */
1399         SDL_UnlockYUVOverlay(vp->bmp);
1400
1401         vp->pts = pts;
1402         vp->pos = pos;
1403
1404         /* now we can update the picture count */
1405         if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1406             is->pictq_windex = 0;
1407         SDL_LockMutex(is->pictq_mutex);
1408         vp->target_clock = compute_target_time(vp->pts, is);
1409
1410         is->pictq_size++;
1411         SDL_UnlockMutex(is->pictq_mutex);
1412     }
1413     return 0;
1414 }
1415
1416 /* Compute the exact PTS for the picture if it is omitted in the stream.
1417  * The 'pts1' parameter is the dts of the packet / pts of the frame. */
1418 static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
1419 {
1420     double frame_delay, pts;
1421     int ret;
1422
1423     pts = pts1;
1424
1425     if (pts != 0) {
1426         /* update video clock with pts, if present */
1427         is->video_clock = pts;
1428     } else {
1429         pts = is->video_clock;
1430     }
1431     /* update video clock for next frame */
1432     frame_delay = av_q2d(is->video_st->codec->time_base);
1433     /* for MPEG2, the frame can be repeated, so we update the
1434        clock accordingly */
1435     frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1436     is->video_clock += frame_delay;
1437
1438     ret = queue_picture(is, src_frame, pts, pos);
1439     av_frame_unref(src_frame);
1440     return ret;
1441 }
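/* The repeat_pict handling above is the usual "repeat first field"
 * accounting: each repeated field extends the frame duration by half a frame
 * period.  With illustrative numbers, for a 25 fps time base the nominal
 * frame_delay is 0.04 s, so a frame with repeat_pict == 1 advances
 * video_clock by 0.04 + 1 * 0.02 = 0.06 s. */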
1442
1443 static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
1444 {
1445     int got_picture, i;
1446
1447     if (packet_queue_get(&is->videoq, pkt, 1) < 0)
1448         return -1;
1449
1450     if (pkt->data == flush_pkt.data) {
1451         avcodec_flush_buffers(is->video_st->codec);
1452
1453         SDL_LockMutex(is->pictq_mutex);
1454         // Make sure there are no long delay timers (ideally we should just flush the queue, but that's harder)
1455         for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
1456             is->pictq[i].target_clock= 0;
1457         }
1458         while (is->pictq_size && !is->videoq.abort_request) {
1459             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1460         }
1461         is->video_current_pos = -1;
1462         SDL_UnlockMutex(is->pictq_mutex);
1463
1464         init_pts_correction(&is->pts_ctx);
1465         is->frame_last_pts = AV_NOPTS_VALUE;
1466         is->frame_last_delay = 0;
1467         is->frame_timer = (double)av_gettime_relative() / 1000000.0;
1468         is->skip_frames = 1;
1469         is->skip_frames_index = 0;
1470         return 0;
1471     }
1472
1473     avcodec_decode_video2(is->video_st->codec, frame, &got_picture, pkt);
1474
1475     if (got_picture) {
1476         if (decoder_reorder_pts == -1) {
1477             *pts = guess_correct_pts(&is->pts_ctx, frame->pkt_pts, frame->pkt_dts);
1478         } else if (decoder_reorder_pts) {
1479             *pts = frame->pkt_pts;
1480         } else {
1481             *pts = frame->pkt_dts;
1482         }
1483
1484         if (*pts == AV_NOPTS_VALUE) {
1485             *pts = 0;
1486         }
1487         if (is->video_st->sample_aspect_ratio.num) {
1488             frame->sample_aspect_ratio = is->video_st->sample_aspect_ratio;
1489         }
1490
1491         is->skip_frames_index += 1;
1492         if (is->skip_frames_index >= is->skip_frames) {
1493             is->skip_frames_index -= FFMAX(is->skip_frames, 1.0);
1494             return 1;
1495         }
1496         av_frame_unref(frame);
1497     }
1498     return 0;
1499 }
1500
1501 #if CONFIG_AVFILTER
1502 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters)
1503 {
1504     char sws_flags_str[128];
1505     char buffersrc_args[256];
1506     int ret;
1507     AVFilterContext *filt_src = NULL, *filt_out = NULL, *last_filter;
1508     AVCodecContext *codec = is->video_st->codec;
1509
1510     snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%"PRId64, sws_flags);
1511     graph->scale_sws_opts = av_strdup(sws_flags_str);
1512
1513     snprintf(buffersrc_args, sizeof(buffersrc_args), "%d:%d:%d:%d:%d:%d:%d",
1514              codec->width, codec->height, codec->pix_fmt,
1515              is->video_st->time_base.num, is->video_st->time_base.den,
1516              codec->sample_aspect_ratio.num, codec->sample_aspect_ratio.den);
1517
1518
1519     if ((ret = avfilter_graph_create_filter(&filt_src,
1520                                             avfilter_get_by_name("buffer"),
1521                                             "src", buffersrc_args, NULL,
1522                                             graph)) < 0)
1523         return ret;
1524     if ((ret = avfilter_graph_create_filter(&filt_out,
1525                                             avfilter_get_by_name("buffersink"),
1526                                             "out", NULL, NULL, graph)) < 0)
1527         return ret;
1528
1529     last_filter = filt_out;
1530
1531 /* Note: this macro inserts a filter before the most recently added one, so
1532  * the filters end up being processed in reverse order of insertion */
1533 #define INSERT_FILT(name, arg) do {                                          \
1534     AVFilterContext *filt_ctx;                                               \
1535                                                                              \
1536     ret = avfilter_graph_create_filter(&filt_ctx,                            \
1537                                        avfilter_get_by_name(name),           \
1538                                        "avplay_" name, arg, NULL, graph);    \
1539     if (ret < 0)                                                             \
1540         return ret;                                                          \
1541                                                                              \
1542     ret = avfilter_link(filt_ctx, 0, last_filter, 0);                        \
1543     if (ret < 0)                                                             \
1544         return ret;                                                          \
1545                                                                              \
1546     last_filter = filt_ctx;                                                  \
1547 } while (0)
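    /* Additional processing stages could be prepended the same way; the two
     * lines below are only an illustration and are not part of avplay:
     *
     *     INSERT_FILT("yadif", NULL);         // deinterlace
     *     INSERT_FILT("transpose", "clock");  // rotate 90 degrees clockwise
     *
     * Because each INSERT_FILT() links the new filter in front of the one
     * added before it, filters written later in this function run earlier in
     * the actual processing chain. */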
1548
1549     INSERT_FILT("format", "yuv420p");
1550
1551     if (vfilters) {
1552         AVFilterInOut *outputs = avfilter_inout_alloc();
1553         AVFilterInOut *inputs  = avfilter_inout_alloc();
1554
1555         outputs->name    = av_strdup("in");
1556         outputs->filter_ctx = filt_src;
1557         outputs->pad_idx = 0;
1558         outputs->next    = NULL;
1559
1560         inputs->name    = av_strdup("out");
1561         inputs->filter_ctx = last_filter;
1562         inputs->pad_idx = 0;
1563         inputs->next    = NULL;
1564
1565         if ((ret = avfilter_graph_parse(graph, vfilters, inputs, outputs, NULL)) < 0)
1566             return ret;
1567     } else {
1568         if ((ret = avfilter_link(filt_src, 0, last_filter, 0)) < 0)
1569             return ret;
1570     }
1571
1572     if ((ret = avfilter_graph_config(graph, NULL)) < 0)
1573         return ret;
1574
1575     is->in_video_filter  = filt_src;
1576     is->out_video_filter = filt_out;
1577
1578     return ret;
1579 }
1580
1581 #endif  /* CONFIG_AVFILTER */
1582
1583 static int video_thread(void *arg)
1584 {
1585     AVPacket pkt = { 0 };
1586     VideoState *is = arg;
1587     AVFrame *frame = av_frame_alloc();
1588     int64_t pts_int;
1589     double pts;
1590     int ret;
1591
1592 #if CONFIG_AVFILTER
1593     AVFilterGraph *graph = avfilter_graph_alloc();
1594     AVFilterContext *filt_out = NULL, *filt_in = NULL;
1595     int last_w = is->video_st->codec->width;
1596     int last_h = is->video_st->codec->height;
1597     if (!graph) {
1598         av_frame_free(&frame);
1599         return AVERROR(ENOMEM);
1600     }
1601
1602     if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
1603         goto the_end;
1604     filt_in  = is->in_video_filter;
1605     filt_out = is->out_video_filter;
1606 #endif
1607
1608     if (!frame) {
1609 #if CONFIG_AVFILTER
1610         avfilter_graph_free(&graph);
1611 #endif
1612         return AVERROR(ENOMEM);
1613     }
1614
1615     for (;;) {
1616 #if CONFIG_AVFILTER
1617         AVRational tb;
1618 #endif
1619         while (is->paused && !is->videoq.abort_request)
1620             SDL_Delay(10);
1621
1622         av_free_packet(&pkt);
1623
1624         ret = get_video_frame(is, frame, &pts_int, &pkt);
1625         if (ret < 0)
1626             goto the_end;
1627
1628         if (!ret)
1629             continue;
1630
1631 #if CONFIG_AVFILTER
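         /* the buffer source was configured with the previous frame dimensions,
          * so a change in the decoded size requires rebuilding the whole
          * filter graph */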
1632         if (   last_w != is->video_st->codec->width
1633             || last_h != is->video_st->codec->height) {
1634             av_log(NULL, AV_LOG_TRACE, "Changing size %dx%d -> %dx%d\n", last_w, last_h,
1635                     is->video_st->codec->width, is->video_st->codec->height);
1636             avfilter_graph_free(&graph);
1637             graph = avfilter_graph_alloc();
1638             if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
1639                 goto the_end;
1640             filt_in  = is->in_video_filter;
1641             filt_out = is->out_video_filter;
1642             last_w = is->video_st->codec->width;
1643             last_h = is->video_st->codec->height;
1644         }
1645
1646         frame->pts = pts_int;
1647         ret = av_buffersrc_add_frame(filt_in, frame);
1648         if (ret < 0)
1649             goto the_end;
1650
1651         while (ret >= 0) {
1652             ret = av_buffersink_get_frame(filt_out, frame);
1653             if (ret < 0) {
1654                 ret = 0;
1655                 break;
1656             }
1657
1658             pts_int = frame->pts;
1659             tb      = filt_out->inputs[0]->time_base;
1660             if (av_cmp_q(tb, is->video_st->time_base)) {
1661                 av_unused int64_t pts1 = pts_int;
1662                 pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
1663                 av_log(NULL, AV_LOG_TRACE, "video_thread(): "
1664                         "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
1665                         tb.num, tb.den, pts1,
1666                         is->video_st->time_base.num, is->video_st->time_base.den, pts_int);
1667             }
1668             pts = pts_int * av_q2d(is->video_st->time_base);
1669             ret = output_picture2(is, frame, pts, 0);
1670         }
1671 #else
1672         pts = pts_int * av_q2d(is->video_st->time_base);
1673         ret = output_picture2(is, frame, pts,  pkt.pos);
1674 #endif
1675
1676         if (ret < 0)
1677             goto the_end;
1678
1679
1680         if (step)
1681             if (cur_stream)
1682                 stream_pause(cur_stream);
1683     }
1684  the_end:
1685 #if CONFIG_AVFILTER
1686     av_freep(&vfilters);
1687     avfilter_graph_free(&graph);
1688 #endif
1689     av_free_packet(&pkt);
1690     av_frame_free(&frame);
1691     return 0;
1692 }
1693
1694 static int subtitle_thread(void *arg)
1695 {
1696     VideoState *is = arg;
1697     SubPicture *sp;
1698     AVPacket pkt1, *pkt = &pkt1;
1699     int got_subtitle;
1700     double pts;
1701     int i, j;
1702     int r, g, b, y, u, v, a;
1703
1704     for (;;) {
1705         while (is->paused && !is->subtitleq.abort_request) {
1706             SDL_Delay(10);
1707         }
1708         if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
1709             break;
1710
1711         if (pkt->data == flush_pkt.data) {
1712             avcodec_flush_buffers(is->subtitle_st->codec);
1713             continue;
1714         }
1715         SDL_LockMutex(is->subpq_mutex);
1716         while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
1717                !is->subtitleq.abort_request) {
1718             SDL_CondWait(is->subpq_cond, is->subpq_mutex);
1719         }
1720         SDL_UnlockMutex(is->subpq_mutex);
1721
1722         if (is->subtitleq.abort_request)
1723             return 0;
1724
1725         sp = &is->subpq[is->subpq_windex];
1726
1727         /* NOTE: pts is the PTS of the _first_ subtitle beginning in
1728            this packet, if any */
1729         pts = 0;
1730         if (pkt->pts != AV_NOPTS_VALUE)
1731             pts = av_q2d(is->subtitle_st->time_base) * pkt->pts;
1732
1733         avcodec_decode_subtitle2(is->subtitle_st->codec, &sp->sub,
1734                                  &got_subtitle, pkt);
1735
1736         if (got_subtitle && sp->sub.format == 0) {
1737             sp->pts = pts;
1738
1739             for (i = 0; i < sp->sub.num_rects; i++)
1740             {
1741                 for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
1742                 {
1743                     RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
1744                     y = RGB_TO_Y_CCIR(r, g, b);
1745                     u = RGB_TO_U_CCIR(r, g, b, 0);
1746                     v = RGB_TO_V_CCIR(r, g, b, 0);
1747                     YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
1748                 }
1749             }
1750
1751             /* now we can update the picture count */
1752             if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
1753                 is->subpq_windex = 0;
1754             SDL_LockMutex(is->subpq_mutex);
1755             is->subpq_size++;
1756             SDL_UnlockMutex(is->subpq_mutex);
1757         }
1758         av_free_packet(pkt);
1759     }
1760     return 0;
1761 }
1762
1763 /* copy samples for display in the audio visualization window */
1764 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1765 {
1766     int size, len;
1767
1768     size = samples_size / sizeof(short);
1769     while (size > 0) {
1770         len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1771         if (len > size)
1772             len = size;
1773         memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1774         samples += len;
1775         is->sample_array_index += len;
1776         if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1777             is->sample_array_index = 0;
1778         size -= len;
1779     }
1780 }
1781
1782 /* return the new audio buffer size (samples can be added or deleted
1783    to get better sync if the video or external clock is the master) */
1784 static int synchronize_audio(VideoState *is, short *samples,
1785                              int samples_size1, double pts)
1786 {
1787     int n, samples_size;
1788     double ref_clock;
1789
1790     n = is->sdl_channels * av_get_bytes_per_sample(is->sdl_sample_fmt);
1791     samples_size = samples_size1;
1792
1793     /* if not master, then we try to remove or add samples to correct the clock */
1794     if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1795          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1796         double diff, avg_diff;
1797         int wanted_size, min_size, max_size, nb_samples;
1798
1799         ref_clock = get_master_clock(is);
1800         diff = get_audio_clock(is) - ref_clock;
1801
1802         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
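             /* audio_diff_cum is an exponentially weighted sum of recent A-V
              * differences (a measurement decays to ~1% of its weight after
              * AUDIO_DIFF_AVG_NB updates); multiplying by (1 - coef) below
              * turns it into a weighted average */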
1803             is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1804             if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1805                 /* not enough measurements to have a correct estimate */
1806                 is->audio_diff_avg_count++;
1807             } else {
1808                 /* estimate the A-V difference */
1809                 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1810
1811                 if (fabs(avg_diff) >= is->audio_diff_threshold) {
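                     /* convert the clock error into a byte count (diff seconds
                      * of audio at the SDL output rate times n bytes per sample
                      * frame) and clamp the correction to
                      * +/- SAMPLE_CORRECTION_PERCENT_MAX percent of the buffer */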
1812                     wanted_size = samples_size + ((int)(diff * is->sdl_sample_rate) * n);
1813                     nb_samples = samples_size / n;
1814
1815                     min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1816                     max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1817                     if (wanted_size < min_size)
1818                         wanted_size = min_size;
1819                     else if (wanted_size > max_size)
1820                         wanted_size = max_size;
1821
1822                     /* add or remove samples to correct the sync */
1823                     if (wanted_size < samples_size) {
1824                         /* remove samples */
1825                         samples_size = wanted_size;
1826                     } else if (wanted_size > samples_size) {
1827                         uint8_t *samples_end, *q;
1828                         int nb;
1829
1830                         /* add samples */
1831                         nb = wanted_size - samples_size;
1832                         samples_end = (uint8_t *)samples + samples_size - n;
1833                         q = samples_end + n;
1834                         while (nb > 0) {
1835                             memcpy(q, samples_end, n);
1836                             q += n;
1837                             nb -= n;
1838                         }
1839                         samples_size = wanted_size;
1840                     }
1841                 }
1842                 av_log(NULL, AV_LOG_TRACE, "diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
1843                         diff, avg_diff, samples_size - samples_size1,
1844                         is->audio_clock, is->video_clock, is->audio_diff_threshold);
1845             }
1846         } else {
1847             /* the difference is too big: probably initial PTS errors, so
1848                reset the A-V filter */
1849             is->audio_diff_avg_count = 0;
1850             is->audio_diff_cum       = 0;
1851         }
1852     }
1853
1854     return samples_size;
1855 }
1856
1857 /* decode one audio frame and return its uncompressed size in bytes */
1858 static int audio_decode_frame(VideoState *is, double *pts_ptr)
1859 {
1860     AVPacket *pkt_temp = &is->audio_pkt_temp;
1861     AVPacket *pkt = &is->audio_pkt;
1862     AVCodecContext *dec = is->audio_st->codec;
1863     int n, len1, data_size, got_frame;
1864     double pts;
1865     int new_packet = 0;
1866     int flush_complete = 0;
1867
1868     for (;;) {
1869         /* NOTE: the audio packet can contain several frames */
1870         while (pkt_temp->size > 0 || (!pkt_temp->data && new_packet)) {
1871             int resample_changed, audio_resample;
1872
1873             if (!is->frame) {
1874                 if (!(is->frame = av_frame_alloc()))
1875                     return AVERROR(ENOMEM);
1876             }
1877
1878             if (flush_complete)
1879                 break;
1880             new_packet = 0;
1881             len1 = avcodec_decode_audio4(dec, is->frame, &got_frame, pkt_temp);
1882             if (len1 < 0) {
1883                 /* if error, we skip the frame */
1884                 pkt_temp->size = 0;
1885                 break;
1886             }
1887
1888             pkt_temp->data += len1;
1889             pkt_temp->size -= len1;
1890
1891             if (!got_frame) {
1892                 /* stop sending empty packets if the decoder is finished */
1893                 if (!pkt_temp->data && dec->codec->capabilities & CODEC_CAP_DELAY)
1894                     flush_complete = 1;
1895                 continue;
1896             }
1897             data_size = av_samples_get_buffer_size(NULL, dec->channels,
1898                                                    is->frame->nb_samples,
1899                                                    is->frame->format, 1);
1900
1901             audio_resample = is->frame->format         != is->sdl_sample_fmt     ||
1902                              is->frame->channel_layout != is->sdl_channel_layout ||
1903                              is->frame->sample_rate    != is->sdl_sample_rate;
1904
1905             resample_changed = is->frame->format         != is->resample_sample_fmt     ||
1906                                is->frame->channel_layout != is->resample_channel_layout ||
1907                                is->frame->sample_rate    != is->resample_sample_rate;
1908
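             /* (re)open the resampler when conversion is needed for the first
              * time, or when the decoded frame's format, channel layout or
              * sample rate no longer matches what it was configured for */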
1909             if ((!is->avr && audio_resample) || resample_changed) {
1910                 int ret;
1911                 if (is->avr)
1912                     avresample_close(is->avr);
1913                 else if (audio_resample) {
1914                     is->avr = avresample_alloc_context();
1915                     if (!is->avr) {
1916                         fprintf(stderr, "error allocating AVAudioResampleContext\n");
1917                         break;
1918                     }
1919                 }
1920                 if (audio_resample) {
1921                     av_opt_set_int(is->avr, "in_channel_layout",  is->frame->channel_layout, 0);
1922                     av_opt_set_int(is->avr, "in_sample_fmt",      is->frame->format,         0);
1923                     av_opt_set_int(is->avr, "in_sample_rate",     is->frame->sample_rate,    0);
1924                     av_opt_set_int(is->avr, "out_channel_layout", is->sdl_channel_layout,    0);
1925                     av_opt_set_int(is->avr, "out_sample_fmt",     is->sdl_sample_fmt,        0);
1926                     av_opt_set_int(is->avr, "out_sample_rate",    is->sdl_sample_rate,       0);
1927
1928                     if ((ret = avresample_open(is->avr)) < 0) {
1929                         fprintf(stderr, "error initializing libavresample\n");
1930                         break;
1931                     }
1932                 }
1933                 is->resample_sample_fmt     = is->frame->format;
1934                 is->resample_channel_layout = is->frame->channel_layout;
1935                 is->resample_sample_rate    = is->frame->sample_rate;
1936             }
1937
1938             if (audio_resample) {
1939                 void *tmp_out;
1940                 int out_samples, out_size, out_linesize;
1941                 int osize      = av_get_bytes_per_sample(is->sdl_sample_fmt);
1942                 int nb_samples = is->frame->nb_samples;
1943
1944                 out_size = av_samples_get_buffer_size(&out_linesize,
1945                                                       is->sdl_channels,
1946                                                       nb_samples,
1947                                                       is->sdl_sample_fmt, 0);
1948                 tmp_out = av_realloc(is->audio_buf1, out_size);
1949                 if (!tmp_out)
1950                     return AVERROR(ENOMEM);
1951                 is->audio_buf1 = tmp_out;
1952
1953                 out_samples = avresample_convert(is->avr,
1954                                                  &is->audio_buf1,
1955                                                  out_linesize, nb_samples,
1956                                                  is->frame->data,
1957                                                  is->frame->linesize[0],
1958                                                  is->frame->nb_samples);
1959                 if (out_samples < 0) {
1960                     fprintf(stderr, "avresample_convert() failed\n");
1961                     break;
1962                 }
1963                 is->audio_buf = is->audio_buf1;
1964                 data_size = out_samples * osize * is->sdl_channels;
1965             } else {
1966                 is->audio_buf = is->frame->data[0];
1967             }
1968
1969             /* if no pts, then compute it */
1970             pts = is->audio_clock;
1971             *pts_ptr = pts;
1972             n = is->sdl_channels * av_get_bytes_per_sample(is->sdl_sample_fmt);
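             /* advance the audio clock by the duration of the decoded data:
              * data_size bytes divided by (bytes per sample frame * sample rate) */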
1973             is->audio_clock += (double)data_size /
1974                 (double)(n * is->sdl_sample_rate);
1975 #ifdef DEBUG
1976             {
1977                 static double last_clock;
1978                 printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
1979                        is->audio_clock - last_clock,
1980                        is->audio_clock, pts);
1981                 last_clock = is->audio_clock;
1982             }
1983 #endif
1984             return data_size;
1985         }
1986
1987         /* free the current packet */
1988         if (pkt->data)
1989             av_free_packet(pkt);
1990         memset(pkt_temp, 0, sizeof(*pkt_temp));
1991
1992         if (is->paused || is->audioq.abort_request) {
1993             return -1;
1994         }
1995
1996         /* read next packet */
1997         if ((new_packet = packet_queue_get(&is->audioq, pkt, 1)) < 0)
1998             return -1;
1999
2000         if (pkt->data == flush_pkt.data) {
2001             avcodec_flush_buffers(dec);
2002             flush_complete = 0;
2003         }
2004
2005         *pkt_temp = *pkt;
2006
2007         /* update the audio clock with the packet pts, if available */
2008         if (pkt->pts != AV_NOPTS_VALUE) {
2009             is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
2010         }
2011     }
2012 }
2013
2014 /* prepare a new audio buffer */
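/* This callback runs on SDL's audio thread and must always fill len bytes of
   stream, which is why a decoding error falls back to the silence buffer
   instead of returning early. */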
2015 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2016 {
2017     VideoState *is = opaque;
2018     int audio_size, len1;
2019     double pts;
2020
2021     audio_callback_time = av_gettime_relative();
2022
2023     while (len > 0) {
2024         if (is->audio_buf_index >= is->audio_buf_size) {
2025             audio_size = audio_decode_frame(is, &pts);
2026             if (audio_size < 0) {
2027                 /* if error, just output silence */
2028                 is->audio_buf      = is->silence_buf;
2029                 is->audio_buf_size = sizeof(is->silence_buf);
2030             } else {
2031                 if (is->show_audio)
2032                     update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2033                 audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
2034                                                pts);
2035                 is->audio_buf_size = audio_size;
2036             }
2037             is->audio_buf_index = 0;
2038         }
2039         len1 = is->audio_buf_size - is->audio_buf_index;
2040         if (len1 > len)
2041             len1 = len;
2042         memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2043         len -= len1;
2044         stream += len1;
2045         is->audio_buf_index += len1;
2046     }
2047 }
2048
2049 /* open a given stream. Return 0 if OK */
2050 static int stream_component_open(VideoState *is, int stream_index)
2051 {
2052     AVFormatContext *ic = is->ic;
2053     AVCodecContext *avctx;
2054     AVCodec *codec;
2055     SDL_AudioSpec wanted_spec, spec;
2056     AVDictionary *opts;
2057     AVDictionaryEntry *t = NULL;
2058     int ret = 0;
2059
2060     if (stream_index < 0 || stream_index >= ic->nb_streams)
2061         return -1;
2062     avctx = ic->streams[stream_index]->codec;
2063
2064     opts = filter_codec_opts(codec_opts, avctx->codec_id, ic, ic->streams[stream_index], NULL);
2065
2066     codec = avcodec_find_decoder(avctx->codec_id);
2067     avctx->workaround_bugs   = workaround_bugs;
2068     avctx->idct_algo         = idct;
2069     avctx->skip_frame        = skip_frame;
2070     avctx->skip_idct         = skip_idct;
2071     avctx->skip_loop_filter  = skip_loop_filter;
2072     avctx->error_concealment = error_concealment;
2073
2074     if (fast)   avctx->flags2 |= CODEC_FLAG2_FAST;
2075
2076     if (!av_dict_get(opts, "threads", NULL, 0))
2077         av_dict_set(&opts, "threads", "auto", 0);
2078     if (avctx->codec_type == AVMEDIA_TYPE_VIDEO)
2079         av_dict_set(&opts, "refcounted_frames", "1", 0);
2080     if (!codec ||
2081         (ret = avcodec_open2(avctx, codec, &opts)) < 0) {
2082         goto fail;
2083     }
2084     if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2085         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2086         ret =  AVERROR_OPTION_NOT_FOUND;
2087         goto fail;
2088     }
2089
2090     /* prepare audio output */
2091     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2092         is->sdl_sample_rate = avctx->sample_rate;
2093
2094         if (!avctx->channel_layout)
2095             avctx->channel_layout = av_get_default_channel_layout(avctx->channels);
2096         if (!avctx->channel_layout) {
2097             fprintf(stderr, "unable to guess channel layout\n");
2098             ret = AVERROR_INVALIDDATA;
2099             goto fail;
2100         }
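         /* the SDL output is opened as either mono or stereo; streams with more
          * channels are downmixed to stereo by the resampler */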
2101         if (avctx->channels == 1)
2102             is->sdl_channel_layout = AV_CH_LAYOUT_MONO;
2103         else
2104             is->sdl_channel_layout = AV_CH_LAYOUT_STEREO;
2105         is->sdl_channels = av_get_channel_layout_nb_channels(is->sdl_channel_layout);
2106
2107         wanted_spec.format = AUDIO_S16SYS;
2108         wanted_spec.freq = is->sdl_sample_rate;
2109         wanted_spec.channels = is->sdl_channels;
2110         wanted_spec.silence = 0;
2111         wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
2112         wanted_spec.callback = sdl_audio_callback;
2113         wanted_spec.userdata = is;
2114         if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2115             fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
2116             ret = AVERROR_UNKNOWN;
2117             goto fail;
2118         }
2119         is->audio_hw_buf_size = spec.size;
2120         is->sdl_sample_fmt          = AV_SAMPLE_FMT_S16;
2121         is->resample_sample_fmt     = is->sdl_sample_fmt;
2122         is->resample_channel_layout = avctx->channel_layout;
2123         is->resample_sample_rate    = avctx->sample_rate;
2124     }
2125
2126     ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2127     switch (avctx->codec_type) {
2128     case AVMEDIA_TYPE_AUDIO:
2129         is->audio_stream = stream_index;
2130         is->audio_st = ic->streams[stream_index];
2131         is->audio_buf_size  = 0;
2132         is->audio_buf_index = 0;
2133
2134         /* init averaging filter */
2135         is->audio_diff_avg_coef  = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2136         is->audio_diff_avg_count = 0;
2137         /* since we do not have a precise enough audio FIFO fullness measure,
2138            we correct audio sync only if the error is larger than this threshold */
2139         is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / avctx->sample_rate;
2140
2141         memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
2142         packet_queue_init(&is->audioq);
2143         SDL_PauseAudio(0);
2144         break;
2145     case AVMEDIA_TYPE_VIDEO:
2146         is->video_stream = stream_index;
2147         is->video_st = ic->streams[stream_index];
2148
2149         packet_queue_init(&is->videoq);
2150         is->video_tid = SDL_CreateThread(video_thread, is);
2151         break;
2152     case AVMEDIA_TYPE_SUBTITLE:
2153         is->subtitle_stream = stream_index;
2154         is->subtitle_st = ic->streams[stream_index];
2155         packet_queue_init(&is->subtitleq);
2156
2157         is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
2158         break;
2159     default:
2160         break;
2161     }
2162
2163 fail:
2164     av_dict_free(&opts);
2165
2166     return ret;
2167 }
2168
2169 static void stream_component_close(VideoState *is, int stream_index)
2170 {
2171     AVFormatContext *ic = is->ic;
2172     AVCodecContext *avctx;
2173
2174     if (stream_index < 0 || stream_index >= ic->nb_streams)
2175         return;
2176     avctx = ic->streams[stream_index]->codec;
2177
2178     switch (avctx->codec_type) {
2179     case AVMEDIA_TYPE_AUDIO:
2180         packet_queue_abort(&is->audioq);
2181
2182         SDL_CloseAudio();
2183
2184         packet_queue_end(&is->audioq);
2185         av_free_packet(&is->audio_pkt);
2186         if (is->avr)
2187             avresample_free(&is->avr);
2188         av_freep(&is->audio_buf1);
2189         is->audio_buf = NULL;
2190         av_frame_free(&is->frame);
2191
2192         if (is->rdft) {
2193             av_rdft_end(is->rdft);
2194             av_freep(&is->rdft_data);
2195             is->rdft = NULL;
2196             is->rdft_bits = 0;
2197         }
2198         break;
2199     case AVMEDIA_TYPE_VIDEO:
2200         packet_queue_abort(&is->videoq);
2201
2202         /* note: we also signal this condition to make sure we unblock the
2203            video thread in all cases */
2204         SDL_LockMutex(is->pictq_mutex);
2205         SDL_CondSignal(is->pictq_cond);
2206         SDL_UnlockMutex(is->pictq_mutex);
2207
2208         SDL_WaitThread(is->video_tid, NULL);
2209
2210         packet_queue_end(&is->videoq);
2211         break;
2212     case AVMEDIA_TYPE_SUBTITLE:
2213         packet_queue_abort(&is->subtitleq);
2214
2215         /* note: we also signal this condition to make sure we unblock the
2216            subtitle thread in all cases */
2217         SDL_LockMutex(is->subpq_mutex);
2218         is->subtitle_stream_changed = 1;
2219
2220         SDL_CondSignal(is->subpq_cond);
2221         SDL_UnlockMutex(is->subpq_mutex);
2222
2223         SDL_WaitThread(is->subtitle_tid, NULL);
2224
2225         packet_queue_end(&is->subtitleq);
2226         break;
2227     default:
2228         break;
2229     }
2230
2231     ic->streams[stream_index]->discard = AVDISCARD_ALL;
2232     avcodec_close(avctx);
2233     switch (avctx->codec_type) {
2234     case AVMEDIA_TYPE_AUDIO:
2235         is->audio_st = NULL;
2236         is->audio_stream = -1;
2237         break;
2238     case AVMEDIA_TYPE_VIDEO:
2239         is->video_st = NULL;
2240         is->video_stream = -1;
2241         break;
2242     case AVMEDIA_TYPE_SUBTITLE:
2243         is->subtitle_st = NULL;
2244         is->subtitle_stream = -1;
2245         break;
2246     default:
2247         break;
2248     }
2249 }
2250
2251 /* since we have only one decoding thread, we can use a global
2252    variable instead of a thread local variable */
2253 static VideoState *global_video_state;
2254
2255 static int decode_interrupt_cb(void *ctx)
2256 {
2257     return global_video_state && global_video_state->abort_request;
2258 }
2259
2260 /* this thread gets the stream from the disk or the network */
2261 static int decode_thread(void *arg)
2262 {
2263     VideoState *is = arg;
2264     AVFormatContext *ic = NULL;
2265     int err, i, ret;
2266     int st_index[AVMEDIA_TYPE_NB];
2267     AVPacket pkt1, *pkt = &pkt1;
2268     int eof = 0;
2269     int pkt_in_play_range = 0;
2270     AVDictionaryEntry *t;
2271     AVDictionary **opts;
2272     int orig_nb_streams;
2273
2274     memset(st_index, -1, sizeof(st_index));
2275     is->video_stream = -1;
2276     is->audio_stream = -1;
2277     is->subtitle_stream = -1;
2278
2279     global_video_state = is;
2280
2281     ic = avformat_alloc_context();
2282     if (!ic) {
2283         av_log(NULL, AV_LOG_FATAL, "Could not allocate context.\n");
2284         ret = AVERROR(ENOMEM);
2285         goto fail;
2286     }
2287     ic->interrupt_callback.callback = decode_interrupt_cb;
2288     err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
2289     if (err < 0) {
2290         print_error(is->filename, err);
2291         ret = -1;
2292         goto fail;
2293     }
2294     if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2295         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2296         ret = AVERROR_OPTION_NOT_FOUND;
2297         goto fail;
2298     }
2299     is->ic = ic;
2300
2301     if (genpts)
2302         ic->flags |= AVFMT_FLAG_GENPTS;
2303
2304     opts = setup_find_stream_info_opts(ic, codec_opts);
2305     orig_nb_streams = ic->nb_streams;
2306
2307     err = avformat_find_stream_info(ic, opts);
2308
2309     for (i = 0; i < orig_nb_streams; i++)
2310         av_dict_free(&opts[i]);
2311     av_freep(&opts);
2312
2313     if (err < 0) {
2314         fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
2315         ret = -1;
2316         goto fail;
2317     }
2318
2319     if (ic->pb)
2320         ic->pb->eof_reached = 0; // FIXME hack, avplay maybe should not use url_feof() to test for the end
2321
2322     if (seek_by_bytes < 0)
2323         seek_by_bytes = !!(ic->iformat->flags & AVFMT_TS_DISCONT);
2324
2325     /* if seeking requested, we execute it */
2326     if (start_time != AV_NOPTS_VALUE) {
2327         int64_t timestamp;
2328
2329         timestamp = start_time;
2330         /* add the stream start time */
2331         if (ic->start_time != AV_NOPTS_VALUE)
2332             timestamp += ic->start_time;
2333         ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2334         if (ret < 0) {
2335             fprintf(stderr, "%s: could not seek to position %0.3f\n",
2336                     is->filename, (double)timestamp / AV_TIME_BASE);
2337         }
2338     }
2339
2340     for (i = 0; i < ic->nb_streams; i++)
2341         ic->streams[i]->discard = AVDISCARD_ALL;
2342     if (!video_disable)
2343         st_index[AVMEDIA_TYPE_VIDEO] =
2344             av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2345                                 wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2346     if (!audio_disable)
2347         st_index[AVMEDIA_TYPE_AUDIO] =
2348             av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2349                                 wanted_stream[AVMEDIA_TYPE_AUDIO],
2350                                 st_index[AVMEDIA_TYPE_VIDEO],
2351                                 NULL, 0);
2352     if (!video_disable)
2353         st_index[AVMEDIA_TYPE_SUBTITLE] =
2354             av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2355                                 wanted_stream[AVMEDIA_TYPE_SUBTITLE],
2356                                 (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2357                                  st_index[AVMEDIA_TYPE_AUDIO] :
2358                                  st_index[AVMEDIA_TYPE_VIDEO]),
2359                                 NULL, 0);
2360     if (show_status) {
2361         av_dump_format(ic, 0, is->filename, 0);
2362     }
2363
2364     /* open the streams */
2365     if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2366         stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2367     }
2368
2369     ret = -1;
2370     if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2371         ret = stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2372     }
2373     is->refresh_tid = SDL_CreateThread(refresh_thread, is);
2374     if (ret < 0) {
2375         if (!display_disable)
2376             is->show_audio = 2;
2377     }
2378
2379     if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2380         stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2381     }
2382
2383     if (is->video_stream < 0 && is->audio_stream < 0) {
2384         fprintf(stderr, "%s: could not open codecs\n", is->filename);
2385         ret = -1;
2386         goto fail;
2387     }
2388
2389     for (;;) {
2390         if (is->abort_request)
2391             break;
2392         if (is->paused != is->last_paused) {
2393             is->last_paused = is->paused;
2394             if (is->paused)
2395                 is->read_pause_return = av_read_pause(ic);
2396             else
2397                 av_read_play(ic);
2398         }
2399 #if CONFIG_RTSP_DEMUXER
2400         if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
2401             /* wait 10 ms to avoid trying to get another packet */
2402             /* XXX: horrible */
2403             SDL_Delay(10);
2404             continue;
2405         }
2406 #endif
2407         if (is->seek_req) {
2408             int64_t seek_target = is->seek_pos;
2409             int64_t seek_min    = is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2410             int64_t seek_max    = is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2411 // FIXME the +-2 is due to rounding not being done in the correct direction
2412 //       when generating the seek_pos/seek_rel variables
2413
2414             ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2415             if (ret < 0) {
2416                 fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
2417             } else {
2418                 if (is->audio_stream >= 0) {
2419                     packet_queue_flush(&is->audioq);
2420                     packet_queue_put(&is->audioq, &flush_pkt);
2421                 }
2422                 if (is->subtitle_stream >= 0) {
2423                     packet_queue_flush(&is->subtitleq);
2424                     packet_queue_put(&is->subtitleq, &flush_pkt);
2425                 }
2426                 if (is->video_stream >= 0) {
2427                     packet_queue_flush(&is->videoq);
2428                     packet_queue_put(&is->videoq, &flush_pkt);
2429                 }
2430             }
2431             is->seek_req = 0;
2432             eof = 0;
2433         }
2434
2435         /* if the queues are full, no need to read more */
2436         if (!infinite_buffer &&
2437               (is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2438             || (   (is->audioq   .size  > MIN_AUDIOQ_SIZE || is->audio_stream < 0)
2439                 && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream < 0)
2440                 && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream < 0)))) {
2441             /* wait 10 ms */
2442             SDL_Delay(10);
2443             continue;
2444         }
2445         if (eof) {
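             /* queue empty packets so that the decoders flush any delayed
              * frames; once every queue has drained, either loop back to the
              * start or exit (unless -noautoexit was given) */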
2446             if (is->video_stream >= 0) {
2447                 av_init_packet(pkt);
2448                 pkt->data = NULL;
2449                 pkt->size = 0;
2450                 pkt->stream_index = is->video_stream;
2451                 packet_queue_put(&is->videoq, pkt);
2452             }
2453             if (is->audio_stream >= 0 &&
2454                 is->audio_st->codec->codec->capabilities & CODEC_CAP_DELAY) {
2455                 av_init_packet(pkt);
2456                 pkt->data = NULL;
2457                 pkt->size = 0;
2458                 pkt->stream_index = is->audio_stream;
2459                 packet_queue_put(&is->audioq, pkt);
2460             }
2461             SDL_Delay(10);
2462             if (is->audioq.size + is->videoq.size + is->subtitleq.size == 0) {
2463                 if (loop != 1 && (!loop || --loop)) {
2464                     stream_seek(cur_stream, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
2465                 } else if (!noautoexit) {
2466                     ret = AVERROR_EOF;
2467                     goto fail;
2468                 }
2469             }
2470             continue;
2471         }
2472         ret = av_read_frame(ic, pkt);
2473         if (ret < 0) {
2474             if (ret == AVERROR_EOF || (ic->pb && ic->pb->eof_reached))
2475                 eof = 1;
2476             if (ic->pb && ic->pb->error)
2477                 break;
2478             SDL_Delay(100); /* wait for user event */
2479             continue;
2480         }
2481         /* check if packet is in play range specified by user, then queue, otherwise discard */
2482         pkt_in_play_range = duration == AV_NOPTS_VALUE ||
2483                 (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
2484                 av_q2d(ic->streams[pkt->stream_index]->time_base) -
2485                 (double)(start_time != AV_NOPTS_VALUE ? start_time : 0) / 1000000
2486                 <= ((double)duration / 1000000);
2487         if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
2488             packet_queue_put(&is->audioq, pkt);
2489         } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
2490             packet_queue_put(&is->videoq, pkt);
2491         } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
2492             packet_queue_put(&is->subtitleq, pkt);
2493         } else {
2494             av_free_packet(pkt);
2495         }
2496     }
2497     /* wait until the end */
2498     while (!is->abort_request) {
2499         SDL_Delay(100);
2500     }
2501
2502     ret = 0;
2503  fail:
2504     /* disable interrupting */
2505     global_video_state = NULL;
2506
2507     /* close each stream */
2508     if (is->audio_stream >= 0)
2509         stream_component_close(is, is->audio_stream);
2510     if (is->video_stream >= 0)
2511         stream_component_close(is, is->video_stream);
2512     if (is->subtitle_stream >= 0)
2513         stream_component_close(is, is->subtitle_stream);
2514     if (is->ic) {
2515         avformat_close_input(&is->ic);
2516     }
2517
2518     if (ret != 0) {
2519         SDL_Event event;
2520
2521         event.type = FF_QUIT_EVENT;
2522         event.user.data1 = is;
2523         SDL_PushEvent(&event);
2524     }
2525     return 0;
2526 }
2527
2528 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2529 {
2530     VideoState *is;
2531
2532     is = av_mallocz(sizeof(VideoState));
2533     if (!is)
2534         return NULL;
2535     av_strlcpy(is->filename, filename, sizeof(is->filename));
2536     is->iformat = iformat;
2537     is->ytop    = 0;
2538     is->xleft   = 0;
2539
2540     /* start video display */
2541     is->pictq_mutex = SDL_CreateMutex();
2542     is->pictq_cond  = SDL_CreateCond();
2543
2544     is->subpq_mutex = SDL_CreateMutex();
2545     is->subpq_cond  = SDL_CreateCond();
2546
2547     is->av_sync_type = av_sync_type;
2548     is->parse_tid    = SDL_CreateThread(decode_thread, is);
2549     if (!is->parse_tid) {
2550         av_free(is);
2551         return NULL;
2552     }
2553     return is;
2554 }
2555
2556 static void stream_cycle_channel(VideoState *is, int codec_type)
2557 {
2558     AVFormatContext *ic = is->ic;
2559     int start_index, stream_index;
2560     AVStream *st;
2561
2562     if (codec_type == AVMEDIA_TYPE_VIDEO)
2563         start_index = is->video_stream;
2564     else if (codec_type == AVMEDIA_TYPE_AUDIO)
2565         start_index = is->audio_stream;
2566     else
2567         start_index = is->subtitle_stream;
2568     if (start_index < (codec_type == AVMEDIA_TYPE_SUBTITLE ? -1 : 0))
2569         return;
2570     stream_index = start_index;
2571     for (;;) {
2572         if (++stream_index >= is->ic->nb_streams)
2573         {
2574             if (codec_type == AVMEDIA_TYPE_SUBTITLE)
2575             {
2576                 stream_index = -1;
2577                 goto the_end;
2578             } else
2579                 stream_index = 0;
2580         }
2581         if (stream_index == start_index)
2582             return;
2583         st = ic->streams[stream_index];
2584         if (st->codec->codec_type == codec_type) {
2585             /* check that parameters are OK */
2586             switch (codec_type) {
2587             case AVMEDIA_TYPE_AUDIO:
2588                 if (st->codec->sample_rate != 0 &&
2589                     st->codec->channels != 0)
2590                     goto the_end;
2591                 break;
2592             case AVMEDIA_TYPE_VIDEO:
2593             case AVMEDIA_TYPE_SUBTITLE:
2594                 goto the_end;
2595             default:
2596                 break;
2597             }
2598         }
2599     }
2600  the_end:
2601     stream_component_close(is, start_index);
2602     stream_component_open(is, stream_index);
2603 }
2604
2605
2606 static void toggle_full_screen(void)
2607 {
2608 #if defined(__APPLE__) && SDL_VERSION_ATLEAST(1, 2, 14)
2609     /* OS X needs to empty the picture_queue */
2610     int i;
2611     for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++)
2612         cur_stream->pictq[i].reallocate = 1;
2613 #endif
2614     is_full_screen = !is_full_screen;
2615     video_open(cur_stream);
2616 }
2617
2618 static void toggle_pause(void)
2619 {
2620     if (cur_stream)
2621         stream_pause(cur_stream);
2622     step = 0;
2623 }
2624
2625 static void step_to_next_frame(void)
2626 {
2627     if (cur_stream) {
2628         /* if the stream is paused, unpause it, then step */
2629         if (cur_stream->paused)
2630             stream_pause(cur_stream);
2631     }
2632     step = 1;
2633 }
2634
2635 static void toggle_audio_display(void)
2636 {
2637     if (cur_stream) {
2638         int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2639         cur_stream->show_audio = (cur_stream->show_audio + 1) % 3;
2640         fill_rectangle(screen,
2641                        cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height,
2642                        bgcolor);
2643         SDL_UpdateRect(screen, cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height);
2644     }
2645 }
2646
2647 static void seek_chapter(VideoState *is, int incr)
2648 {
2649     int64_t pos = get_master_clock(is) * AV_TIME_BASE;
2650     int i;
2651
2652     if (!is->ic->nb_chapters)
2653         return;
2654
2655     /* find the current chapter */
2656     for (i = 0; i < is->ic->nb_chapters; i++) {
2657         AVChapter *ch = is->ic->chapters[i];
2658         if (av_compare_ts(pos, AV_TIME_BASE_Q, ch->start, ch->time_base) < 0) {
2659             i--;
2660             break;
2661         }
2662     }
2663
2664     i += incr;
2665     i = FFMAX(i, 0);
2666     if (i >= is->ic->nb_chapters)
2667         return;
2668
2669     av_log(NULL, AV_LOG_VERBOSE, "Seeking to chapter %d.\n", i);
2670     stream_seek(is, av_rescale_q(is->ic->chapters[i]->start, is->ic->chapters[i]->time_base,
2671                                  AV_TIME_BASE_Q), 0, 0);
2672 }
2673
2674 /* handle an event sent by the GUI */
2675 static void event_loop(void)
2676 {
2677     SDL_Event event;
2678     double incr, pos, frac;
2679
2680     for (;;) {
2681         double x;
2682         SDL_WaitEvent(&event);
2683         switch (event.type) {
2684         case SDL_KEYDOWN:
2685             if (exit_on_keydown) {
2686                 do_exit();
2687                 break;
2688             }
2689             switch (event.key.keysym.sym) {
2690             case SDLK_ESCAPE:
2691             case SDLK_q:
2692                 do_exit();
2693                 break;
2694             case SDLK_f:
2695                 toggle_full_screen();
2696                 break;
2697             case SDLK_p:
2698             case SDLK_SPACE:
2699                 toggle_pause();
2700                 break;
2701             case SDLK_s: // S: Step to next frame
2702                 step_to_next_frame();
2703                 break;
2704             case SDLK_a:
2705                 if (cur_stream)
2706                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
2707                 break;
2708             case SDLK_v:
2709                 if (cur_stream)
2710                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
2711                 break;
2712             case SDLK_t:
2713                 if (cur_stream)
2714                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
2715                 break;
2716             case SDLK_w:
2717                 toggle_audio_display();
2718                 break;
2719             case SDLK_PAGEUP:
2720                 seek_chapter(cur_stream, 1);
2721                 break;
2722             case SDLK_PAGEDOWN:
2723                 seek_chapter(cur_stream, -1);
2724                 break;
2725             case SDLK_LEFT:
2726                 incr = -10.0;
2727                 goto do_seek;
2728             case SDLK_RIGHT:
2729                 incr = 10.0;
2730                 goto do_seek;
2731             case SDLK_UP:
2732                 incr = 60.0;
2733                 goto do_seek;
2734             case SDLK_DOWN:
2735                 incr = -60.0;
2736             do_seek:
2737                 if (cur_stream) {
2738                     if (seek_by_bytes) {
2739                         if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos >= 0) {
2740                             pos = cur_stream->video_current_pos;
2741                         } else if (cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos >= 0) {
2742                             pos = cur_stream->audio_pkt.pos;
2743                         } else
2744                             pos = avio_tell(cur_stream->ic->pb);
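                         /* convert the seek increment from seconds to bytes
                          * using the container bit rate, falling back to an
                          * assumed ~1.4 Mbit/s when it is unknown */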
2745                         if (cur_stream->ic->bit_rate)
2746                             incr *= cur_stream->ic->bit_rate / 8.0;
2747                         else
2748                             incr *= 180000.0;
2749                         pos += incr;
2750                         stream_seek(cur_stream, pos, incr, 1);
2751                     } else {
2752                         pos = get_master_clock(cur_stream);
2753                         pos += incr;
2754                         stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
2755                     }
2756                 }
2757                 break;
2758             default:
2759                 break;
2760             }
2761             break;
2762         case SDL_MOUSEBUTTONDOWN:
2763             if (exit_on_mousedown) {
2764                 do_exit();
2765                 break;
2766             }
2767         case SDL_MOUSEMOTION:
2768             if (event.type == SDL_MOUSEBUTTONDOWN) {
2769                 x = event.button.x;
2770             } else {
2771                 if (event.motion.state != SDL_PRESSED)
2772                     break;
2773                 x = event.motion.x;
2774             }
2775             if (cur_stream) {
2776                 if (seek_by_bytes || cur_stream->ic->duration <= 0) {
2777                     uint64_t size =  avio_size(cur_stream->ic->pb);
2778                     stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
2779                 } else {
2780                     int64_t ts;
2781                     int ns, hh, mm, ss;
2782                     int tns, thh, tmm, tss;
2783                     tns  = cur_stream->ic->duration / 1000000LL;
2784                     thh  = tns / 3600;
2785                     tmm  = (tns % 3600) / 60;
2786                     tss  = (tns % 60);
2787                     frac = x / cur_stream->width;
2788                     ns   = frac * tns;
2789                     hh   = ns / 3600;
2790                     mm   = (ns % 3600) / 60;
2791                     ss   = (ns % 60);
2792                     fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
2793                             hh, mm, ss, thh, tmm, tss);
2794                     ts = frac * cur_stream->ic->duration;
2795                     if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
2796                         ts += cur_stream->ic->start_time;
2797                     stream_seek(cur_stream, ts, 0, 0);
2798                 }
2799             }
2800             break;
2801         case SDL_VIDEORESIZE:
2802             if (cur_stream) {
2803                 screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
2804                                           SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
2805                 screen_width  = cur_stream->width  = event.resize.w;
2806                 screen_height = cur_stream->height = event.resize.h;
2807             }
2808             break;
2809         case SDL_QUIT:
2810         case FF_QUIT_EVENT:
2811             do_exit();
2812             break;
2813         case FF_ALLOC_EVENT:
2814             video_open(event.user.data1);
2815             alloc_picture(event.user.data1);
2816             break;
2817         case FF_REFRESH_EVENT:
2818             video_refresh_timer(event.user.data1);
2819             cur_stream->refresh = 0;
2820             break;
2821         default:
2822             break;
2823         }
2824     }
2825 }
2826
2827 static int opt_frame_size(void *optctx, const char *opt, const char *arg)
2828 {
2829     av_log(NULL, AV_LOG_ERROR,
2830            "Option '%s' has been removed, use private format options instead\n", opt);
2831     return AVERROR(EINVAL);
2832 }
2833
2834 static int opt_width(void *optctx, const char *opt, const char *arg)
2835 {
2836     screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2837     return 0;
2838 }
2839
2840 static int opt_height(void *optctx, const char *opt, const char *arg)
2841 {
2842     screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2843     return 0;
2844 }
2845
2846 static int opt_format(void *optctx, const char *opt, const char *arg)
2847 {
2848     file_iformat = av_find_input_format(arg);
2849     if (!file_iformat) {
2850         fprintf(stderr, "Unknown input format: %s\n", arg);
2851         return AVERROR(EINVAL);
2852     }
2853     return 0;
2854 }
2855
2856 static int opt_frame_pix_fmt(void *optctx, const char *opt, const char *arg)
2857 {
2858     av_log(NULL, AV_LOG_ERROR,
2859            "Option '%s' has been removed, use private format options instead\n", opt);
2860     return AVERROR(EINVAL);
2861 }
2862
2863 static int opt_sync(void *optctx, const char *opt, const char *arg)
2864 {
2865     if (!strcmp(arg, "audio"))
2866         av_sync_type = AV_SYNC_AUDIO_MASTER;
2867     else if (!strcmp(arg, "video"))
2868         av_sync_type = AV_SYNC_VIDEO_MASTER;
2869     else if (!strcmp(arg, "ext"))
2870         av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2871     else {
2872         fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
2873         exit(1);
2874     }
2875     return 0;
2876 }
2877
2878 static int opt_seek(void *optctx, const char *opt, const char *arg)
2879 {
2880     start_time = parse_time_or_die(opt, arg, 1);
2881     return 0;
2882 }
2883
2884 static int opt_duration(void *optctx, const char *opt, const char *arg)
2885 {
2886     duration = parse_time_or_die(opt, arg, 1);
2887     return 0;
2888 }
2889
2890 static const OptionDef options[] = {
2891 #include "cmdutils_common_opts.h"
2892     { "x", HAS_ARG, { .func_arg = opt_width }, "force displayed width", "width" },
2893     { "y", HAS_ARG, { .func_arg = opt_height }, "force displayed height", "height" },
2894     { "s", HAS_ARG | OPT_VIDEO, { .func_arg = opt_frame_size }, "set frame size (WxH or abbreviation)", "size" },
2895     { "fs", OPT_BOOL, { &is_full_screen }, "force full screen" },
2896     { "an", OPT_BOOL, { &audio_disable }, "disable audio" },
2897     { "vn", OPT_BOOL, { &video_disable }, "disable video" },
2898     { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_AUDIO] }, "select desired audio stream", "stream_number" },
2899     { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_VIDEO] }, "select desired video stream", "stream_number" },
2900     { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_SUBTITLE] }, "select desired subtitle stream", "stream_number" },
2901     { "ss", HAS_ARG, { .func_arg = opt_seek }, "seek to a given position in seconds", "pos" },
2902     { "t", HAS_ARG, { .func_arg = opt_duration }, "play  \"duration\" seconds of audio/video", "duration" },
2903     { "bytes", OPT_INT | HAS_ARG, { &seek_by_bytes }, "seek by bytes 0=off 1=on -1=auto", "val" },
2904     { "nodisp", OPT_BOOL, { &display_disable }, "disable graphical display" },
2905     { "f", HAS_ARG, { .func_arg = opt_format }, "force format", "fmt" },
2906     { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, { .func_arg = opt_frame_pix_fmt }, "set pixel format", "format" },
2907     { "stats", OPT_BOOL | OPT_EXPERT, { &show_status }, "show status", "" },
2908     { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, { &workaround_bugs }, "workaround bugs", "" },
2909     { "fast", OPT_BOOL | OPT_EXPERT, { &fast }, "non spec compliant optimizations", "" },
2910     { "genpts", OPT_BOOL | OPT_EXPERT, { &genpts }, "generate pts", "" },
2911     { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, { &decoder_reorder_pts }, "let decoder reorder pts 0=off 1=on -1=auto", ""},
2912     { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, { &skip_loop_filter }, "", "" },
2913     { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, { &skip_frame }, "", "" },
2914     { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, { &skip_idct }, "", "" },
2915     { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, { &idct }, "set idct algo",  "algo" },
2916     { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, { &error_concealment }, "set error concealment options",  "bit_mask" },
2917     { "sync", HAS_ARG | OPT_EXPERT, { .func_arg = opt_sync }, "set audio-video sync. type (type=audio/video/ext)", "type" },
2918     { "noautoexit", OPT_BOOL | OPT_EXPERT, { &noautoexit }, "Do not exit at the end of playback", "" },
2919     { "exitonkeydown", OPT_BOOL | OPT_EXPERT, { &exit_on_keydown }, "exit on key down", "" },
2920     { "exitonmousedown", OPT_BOOL | OPT_EXPERT, { &exit_on_mousedown }, "exit on mouse down", "" },
2921     { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, { &loop }, "set number of times the playback shall be looped", "loop count" },
2922     { "framedrop", OPT_BOOL | OPT_EXPERT, { &framedrop }, "drop frames when cpu is too slow", "" },
2923     { "infbuf", OPT_BOOL | OPT_EXPERT, { &infinite_buffer }, "don't limit the input buffer size (useful with realtime streams)", "" },
2924     { "window_title", OPT_STRING | HAS_ARG, { &window_title }, "set window title", "window title" },
2925 #if CONFIG_AVFILTER
2926     { "vf", OPT_STRING | HAS_ARG, { &vfilters }, "video filters", "filter list" },
2927 #endif
2928     { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, { &rdftspeed }, "rdft speed", "msecs" },
2929     { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, { opt_default }, "generic catch all option", "" },
2930     { "i", 0, { NULL }, "avconv compatibility dummy option", ""},
2931     { NULL, },
2932 };
2933
2934 static void show_usage(void)
2935 {
2936     printf("Simple media player\n");
2937     printf("usage: %s [options] input_file\n", program_name);
2938     printf("\n");
2939 }
2940
2941 void show_help_default(const char *opt, const char *arg)
2942 {
2943     av_log_set_callback(log_callback_help);
2944     show_usage();
2945     show_help_options(options, "Main options:", 0, OPT_EXPERT, 0);
2946     show_help_options(options, "Advanced options:", OPT_EXPERT, 0, 0);
2947     printf("\n");
2948     show_help_children(avcodec_get_class(), AV_OPT_FLAG_DECODING_PARAM);
2949     show_help_children(avformat_get_class(), AV_OPT_FLAG_DECODING_PARAM);
2950 #if !CONFIG_AVFILTER
2951     show_help_children(sws_get_class(), AV_OPT_FLAG_ENCODING_PARAM);
2952 #endif
2953     printf("\nWhile playing:\n"
2954            "q, ESC              quit\n"
2955            "f                   toggle full screen\n"
2956            "p, SPC              pause\n"
2957            "a                   cycle audio channel\n"
2958            "v                   cycle video channel\n"
2959            "t                   cycle subtitle channel\n"
2960            "w                   show audio waves\n"
2961            "s                   activate frame-step mode\n"
2962            "left/right          seek backward/forward 10 seconds\n"
2963            "down/up             seek backward/forward 1 minute\n"
2964            "mouse click         seek to percentage in file corresponding to fraction of width\n"
2965            );
2966 }
2967
2968 static void opt_input_file(void *optctx, const char *filename)
2969 {
2970     if (input_filename) {
2971         fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
2972                 filename, input_filename);
2973         exit(1);
2974     }
2975     if (!strcmp(filename, "-"))
2976         filename = "pipe:";
2977     input_filename = filename;
2978 }
2979
2980 /* main entry point */
2981 int main(int argc, char **argv)
2982 {
2983     int flags;
2984
2985     av_log_set_flags(AV_LOG_SKIP_REPEATED);
2986     parse_loglevel(argc, argv, options);
2987
2988     /* register all codecs, demuxers and protocols */
2989     avcodec_register_all();
2990 #if CONFIG_AVDEVICE
2991     avdevice_register_all();
2992 #endif
2993 #if CONFIG_AVFILTER
2994     avfilter_register_all();
2995 #endif
2996     av_register_all();
2997     avformat_network_init();
2998
2999     init_opts();
3000
3001     show_banner();
3002
3003     parse_options(NULL, argc, argv, options, opt_input_file);
3004
3005     if (!input_filename) {
3006         show_usage();
3007         fprintf(stderr, "An input file must be specified\n");
3008         fprintf(stderr, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
3009         exit(1);
3010     }
3011
3012     if (display_disable) {
3013         video_disable = 1;
3014     }
3015     flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3016 #if !defined(__MINGW32__) && !defined(__APPLE__)
3017     flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
3018 #endif
3019     if (SDL_Init (flags)) {
3020         fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
3021         exit(1);
3022     }
3023
3024     if (!display_disable) {
3025         const SDL_VideoInfo *vi = SDL_GetVideoInfo();
3026         fs_screen_width = vi->current_w;
3027         fs_screen_height = vi->current_h;
3028     }
3029
3030     SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
3031     SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3032     SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3033
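     /* the flush packet is recognized by its data pointer pointing at the
      * packet itself; the decoding threads compare against flush_pkt.data to
      * know when to call avcodec_flush_buffers() */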
3034     av_init_packet(&flush_pkt);
3035     flush_pkt.data = (uint8_t *)&flush_pkt;
3036
3037     cur_stream = stream_open(input_filename, file_iformat);
3038
3039     event_loop();
3040
3041     /* never returns */
3042
3043     return 0;
3044 }