[ffmpeg.git] / ffplay.c
1 /*
2  * Copyright (c) 2003 Fabrice Bellard
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20
21 /**
22  * @file
23  * simple media player based on the FFmpeg libraries
24  */
25
26 #include "config.h"
27 #include <inttypes.h>
28 #include <math.h>
29 #include <limits.h>
30 #include <signal.h>
31 #include <stdint.h>
32
33 #include "libavutil/avstring.h"
34 #include "libavutil/colorspace.h"
35 #include "libavutil/mathematics.h"
36 #include "libavutil/pixdesc.h"
37 #include "libavutil/imgutils.h"
38 #include "libavutil/dict.h"
39 #include "libavutil/parseutils.h"
40 #include "libavutil/samplefmt.h"
41 #include "libavutil/avassert.h"
42 #include "libavutil/time.h"
43 #include "libavformat/avformat.h"
44 #include "libavdevice/avdevice.h"
45 #include "libswscale/swscale.h"
46 #include "libavutil/opt.h"
47 #include "libavcodec/avfft.h"
48 #include "libswresample/swresample.h"
49
50 #if CONFIG_AVFILTER
51 # include "libavfilter/avcodec.h"
52 # include "libavfilter/avfilter.h"
53 # include "libavfilter/buffersink.h"
54 # include "libavfilter/buffersrc.h"
55 #endif
56
57 #include <SDL.h>
58 #include <SDL_thread.h>
59
60 #include "cmdutils.h"
61
62 #include <assert.h>
63
64 const char program_name[] = "ffplay";
65 const int program_birth_year = 2003;
66
67 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
68 #define MIN_FRAMES 5
69
70 /* Minimum SDL audio buffer size, in samples. */
71 #define SDL_AUDIO_MIN_BUFFER_SIZE 512
73 /* Calculate actual buffer size keeping in mind not to cause too frequent audio callbacks */
73 #define SDL_AUDIO_MAX_CALLBACKS_PER_SEC 30
74
75 /* no AV sync correction is done if below the minimum AV sync threshold */
76 #define AV_SYNC_THRESHOLD_MIN 0.04
77 /* AV sync correction is done if above the maximum AV sync threshold */
78 #define AV_SYNC_THRESHOLD_MAX 0.1
80 /* If a frame duration is longer than this, it will not be duplicated to compensate for AV sync */
80 #define AV_SYNC_FRAMEDUP_THRESHOLD 0.1
82 /* no AV correction is done if the error is too big */
82 #define AV_NOSYNC_THRESHOLD 10.0
83
84 /* maximum audio speed change to get correct sync */
85 #define SAMPLE_CORRECTION_PERCENT_MAX 10
86
87 /* external clock speed adjustment constants for realtime sources based on buffer fullness */
88 #define EXTERNAL_CLOCK_SPEED_MIN  0.900
89 #define EXTERNAL_CLOCK_SPEED_MAX  1.010
90 #define EXTERNAL_CLOCK_SPEED_STEP 0.001
91
92 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
93 #define AUDIO_DIFF_AVG_NB   20
94
95 /* polls for possible required screen refresh at least this often, should be less than 1/fps */
96 #define REFRESH_RATE 0.01
97
98 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
99 /* TODO: We assume that a decoded and resampled frame fits into this buffer */
100 #define SAMPLE_ARRAY_SIZE (8 * 65536)
101
102 #define CURSOR_HIDE_DELAY 1000000
103
104 static int64_t sws_flags = SWS_BICUBIC;
105
106 typedef struct MyAVPacketList {
107     AVPacket pkt;
108     struct MyAVPacketList *next;
109     int serial;
110 } MyAVPacketList;
111
112 typedef struct PacketQueue {
113     MyAVPacketList *first_pkt, *last_pkt;
114     int nb_packets;
115     int size;
116     int abort_request;
117     int serial;
118     SDL_mutex *mutex;
119     SDL_cond *cond;
120 } PacketQueue;
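/* The serial field implements seek invalidation: queueing the special flush_pkt
 * (see packet_queue_put_private() below) bumps q->serial, and every queued
 * packet records the serial that was current when it was added. Consumers
 * compare a packet's serial with the queue's current one to detect and drop
 * data produced before the most recent flush/seek. */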
121
122 #define VIDEO_PICTURE_QUEUE_SIZE 3
123 #define SUBPICTURE_QUEUE_SIZE 16
124 #define FRAME_QUEUE_SIZE FFMAX(VIDEO_PICTURE_QUEUE_SIZE, SUBPICTURE_QUEUE_SIZE)
125
126 typedef struct AudioParams {
127     int freq;
128     int channels;
129     int64_t channel_layout;
130     enum AVSampleFormat fmt;
131     int frame_size;
132     int bytes_per_sec;
133 } AudioParams;
134
135 typedef struct Clock {
136     double pts;           /* clock base */
137     double pts_drift;     /* clock base minus time at which we updated the clock */
138     double last_updated;
139     double speed;
140     int serial;           /* clock is based on a packet with this serial */
141     int paused;
142     int *queue_serial;    /* pointer to the current packet queue serial, used for obsolete clock detection */
143 } Clock;
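/* A clock stores pts_drift = pts - last_updated, so its current value can be
 * recovered as pts_drift + now, corrected for a non-unit speed in get_clock().
 * For example, a clock set to pts 10.0 two seconds ago reads 12.0 at speed 1.0
 * and 11.0 at speed 0.5. The queue_serial pointer lets get_clock() report NAN
 * once the associated packet queue has been flushed since the last update. */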
144
145 /* Common struct for handling all types of decoded data and allocated render buffers. */
146 typedef struct Frame {
147     AVFrame *frame;
148     AVSubtitle sub;
149     int serial;
150     double pts;           /* presentation timestamp for the frame */
151     double duration;      /* estimated duration of the frame */
152     int64_t pos;          /* byte position of the frame in the input file */
153     SDL_Overlay *bmp;
154     int allocated;
155     int reallocate;
156     int width;
157     int height;
158     AVRational sar;
159 } Frame;
160
161 typedef struct FrameQueue {
162     Frame queue[FRAME_QUEUE_SIZE];
163     int rindex;
164     int windex;
165     int size;
166     int max_size;
167     int keep_last;
168     int rindex_shown;
169     SDL_mutex *mutex;
170     SDL_cond *cond;
171     PacketQueue *pktq;
172 } FrameQueue;
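/* FrameQueue is a fixed-size ring buffer of decoded frames layered on top of a
 * PacketQueue: the decoder writes at windex, the display reads at rindex. With
 * keep_last set, the most recently shown frame is kept around (rindex_shown)
 * so it can be redisplayed, e.g. while paused or on a forced refresh, and
 * frame_queue_prev() can step back to it. */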
173
174 enum {
175     AV_SYNC_AUDIO_MASTER, /* default choice */
176     AV_SYNC_VIDEO_MASTER,
177     AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
178 };
179
180 typedef struct VideoState {
181     SDL_Thread *read_tid;
182     SDL_Thread *video_tid;
183     AVInputFormat *iformat;
184     int no_background;
185     int abort_request;
186     int force_refresh;
187     int paused;
188     int last_paused;
189     int queue_attachments_req;
190     int seek_req;
191     int seek_flags;
192     int64_t seek_pos;
193     int64_t seek_rel;
194     int read_pause_return;
195     AVFormatContext *ic;
196     int realtime;
197     int audio_finished;
198     int video_finished;
199
200     Clock audclk;
201     Clock vidclk;
202     Clock extclk;
203
204     FrameQueue pictq;
205     FrameQueue subpq;
206
207     int audio_stream;
208
209     int av_sync_type;
210
211     double audio_clock;
212     int audio_clock_serial;
213     double audio_diff_cum; /* used for AV difference average computation */
214     double audio_diff_avg_coef;
215     double audio_diff_threshold;
216     int audio_diff_avg_count;
217     AVStream *audio_st;
218     PacketQueue audioq;
219     int audio_hw_buf_size;
220     uint8_t silence_buf[SDL_AUDIO_MIN_BUFFER_SIZE];
221     uint8_t *audio_buf;
222     uint8_t *audio_buf1;
223     unsigned int audio_buf_size; /* in bytes */
224     unsigned int audio_buf1_size;
225     int audio_buf_index; /* in bytes */
226     int audio_write_buf_size;
227     int audio_buf_frames_pending;
228     AVPacket audio_pkt_temp;
229     AVPacket audio_pkt;
230     int audio_pkt_temp_serial;
231     int audio_last_serial;
232     struct AudioParams audio_src;
233 #if CONFIG_AVFILTER
234     struct AudioParams audio_filter_src;
235 #endif
236     struct AudioParams audio_tgt;
237     struct SwrContext *swr_ctx;
238     int frame_drops_early;
239     int frame_drops_late;
240     AVFrame *frame;
241     int64_t audio_frame_next_pts;
242
243     enum ShowMode {
244         SHOW_MODE_NONE = -1, SHOW_MODE_VIDEO = 0, SHOW_MODE_WAVES, SHOW_MODE_RDFT, SHOW_MODE_NB
245     } show_mode;
246     int16_t sample_array[SAMPLE_ARRAY_SIZE];
247     int sample_array_index;
248     int last_i_start;
249     RDFTContext *rdft;
250     int rdft_bits;
251     FFTSample *rdft_data;
252     int xpos;
253     double last_vis_time;
254
255     SDL_Thread *subtitle_tid;
256     int subtitle_stream;
257     AVStream *subtitle_st;
258     PacketQueue subtitleq;
259
260     double frame_timer;
261     double frame_last_returned_time;
262     double frame_last_filter_delay;
263     int video_stream;
264     AVStream *video_st;
265     PacketQueue videoq;
266     double max_frame_duration;      // maximum duration of a frame - above this, we consider the jump a timestamp discontinuity
267 #if !CONFIG_AVFILTER
268     struct SwsContext *img_convert_ctx;
269 #endif
270     SDL_Rect last_display_rect;
271
272     char filename[1024];
273     int width, height, xleft, ytop;
274     int step;
275
276 #if CONFIG_AVFILTER
277     int vfilter_idx;
278     AVFilterContext *in_video_filter;   // the first filter in the video chain
279     AVFilterContext *out_video_filter;  // the last filter in the video chain
280     AVFilterContext *in_audio_filter;   // the first filter in the audio chain
281     AVFilterContext *out_audio_filter;  // the last filter in the audio chain
282     AVFilterGraph *agraph;              // audio filter graph
283 #endif
284
285     int last_video_stream, last_audio_stream, last_subtitle_stream;
286
287     SDL_cond *continue_read_thread;
288 } VideoState;
289
290 /* options specified by the user */
291 static AVInputFormat *file_iformat;
292 static const char *input_filename;
293 static const char *window_title;
294 static int fs_screen_width;
295 static int fs_screen_height;
296 static int default_width  = 640;
297 static int default_height = 480;
298 static int screen_width  = 0;
299 static int screen_height = 0;
300 static int audio_disable;
301 static int video_disable;
302 static int subtitle_disable;
303 static int wanted_stream[AVMEDIA_TYPE_NB] = {
304     [AVMEDIA_TYPE_AUDIO]    = -1,
305     [AVMEDIA_TYPE_VIDEO]    = -1,
306     [AVMEDIA_TYPE_SUBTITLE] = -1,
307 };
308 static int seek_by_bytes = -1;
309 static int display_disable;
310 static int show_status = 1;
311 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
312 static int64_t start_time = AV_NOPTS_VALUE;
313 static int64_t duration = AV_NOPTS_VALUE;
314 static int workaround_bugs = 1;
315 static int fast = 0;
316 static int genpts = 0;
317 static int lowres = 0;
318 static int decoder_reorder_pts = -1;
319 static int autoexit;
320 static int exit_on_keydown;
321 static int exit_on_mousedown;
322 static int loop = 1;
323 static int framedrop = -1;
324 static int infinite_buffer = -1;
325 static enum ShowMode show_mode = SHOW_MODE_NONE;
326 static const char *audio_codec_name;
327 static const char *subtitle_codec_name;
328 static const char *video_codec_name;
329 double rdftspeed = 0.02;
330 static int64_t cursor_last_shown;
331 static int cursor_hidden = 0;
332 #if CONFIG_AVFILTER
333 static const char **vfilters_list = NULL;
334 static int nb_vfilters = 0;
335 static char *afilters = NULL;
336 #endif
337 static int autorotate = 1;
338
339 /* current context */
340 static int is_full_screen;
341 static int64_t audio_callback_time;
342
343 static AVPacket flush_pkt;
344
345 #define FF_ALLOC_EVENT   (SDL_USEREVENT)
346 #define FF_QUIT_EVENT    (SDL_USEREVENT + 2)
347
348 static SDL_Surface *screen;
349
350 #if CONFIG_AVFILTER
351 static int opt_add_vfilter(void *optctx, const char *opt, const char *arg)
352 {
353     GROW_ARRAY(vfilters_list, nb_vfilters);
354     vfilters_list[nb_vfilters - 1] = arg;
355     return 0;
356 }
357 #endif
358
359 static inline
360 int cmp_audio_fmts(enum AVSampleFormat fmt1, int64_t channel_count1,
361                    enum AVSampleFormat fmt2, int64_t channel_count2)
362 {
363     /* If channel count == 1, planar and non-planar formats are the same */
364     if (channel_count1 == 1 && channel_count2 == 1)
365         return av_get_packed_sample_fmt(fmt1) != av_get_packed_sample_fmt(fmt2);
366     else
367         return channel_count1 != channel_count2 || fmt1 != fmt2;
368 }
369
370 static inline
371 int64_t get_valid_channel_layout(int64_t channel_layout, int channels)
372 {
373     if (channel_layout && av_get_channel_layout_nb_channels(channel_layout) == channels)
374         return channel_layout;
375     else
376         return 0;
377 }
378
379 static void free_picture(Frame *vp);
380
381 static int packet_queue_put_private(PacketQueue *q, AVPacket *pkt)
382 {
383     MyAVPacketList *pkt1;
384
385     if (q->abort_request)
386        return -1;
387
388     pkt1 = av_malloc(sizeof(MyAVPacketList));
389     if (!pkt1)
390         return -1;
391     pkt1->pkt = *pkt;
392     pkt1->next = NULL;
393     if (pkt == &flush_pkt)
394         q->serial++;
395     pkt1->serial = q->serial;
396
397     if (!q->last_pkt)
398         q->first_pkt = pkt1;
399     else
400         q->last_pkt->next = pkt1;
401     q->last_pkt = pkt1;
402     q->nb_packets++;
403     q->size += pkt1->pkt.size + sizeof(*pkt1);
404     /* XXX: should duplicate packet data in DV case */
405     SDL_CondSignal(q->cond);
406     return 0;
407 }
408
409 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
410 {
411     int ret;
412
413     /* duplicate the packet */
414     if (pkt != &flush_pkt && av_dup_packet(pkt) < 0)
415         return -1;
416
417     SDL_LockMutex(q->mutex);
418     ret = packet_queue_put_private(q, pkt);
419     SDL_UnlockMutex(q->mutex);
420
421     if (pkt != &flush_pkt && ret < 0)
422         av_free_packet(pkt);
423
424     return ret;
425 }
426
427 static int packet_queue_put_nullpacket(PacketQueue *q, int stream_index)
428 {
429     AVPacket pkt1, *pkt = &pkt1;
430     av_init_packet(pkt);
431     pkt->data = NULL;
432     pkt->size = 0;
433     pkt->stream_index = stream_index;
434     return packet_queue_put(q, pkt);
435 }
436
437 /* packet queue handling */
438 static void packet_queue_init(PacketQueue *q)
439 {
440     memset(q, 0, sizeof(PacketQueue));
441     q->mutex = SDL_CreateMutex();
442     q->cond = SDL_CreateCond();
443     q->abort_request = 1;
444 }
445
446 static void packet_queue_flush(PacketQueue *q)
447 {
448     MyAVPacketList *pkt, *pkt1;
449
450     SDL_LockMutex(q->mutex);
451     for (pkt = q->first_pkt; pkt; pkt = pkt1) {
452         pkt1 = pkt->next;
453         av_free_packet(&pkt->pkt);
454         av_freep(&pkt);
455     }
456     q->last_pkt = NULL;
457     q->first_pkt = NULL;
458     q->nb_packets = 0;
459     q->size = 0;
460     SDL_UnlockMutex(q->mutex);
461 }
462
463 static void packet_queue_destroy(PacketQueue *q)
464 {
465     packet_queue_flush(q);
466     SDL_DestroyMutex(q->mutex);
467     SDL_DestroyCond(q->cond);
468 }
469
470 static void packet_queue_abort(PacketQueue *q)
471 {
472     SDL_LockMutex(q->mutex);
473
474     q->abort_request = 1;
475
476     SDL_CondSignal(q->cond);
477
478     SDL_UnlockMutex(q->mutex);
479 }
480
481 static void packet_queue_start(PacketQueue *q)
482 {
483     SDL_LockMutex(q->mutex);
484     q->abort_request = 0;
485     packet_queue_put_private(q, &flush_pkt);
486     SDL_UnlockMutex(q->mutex);
487 }
488
489 /* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
490 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block, int *serial)
491 {
492     MyAVPacketList *pkt1;
493     int ret;
494
495     SDL_LockMutex(q->mutex);
496
497     for (;;) {
498         if (q->abort_request) {
499             ret = -1;
500             break;
501         }
502
503         pkt1 = q->first_pkt;
504         if (pkt1) {
505             q->first_pkt = pkt1->next;
506             if (!q->first_pkt)
507                 q->last_pkt = NULL;
508             q->nb_packets--;
509             q->size -= pkt1->pkt.size + sizeof(*pkt1);
510             *pkt = pkt1->pkt;
511             if (serial)
512                 *serial = pkt1->serial;
513             av_free(pkt1);
514             ret = 1;
515             break;
516         } else if (!block) {
517             ret = 0;
518             break;
519         } else {
520             SDL_CondWait(q->cond, q->mutex);
521         }
522     }
523     SDL_UnlockMutex(q->mutex);
524     return ret;
525 }
526
527 static void frame_queue_unref_item(Frame *vp)
528 {
529     av_frame_unref(vp->frame);
530     avsubtitle_free(&vp->sub);
531 }
532
533 static int frame_queue_init(FrameQueue *f, PacketQueue *pktq, int max_size, int keep_last)
534 {
535     int i;
536     memset(f, 0, sizeof(FrameQueue));
537     if (!(f->mutex = SDL_CreateMutex()))
538         return AVERROR(ENOMEM);
539     if (!(f->cond = SDL_CreateCond()))
540         return AVERROR(ENOMEM);
541     f->pktq = pktq;
542     f->max_size = FFMIN(max_size, FRAME_QUEUE_SIZE);
543     f->keep_last = !!keep_last;
544     for (i = 0; i < f->max_size; i++)
545         if (!(f->queue[i].frame = av_frame_alloc()))
546             return AVERROR(ENOMEM);
547     return 0;
548 }
549
550 static void frame_queue_destroy(FrameQueue *f)
551 {
552     int i;
553     for (i = 0; i < f->max_size; i++) {
554         Frame *vp = &f->queue[i];
555         frame_queue_unref_item(vp);
556         av_frame_free(&vp->frame);
557         free_picture(vp);
558     }
559     SDL_DestroyMutex(f->mutex);
560     SDL_DestroyCond(f->cond);
561 }
562
563 static void frame_queue_signal(FrameQueue *f)
564 {
565     SDL_LockMutex(f->mutex);
566     SDL_CondSignal(f->cond);
567     SDL_UnlockMutex(f->mutex);
568 }
569
570 static Frame *frame_queue_peek(FrameQueue *f)
571 {
572     return &f->queue[(f->rindex + f->rindex_shown) % f->max_size];
573 }
574
575 static Frame *frame_queue_peek_next(FrameQueue *f)
576 {
577     return &f->queue[(f->rindex + f->rindex_shown + 1) % f->max_size];
578 }
579
580 static Frame *frame_queue_peek_last(FrameQueue *f)
581 {
582     return &f->queue[f->rindex];
583 }
584
585 static Frame *frame_queue_peek_writable(FrameQueue *f)
586 {
587     /* wait until we have space to put a new frame */
588     SDL_LockMutex(f->mutex);
589     while (f->size >= f->max_size &&
590            !f->pktq->abort_request) {
591         SDL_CondWait(f->cond, f->mutex);
592     }
593     SDL_UnlockMutex(f->mutex);
594
595     if (f->pktq->abort_request)
596         return NULL;
597
598     return &f->queue[f->windex];
599 }
600
601 static void frame_queue_push(FrameQueue *f)
602 {
603     if (++f->windex == f->max_size)
604         f->windex = 0;
605     SDL_LockMutex(f->mutex);
606     f->size++;
607     SDL_UnlockMutex(f->mutex);
608 }
609
610 static void frame_queue_next(FrameQueue *f)
611 {
612     if (f->keep_last && !f->rindex_shown) {
613         f->rindex_shown = 1;
614         return;
615     }
616     frame_queue_unref_item(&f->queue[f->rindex]);
617     if (++f->rindex == f->max_size)
618         f->rindex = 0;
619     SDL_LockMutex(f->mutex);
620     f->size--;
621     SDL_CondSignal(f->cond);
622     SDL_UnlockMutex(f->mutex);
623 }
624
625 /* jump back to the previous frame if available by resetting rindex_shown */
626 static int frame_queue_prev(FrameQueue *f)
627 {
628     int ret = f->rindex_shown;
629     f->rindex_shown = 0;
630     return ret;
631 }
632
633 /* return the number of undisplayed frames in the queue */
634 static int frame_queue_nb_remaining(FrameQueue *f)
635 {
636     return f->size - f->rindex_shown;
637 }
638
639 /* return last shown position */
640 static int64_t frame_queue_last_pos(FrameQueue *f)
641 {
642     Frame *fp = &f->queue[f->rindex];
643     if (f->rindex_shown && fp->serial == f->pktq->serial)
644         return fp->pos;
645     else
646         return -1;
647 }
648
649 static inline void fill_rectangle(SDL_Surface *screen,
650                                   int x, int y, int w, int h, int color, int update)
651 {
652     SDL_Rect rect;
653     rect.x = x;
654     rect.y = y;
655     rect.w = w;
656     rect.h = h;
657     SDL_FillRect(screen, &rect, color);
658     if (update && w > 0 && h > 0)
659         SDL_UpdateRect(screen, x, y, w, h);
660 }
661
662 /* draw only the border of a rectangle */
663 static void fill_border(int xleft, int ytop, int width, int height, int x, int y, int w, int h, int color, int update)
664 {
665     int w1, w2, h1, h2;
666
667     /* fill the background */
668     w1 = x;
669     if (w1 < 0)
670         w1 = 0;
671     w2 = width - (x + w);
672     if (w2 < 0)
673         w2 = 0;
674     h1 = y;
675     if (h1 < 0)
676         h1 = 0;
677     h2 = height - (y + h);
678     if (h2 < 0)
679         h2 = 0;
680     fill_rectangle(screen,
681                    xleft, ytop,
682                    w1, height,
683                    color, update);
684     fill_rectangle(screen,
685                    xleft + width - w2, ytop,
686                    w2, height,
687                    color, update);
688     fill_rectangle(screen,
689                    xleft + w1, ytop,
690                    width - w1 - w2, h1,
691                    color, update);
692     fill_rectangle(screen,
693                    xleft + w1, ytop + height - h2,
694                    width - w1 - w2, h2,
695                    color, update);
696 }
697
698 #define ALPHA_BLEND(a, oldp, newp, s)\
699 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
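/* With s == 0 this is a standard 8-bit alpha blend: (oldp*(255-a) + newp*a)/255.
 * For s > 0, newp may be the sum of 2^s source samples (the accumulated chroma
 * values in blend_subrect() below); scaling oldp and the 255 denominator by 2^s
 * blends against that sum directly instead of averaging it first, while the
 * caller scales the accumulated alpha (a1 >> 2) before passing it in. */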
700
701 #define RGBA_IN(r, g, b, a, s)\
702 {\
703     unsigned int v = ((const uint32_t *)(s))[0];\
704     a = (v >> 24) & 0xff;\
705     r = (v >> 16) & 0xff;\
706     g = (v >> 8) & 0xff;\
707     b = v & 0xff;\
708 }
709
710 #define YUVA_IN(y, u, v, a, s, pal)\
711 {\
712     unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
713     a = (val >> 24) & 0xff;\
714     y = (val >> 16) & 0xff;\
715     u = (val >> 8) & 0xff;\
716     v = val & 0xff;\
717 }
718
719 #define YUVA_OUT(d, y, u, v, a)\
720 {\
721     ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
722 }
723
724
725 #define BPP 1
726
727 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
728 {
729     int wrap, wrap3, width2, skip2;
730     int y, u, v, a, u1, v1, a1, w, h;
731     uint8_t *lum, *cb, *cr;
732     const uint8_t *p;
733     const uint32_t *pal;
734     int dstx, dsty, dstw, dsth;
735
736     dstw = av_clip(rect->w, 0, imgw);
737     dsth = av_clip(rect->h, 0, imgh);
738     dstx = av_clip(rect->x, 0, imgw - dstw);
739     dsty = av_clip(rect->y, 0, imgh - dsth);
740     lum = dst->data[0] + dsty * dst->linesize[0];
741     cb  = dst->data[1] + (dsty >> 1) * dst->linesize[1];
742     cr  = dst->data[2] + (dsty >> 1) * dst->linesize[2];
743
744     width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
745     skip2 = dstx >> 1;
746     wrap = dst->linesize[0];
747     wrap3 = rect->pict.linesize[0];
748     p = rect->pict.data[0];
749     pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */
750
751     if (dsty & 1) {
752         lum += dstx;
753         cb += skip2;
754         cr += skip2;
755
756         if (dstx & 1) {
757             YUVA_IN(y, u, v, a, p, pal);
758             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
759             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
760             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
761             cb++;
762             cr++;
763             lum++;
764             p += BPP;
765         }
766         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
767             YUVA_IN(y, u, v, a, p, pal);
768             u1 = u;
769             v1 = v;
770             a1 = a;
771             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
772
773             YUVA_IN(y, u, v, a, p + BPP, pal);
774             u1 += u;
775             v1 += v;
776             a1 += a;
777             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
778             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
779             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
780             cb++;
781             cr++;
782             p += 2 * BPP;
783             lum += 2;
784         }
785         if (w) {
786             YUVA_IN(y, u, v, a, p, pal);
787             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
788             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
789             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
790             p++;
791             lum++;
792         }
793         p += wrap3 - dstw * BPP;
794         lum += wrap - dstw - dstx;
795         cb += dst->linesize[1] - width2 - skip2;
796         cr += dst->linesize[2] - width2 - skip2;
797     }
798     for (h = dsth - (dsty & 1); h >= 2; h -= 2) {
799         lum += dstx;
800         cb += skip2;
801         cr += skip2;
802
803         if (dstx & 1) {
804             YUVA_IN(y, u, v, a, p, pal);
805             u1 = u;
806             v1 = v;
807             a1 = a;
808             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
809             p += wrap3;
810             lum += wrap;
811             YUVA_IN(y, u, v, a, p, pal);
812             u1 += u;
813             v1 += v;
814             a1 += a;
815             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
816             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
817             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
818             cb++;
819             cr++;
820             p += -wrap3 + BPP;
821             lum += -wrap + 1;
822         }
823         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
824             YUVA_IN(y, u, v, a, p, pal);
825             u1 = u;
826             v1 = v;
827             a1 = a;
828             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
829
830             YUVA_IN(y, u, v, a, p + BPP, pal);
831             u1 += u;
832             v1 += v;
833             a1 += a;
834             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
835             p += wrap3;
836             lum += wrap;
837
838             YUVA_IN(y, u, v, a, p, pal);
839             u1 += u;
840             v1 += v;
841             a1 += a;
842             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
843
844             YUVA_IN(y, u, v, a, p + BPP, pal);
845             u1 += u;
846             v1 += v;
847             a1 += a;
848             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
849
850             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
851             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
852
853             cb++;
854             cr++;
855             p += -wrap3 + 2 * BPP;
856             lum += -wrap + 2;
857         }
858         if (w) {
859             YUVA_IN(y, u, v, a, p, pal);
860             u1 = u;
861             v1 = v;
862             a1 = a;
863             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
864             p += wrap3;
865             lum += wrap;
866             YUVA_IN(y, u, v, a, p, pal);
867             u1 += u;
868             v1 += v;
869             a1 += a;
870             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
871             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
872             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
873             cb++;
874             cr++;
875             p += -wrap3 + BPP;
876             lum += -wrap + 1;
877         }
878         p += wrap3 + (wrap3 - dstw * BPP);
879         lum += wrap + (wrap - dstw - dstx);
880         cb += dst->linesize[1] - width2 - skip2;
881         cr += dst->linesize[2] - width2 - skip2;
882     }
883     /* handle odd height */
884     if (h) {
885         lum += dstx;
886         cb += skip2;
887         cr += skip2;
888
889         if (dstx & 1) {
890             YUVA_IN(y, u, v, a, p, pal);
891             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
892             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
893             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
894             cb++;
895             cr++;
896             lum++;
897             p += BPP;
898         }
899         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
900             YUVA_IN(y, u, v, a, p, pal);
901             u1 = u;
902             v1 = v;
903             a1 = a;
904             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
905
906             YUVA_IN(y, u, v, a, p + BPP, pal);
907             u1 += u;
908             v1 += v;
909             a1 += a;
910             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
911             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
912             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
913             cb++;
914             cr++;
915             p += 2 * BPP;
916             lum += 2;
917         }
918         if (w) {
919             YUVA_IN(y, u, v, a, p, pal);
920             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
921             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
922             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
923         }
924     }
925 }
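/* blend_subrect() alpha-blends one palettized subtitle rectangle (8-bit indices
 * into a 32-bit AYUV palette) onto a YUV420P destination picture. Luma is
 * blended per pixel, while chroma is subsampled 2x2, so the U/V and alpha
 * contributions of up to four neighbouring subtitle pixels are accumulated in
 * u1/v1/a1 and blended into a single chroma sample; the odd leading row/column
 * and odd remainders are handled by the separate branches above. */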
926
927 static void free_picture(Frame *vp)
928 {
929      if (vp->bmp) {
930          SDL_FreeYUVOverlay(vp->bmp);
931          vp->bmp = NULL;
932      }
933 }
934
935 static void calculate_display_rect(SDL_Rect *rect,
936                                    int scr_xleft, int scr_ytop, int scr_width, int scr_height,
937                                    int pic_width, int pic_height, AVRational pic_sar)
938 {
939     float aspect_ratio;
940     int width, height, x, y;
941
942     if (pic_sar.num == 0)
943         aspect_ratio = 0;
944     else
945         aspect_ratio = av_q2d(pic_sar);
946
947     if (aspect_ratio <= 0.0)
948         aspect_ratio = 1.0;
949     aspect_ratio *= (float)pic_width / (float)pic_height;
950
951     /* XXX: we assume the screen has a 1:1 pixel aspect ratio */
952     height = scr_height;
953     width = ((int)rint(height * aspect_ratio)) & ~1;
954     if (width > scr_width) {
955         width = scr_width;
956         height = ((int)rint(width / aspect_ratio)) & ~1;
957     }
958     x = (scr_width - width) / 2;
959     y = (scr_height - height) / 2;
960     rect->x = scr_xleft + x;
961     rect->y = scr_ytop  + y;
962     rect->w = FFMAX(width,  1);
963     rect->h = FFMAX(height, 1);
964 }
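/* calculate_display_rect() fits the video into the given screen area while
 * preserving its display aspect ratio (sample aspect ratio * width/height),
 * centres it and rounds the size down to even dimensions. For example, a
 * 1920x1080 frame with a 1:1 SAR shown in a 640x480 area first computes a
 * width of 852, which does not fit, and settles on 640x360 centred at y = 60. */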
965
966 static void video_image_display(VideoState *is)
967 {
968     Frame *vp;
969     Frame *sp;
970     AVPicture pict;
971     SDL_Rect rect;
972     int i;
973
974     vp = frame_queue_peek(&is->pictq);
975     if (vp->bmp) {
976         if (is->subtitle_st) {
977             if (frame_queue_nb_remaining(&is->subpq) > 0) {
978                 sp = frame_queue_peek(&is->subpq);
979
980                 if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) {
981                     SDL_LockYUVOverlay (vp->bmp);
982
983                     pict.data[0] = vp->bmp->pixels[0];
984                     pict.data[1] = vp->bmp->pixels[2];
985                     pict.data[2] = vp->bmp->pixels[1];
986
987                     pict.linesize[0] = vp->bmp->pitches[0];
988                     pict.linesize[1] = vp->bmp->pitches[2];
989                     pict.linesize[2] = vp->bmp->pitches[1];
990
991                     for (i = 0; i < sp->sub.num_rects; i++)
992                         blend_subrect(&pict, sp->sub.rects[i],
993                                       vp->bmp->w, vp->bmp->h);
994
995                     SDL_UnlockYUVOverlay (vp->bmp);
996                 }
997             }
998         }
999
1000         calculate_display_rect(&rect, is->xleft, is->ytop, is->width, is->height, vp->width, vp->height, vp->sar);
1001
1002         SDL_DisplayYUVOverlay(vp->bmp, &rect);
1003
1004         if (rect.x != is->last_display_rect.x || rect.y != is->last_display_rect.y || rect.w != is->last_display_rect.w || rect.h != is->last_display_rect.h || is->force_refresh) {
1005             int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
1006             fill_border(is->xleft, is->ytop, is->width, is->height, rect.x, rect.y, rect.w, rect.h, bgcolor, 1);
1007             is->last_display_rect = rect;
1008         }
1009     }
1010 }
1011
1012 static inline int compute_mod(int a, int b)
1013 {
1014     return a < 0 ? a%b + b : a%b;
1015 }
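/* Unlike C's % operator, compute_mod() keeps the result non-negative when a is
 * negative, so indices stepped backwards in video_audio_display() still wrap
 * into sample_array[]. */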
1016
1017 static void video_audio_display(VideoState *s)
1018 {
1019     int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
1020     int ch, channels, h, h2, bgcolor, fgcolor;
1021     int64_t time_diff;
1022     int rdft_bits, nb_freq;
1023
1024     for (rdft_bits = 1; (1 << rdft_bits) < 2 * s->height; rdft_bits++)
1025         ;
1026     nb_freq = 1 << (rdft_bits - 1);
1027
1028     /* compute display index: center on currently output samples */
1029     channels = s->audio_tgt.channels;
1030     nb_display_channels = channels;
1031     if (!s->paused) {
1032         int data_used= s->show_mode == SHOW_MODE_WAVES ? s->width : (2*nb_freq);
1033         n = 2 * channels;
1034         delay = s->audio_write_buf_size;
1035         delay /= n;
1036
1037         /* to be more precise, we take into account the time spent since
1038            the last buffer computation */
1039         if (audio_callback_time) {
1040             time_diff = av_gettime_relative() - audio_callback_time;
1041             delay -= (time_diff * s->audio_tgt.freq) / 1000000;
1042         }
1043
1044         delay += 2 * data_used;
1045         if (delay < data_used)
1046             delay = data_used;
1047
1048         i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
1049         if (s->show_mode == SHOW_MODE_WAVES) {
1050             h = INT_MIN;
1051             for (i = 0; i < 1000; i += channels) {
1052                 int idx = (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
1053                 int a = s->sample_array[idx];
1054                 int b = s->sample_array[(idx + 4 * channels) % SAMPLE_ARRAY_SIZE];
1055                 int c = s->sample_array[(idx + 5 * channels) % SAMPLE_ARRAY_SIZE];
1056                 int d = s->sample_array[(idx + 9 * channels) % SAMPLE_ARRAY_SIZE];
1057                 int score = a - d;
1058                 if (h < score && (b ^ c) < 0) {
1059                     h = score;
1060                     i_start = idx;
1061                 }
1062             }
1063         }
1064
1065         s->last_i_start = i_start;
1066     } else {
1067         i_start = s->last_i_start;
1068     }
1069
1070     bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
1071     if (s->show_mode == SHOW_MODE_WAVES) {
1072         fill_rectangle(screen,
1073                        s->xleft, s->ytop, s->width, s->height,
1074                        bgcolor, 0);
1075
1076         fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
1077
1078         /* total height for one channel */
1079         h = s->height / nb_display_channels;
1080         /* graph height / 2 */
1081         h2 = (h * 9) / 20;
1082         for (ch = 0; ch < nb_display_channels; ch++) {
1083             i = i_start + ch;
1084             y1 = s->ytop + ch * h + (h / 2); /* position of center line */
1085             for (x = 0; x < s->width; x++) {
1086                 y = (s->sample_array[i] * h2) >> 15;
1087                 if (y < 0) {
1088                     y = -y;
1089                     ys = y1 - y;
1090                 } else {
1091                     ys = y1;
1092                 }
1093                 fill_rectangle(screen,
1094                                s->xleft + x, ys, 1, y,
1095                                fgcolor, 0);
1096                 i += channels;
1097                 if (i >= SAMPLE_ARRAY_SIZE)
1098                     i -= SAMPLE_ARRAY_SIZE;
1099             }
1100         }
1101
1102         fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
1103
1104         for (ch = 1; ch < nb_display_channels; ch++) {
1105             y = s->ytop + ch * h;
1106             fill_rectangle(screen,
1107                            s->xleft, y, s->width, 1,
1108                            fgcolor, 0);
1109         }
1110         SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
1111     } else {
1112         nb_display_channels= FFMIN(nb_display_channels, 2);
1113         if (rdft_bits != s->rdft_bits) {
1114             av_rdft_end(s->rdft);
1115             av_free(s->rdft_data);
1116             s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
1117             s->rdft_bits = rdft_bits;
1118             s->rdft_data = av_malloc_array(nb_freq, 4 *sizeof(*s->rdft_data));
1119         }
1120         {
1121             FFTSample *data[2];
1122             for (ch = 0; ch < nb_display_channels; ch++) {
1123                 data[ch] = s->rdft_data + 2 * nb_freq * ch;
1124                 i = i_start + ch;
1125                 for (x = 0; x < 2 * nb_freq; x++) {
1126                     double w = (x-nb_freq) * (1.0 / nb_freq);
1127                     data[ch][x] = s->sample_array[i] * (1.0 - w * w);
1128                     i += channels;
1129                     if (i >= SAMPLE_ARRAY_SIZE)
1130                         i -= SAMPLE_ARRAY_SIZE;
1131                 }
1132                 av_rdft_calc(s->rdft, data[ch]);
1133             }
1134             /* This is the least efficient way to do this; we should of course
1135              * access the data directly, but it is more than fast enough. */
1136             for (y = 0; y < s->height; y++) {
1137                 double w = 1 / sqrt(nb_freq);
1138                 int a = sqrt(w * sqrt(data[0][2 * y + 0] * data[0][2 * y + 0] + data[0][2 * y + 1] * data[0][2 * y + 1]));
1139                 int b = (nb_display_channels == 2 ) ? sqrt(w * sqrt(data[1][2 * y + 0] * data[1][2 * y + 0]
1140                        + data[1][2 * y + 1] * data[1][2 * y + 1])) : a;
1141                 a = FFMIN(a, 255);
1142                 b = FFMIN(b, 255);
1143                 fgcolor = SDL_MapRGB(screen->format, a, b, (a + b) / 2);
1144
1145                 fill_rectangle(screen,
1146                             s->xpos, s->height-y, 1, 1,
1147                             fgcolor, 0);
1148             }
1149         }
1150         SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
1151         if (!s->paused)
1152             s->xpos++;
1153         if (s->xpos >= s->width)
1154             s->xpos= s->xleft;
1155     }
1156 }
1157
1158 static void stream_close(VideoState *is)
1159 {
1160     /* XXX: use a special url_shutdown call to abort parse cleanly */
1161     is->abort_request = 1;
1162     SDL_WaitThread(is->read_tid, NULL);
1163     packet_queue_destroy(&is->videoq);
1164     packet_queue_destroy(&is->audioq);
1165     packet_queue_destroy(&is->subtitleq);
1166
1167     /* free all pictures */
1168     frame_queue_destroy(&is->pictq);
1169     frame_queue_destroy(&is->subpq);
1170     SDL_DestroyCond(is->continue_read_thread);
1171 #if !CONFIG_AVFILTER
1172     sws_freeContext(is->img_convert_ctx);
1173 #endif
1174     av_free(is);
1175 }
1176
1177 static void do_exit(VideoState *is)
1178 {
1179     if (is) {
1180         stream_close(is);
1181     }
1182     av_lockmgr_register(NULL);
1183     uninit_opts();
1184 #if CONFIG_AVFILTER
1185     av_freep(&vfilters_list);
1186 #endif
1187     avformat_network_deinit();
1188     if (show_status)
1189         printf("\n");
1190     SDL_Quit();
1191     av_log(NULL, AV_LOG_QUIET, "%s", "");
1192     exit(0);
1193 }
1194
1195 static void sigterm_handler(int sig)
1196 {
1197     exit(123);
1198 }
1199
1200 static void set_default_window_size(int width, int height, AVRational sar)
1201 {
1202     SDL_Rect rect;
1203     calculate_display_rect(&rect, 0, 0, INT_MAX, height, width, height, sar);
1204     default_width  = rect.w;
1205     default_height = rect.h;
1206 }
1207
1208 static int video_open(VideoState *is, int force_set_video_mode, Frame *vp)
1209 {
1210     int flags = SDL_HWSURFACE | SDL_ASYNCBLIT | SDL_HWACCEL;
1211     int w,h;
1212
1213     if (is_full_screen) flags |= SDL_FULLSCREEN;
1214     else                flags |= SDL_RESIZABLE;
1215
1216     if (vp && vp->width)
1217         set_default_window_size(vp->width, vp->height, vp->sar);
1218
1219     if (is_full_screen && fs_screen_width) {
1220         w = fs_screen_width;
1221         h = fs_screen_height;
1222     } else if (!is_full_screen && screen_width) {
1223         w = screen_width;
1224         h = screen_height;
1225     } else {
1226         w = default_width;
1227         h = default_height;
1228     }
1229     w = FFMIN(16383, w);
1230     if (screen && is->width == screen->w && screen->w == w
1231        && is->height== screen->h && screen->h == h && !force_set_video_mode)
1232         return 0;
1233     screen = SDL_SetVideoMode(w, h, 0, flags);
1234     if (!screen) {
1235         av_log(NULL, AV_LOG_FATAL, "SDL: could not set video mode - exiting\n");
1236         do_exit(is);
1237     }
1238     if (!window_title)
1239         window_title = input_filename;
1240     SDL_WM_SetCaption(window_title, window_title);
1241
1242     is->width  = screen->w;
1243     is->height = screen->h;
1244
1245     return 0;
1246 }
1247
1248 /* display the current picture, if any */
1249 static void video_display(VideoState *is)
1250 {
1251     if (!screen)
1252         video_open(is, 0, NULL);
1253     if (is->audio_st && is->show_mode != SHOW_MODE_VIDEO)
1254         video_audio_display(is);
1255     else if (is->video_st)
1256         video_image_display(is);
1257 }
1258
1259 static double get_clock(Clock *c)
1260 {
1261     if (*c->queue_serial != c->serial)
1262         return NAN;
1263     if (c->paused) {
1264         return c->pts;
1265     } else {
1266         double time = av_gettime_relative() / 1000000.0;
1267         return c->pts_drift + time - (time - c->last_updated) * (1.0 - c->speed);
1268     }
1269 }
1270
1271 static void set_clock_at(Clock *c, double pts, int serial, double time)
1272 {
1273     c->pts = pts;
1274     c->last_updated = time;
1275     c->pts_drift = c->pts - time;
1276     c->serial = serial;
1277 }
1278
1279 static void set_clock(Clock *c, double pts, int serial)
1280 {
1281     double time = av_gettime_relative() / 1000000.0;
1282     set_clock_at(c, pts, serial, time);
1283 }
1284
1285 static void set_clock_speed(Clock *c, double speed)
1286 {
1287     set_clock(c, get_clock(c), c->serial);
1288     c->speed = speed;
1289 }
1290
1291 static void init_clock(Clock *c, int *queue_serial)
1292 {
1293     c->speed = 1.0;
1294     c->paused = 0;
1295     c->queue_serial = queue_serial;
1296     set_clock(c, NAN, -1);
1297 }
1298
1299 static void sync_clock_to_slave(Clock *c, Clock *slave)
1300 {
1301     double clock = get_clock(c);
1302     double slave_clock = get_clock(slave);
1303     if (!isnan(slave_clock) && (isnan(clock) || fabs(clock - slave_clock) > AV_NOSYNC_THRESHOLD))
1304         set_clock(c, slave_clock, slave->serial);
1305 }
1306
1307 static int get_master_sync_type(VideoState *is) {
1308     if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1309         if (is->video_st)
1310             return AV_SYNC_VIDEO_MASTER;
1311         else
1312             return AV_SYNC_AUDIO_MASTER;
1313     } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1314         if (is->audio_st)
1315             return AV_SYNC_AUDIO_MASTER;
1316         else
1317             return AV_SYNC_EXTERNAL_CLOCK;
1318     } else {
1319         return AV_SYNC_EXTERNAL_CLOCK;
1320     }
1321 }
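/* The configured sync type degrades gracefully: video-master falls back to
 * audio when there is no video stream, and audio-master falls back to the
 * external clock when there is no audio stream. */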
1322
1323 /* get the current master clock value */
1324 static double get_master_clock(VideoState *is)
1325 {
1326     double val;
1327
1328     switch (get_master_sync_type(is)) {
1329         case AV_SYNC_VIDEO_MASTER:
1330             val = get_clock(&is->vidclk);
1331             break;
1332         case AV_SYNC_AUDIO_MASTER:
1333             val = get_clock(&is->audclk);
1334             break;
1335         default:
1336             val = get_clock(&is->extclk);
1337             break;
1338     }
1339     return val;
1340 }
1341
1342 static void check_external_clock_speed(VideoState *is) {
1343    if (is->video_stream >= 0 && is->videoq.nb_packets <= MIN_FRAMES / 2 ||
1344        is->audio_stream >= 0 && is->audioq.nb_packets <= MIN_FRAMES / 2) {
1345        set_clock_speed(&is->extclk, FFMAX(EXTERNAL_CLOCK_SPEED_MIN, is->extclk.speed - EXTERNAL_CLOCK_SPEED_STEP));
1346    } else if ((is->video_stream < 0 || is->videoq.nb_packets > MIN_FRAMES * 2) &&
1347               (is->audio_stream < 0 || is->audioq.nb_packets > MIN_FRAMES * 2)) {
1348        set_clock_speed(&is->extclk, FFMIN(EXTERNAL_CLOCK_SPEED_MAX, is->extclk.speed + EXTERNAL_CLOCK_SPEED_STEP));
1349    } else {
1350        double speed = is->extclk.speed;
1351        if (speed != 1.0)
1352            set_clock_speed(&is->extclk, speed + EXTERNAL_CLOCK_SPEED_STEP * (1.0 - speed) / fabs(1.0 - speed));
1353    }
1354 }
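/* For realtime sources slaved to the external clock, the clock speed acts as a
 * crude buffer-fullness controller: if either packet queue falls to
 * MIN_FRAMES / 2 packets the clock is slowed (down to 0.900) so buffers can
 * refill, if both queues exceed MIN_FRAMES * 2 it is sped up (up to 1.010) to
 * drain them, and otherwise the speed is stepped back towards 1.0. */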
1355
1356 /* seek in the stream */
1357 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1358 {
1359     if (!is->seek_req) {
1360         is->seek_pos = pos;
1361         is->seek_rel = rel;
1362         is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1363         if (seek_by_bytes)
1364             is->seek_flags |= AVSEEK_FLAG_BYTE;
1365         is->seek_req = 1;
1366         SDL_CondSignal(is->continue_read_thread);
1367     }
1368 }
1369
1370 /* pause or resume the video */
1371 static void stream_toggle_pause(VideoState *is)
1372 {
1373     if (is->paused) {
1374         is->frame_timer += av_gettime_relative() / 1000000.0 + is->vidclk.pts_drift - is->vidclk.pts;
1375         if (is->read_pause_return != AVERROR(ENOSYS)) {
1376             is->vidclk.paused = 0;
1377         }
1378         set_clock(&is->vidclk, get_clock(&is->vidclk), is->vidclk.serial);
1379     }
1380     set_clock(&is->extclk, get_clock(&is->extclk), is->extclk.serial);
1381     is->paused = is->audclk.paused = is->vidclk.paused = is->extclk.paused = !is->paused;
1382 }
1383
1384 static void toggle_pause(VideoState *is)
1385 {
1386     stream_toggle_pause(is);
1387     is->step = 0;
1388 }
1389
1390 static void step_to_next_frame(VideoState *is)
1391 {
1392     /* if the stream is paused, unpause it, then step */
1393     if (is->paused)
1394         stream_toggle_pause(is);
1395     is->step = 1;
1396 }
1397
1398 static double compute_target_delay(double delay, VideoState *is)
1399 {
1400     double sync_threshold, diff = 0;
1401
1402     /* update delay to follow master synchronisation source */
1403     if (get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER) {
1404         /* if video is slave, we try to correct big delays by
1405            duplicating or deleting a frame */
1406         diff = get_clock(&is->vidclk) - get_master_clock(is);
1407
1408         /* skip or repeat frame. We take into account the
1409            delay to compute the threshold. I still don't know
1410            if it is the best guess */
1411         sync_threshold = FFMAX(AV_SYNC_THRESHOLD_MIN, FFMIN(AV_SYNC_THRESHOLD_MAX, delay));
1412         if (!isnan(diff) && fabs(diff) < is->max_frame_duration) {
1413             if (diff <= -sync_threshold)
1414                 delay = FFMAX(0, delay + diff);
1415             else if (diff >= sync_threshold && delay > AV_SYNC_FRAMEDUP_THRESHOLD)
1416                 delay = delay + diff;
1417             else if (diff >= sync_threshold)
1418                 delay = 2 * delay;
1419         }
1420     }
1421
1422     av_dlog(NULL, "video: delay=%0.3f A-V=%f\n",
1423             delay, -diff);
1424
1425     return delay;
1426 }
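/* Example: with a nominal delay of 40 ms the sync threshold clamps to 0.04 s.
 * If the video clock is 0.1 s behind the master (diff = -0.1), the returned
 * delay becomes max(0, 0.04 - 0.1) = 0 and the next frame is shown at once;
 * if video is 0.1 s ahead, the delay is doubled to 0.08 s so the master clock
 * can catch up. */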
1427
1428 static double vp_duration(VideoState *is, Frame *vp, Frame *nextvp) {
1429     if (vp->serial == nextvp->serial) {
1430         double duration = nextvp->pts - vp->pts;
1431         if (isnan(duration) || duration <= 0 || duration > is->max_frame_duration)
1432             return vp->duration;
1433         else
1434             return duration;
1435     } else {
1436         return 0.0;
1437     }
1438 }
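/* A frame's effective duration is normally the pts difference to the next
 * frame; the frame's own nominal duration is used when that difference is
 * missing or implausible, and 0 is returned across a serial change (a seek),
 * where pts differences are meaningless. */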
1439
1440 static void update_video_pts(VideoState *is, double pts, int64_t pos, int serial) {
1441     /* update current video pts */
1442     set_clock(&is->vidclk, pts, serial);
1443     sync_clock_to_slave(&is->extclk, &is->vidclk);
1444 }
1445
1446 /* called to display each frame */
1447 static void video_refresh(void *opaque, double *remaining_time)
1448 {
1449     VideoState *is = opaque;
1450     double time;
1451
1452     Frame *sp, *sp2;
1453
1454     if (!is->paused && get_master_sync_type(is) == AV_SYNC_EXTERNAL_CLOCK && is->realtime)
1455         check_external_clock_speed(is);
1456
1457     if (!display_disable && is->show_mode != SHOW_MODE_VIDEO && is->audio_st) {
1458         time = av_gettime_relative() / 1000000.0;
1459         if (is->force_refresh || is->last_vis_time + rdftspeed < time) {
1460             video_display(is);
1461             is->last_vis_time = time;
1462         }
1463         *remaining_time = FFMIN(*remaining_time, is->last_vis_time + rdftspeed - time);
1464     }
1465
1466     if (is->video_st) {
1467         int redisplay = 0;
1468         if (is->force_refresh)
1469             redisplay = frame_queue_prev(&is->pictq);
1470 retry:
1471         if (frame_queue_nb_remaining(&is->pictq) == 0) {
1472             // nothing to do, no picture to display in the queue
1473         } else {
1474             double last_duration, duration, delay;
1475             Frame *vp, *lastvp;
1476
1477             /* dequeue the picture */
1478             lastvp = frame_queue_peek_last(&is->pictq);
1479             vp = frame_queue_peek(&is->pictq);
1480
1481             if (vp->serial != is->videoq.serial) {
1482                 frame_queue_next(&is->pictq);
1483                 redisplay = 0;
1484                 goto retry;
1485             }
1486
1487             if (lastvp->serial != vp->serial && !redisplay)
1488                 is->frame_timer = av_gettime_relative() / 1000000.0;
1489
1490             if (is->paused)
1491                 goto display;
1492
1493             /* compute nominal last_duration */
1494             last_duration = vp_duration(is, lastvp, vp);
1495             if (redisplay)
1496                 delay = 0.0;
1497             else
1498                 delay = compute_target_delay(last_duration, is);
1499
1500             time= av_gettime_relative()/1000000.0;
1501             if (time < is->frame_timer + delay && !redisplay) {
1502                 *remaining_time = FFMIN(is->frame_timer + delay - time, *remaining_time);
1503                 return;
1504             }
1505
1506             is->frame_timer += delay;
1507             if (delay > 0 && time - is->frame_timer > AV_SYNC_THRESHOLD_MAX)
1508                 is->frame_timer = time;
1509
1510             SDL_LockMutex(is->pictq.mutex);
1511             if (!redisplay && !isnan(vp->pts))
1512                 update_video_pts(is, vp->pts, vp->pos, vp->serial);
1513             SDL_UnlockMutex(is->pictq.mutex);
1514
1515             if (frame_queue_nb_remaining(&is->pictq) > 1) {
1516                 Frame *nextvp = frame_queue_peek_next(&is->pictq);
1517                 duration = vp_duration(is, vp, nextvp);
1518                 if(!is->step && (redisplay || framedrop>0 || (framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) && time > is->frame_timer + duration){
1519                     if (!redisplay)
1520                         is->frame_drops_late++;
1521                     frame_queue_next(&is->pictq);
1522                     redisplay = 0;
1523                     goto retry;
1524                 }
1525             }
1526
1527             if (is->subtitle_st) {
1528                     while (frame_queue_nb_remaining(&is->subpq) > 0) {
1529                         sp = frame_queue_peek(&is->subpq);
1530
1531                         if (frame_queue_nb_remaining(&is->subpq) > 1)
1532                             sp2 = frame_queue_peek_next(&is->subpq);
1533                         else
1534                             sp2 = NULL;
1535
1536                         if (sp->serial != is->subtitleq.serial
1537                                 || (is->vidclk.pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1538                                 || (sp2 && is->vidclk.pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1539                         {
1540                             frame_queue_next(&is->subpq);
1541                         } else {
1542                             break;
1543                         }
1544                     }
1545             }
1546
1547 display:
1548             /* display picture */
1549             if (!display_disable && is->show_mode == SHOW_MODE_VIDEO)
1550                 video_display(is);
1551
1552             frame_queue_next(&is->pictq);
1553
1554             if (is->step && !is->paused)
1555                 stream_toggle_pause(is);
1556         }
1557     }
1558     is->force_refresh = 0;
1559     if (show_status) {
1560         static int64_t last_time;
1561         int64_t cur_time;
1562         int aqsize, vqsize, sqsize;
1563         double av_diff;
1564
1565         cur_time = av_gettime_relative();
1566         if (!last_time || (cur_time - last_time) >= 30000) {
1567             aqsize = 0;
1568             vqsize = 0;
1569             sqsize = 0;
1570             if (is->audio_st)
1571                 aqsize = is->audioq.size;
1572             if (is->video_st)
1573                 vqsize = is->videoq.size;
1574             if (is->subtitle_st)
1575                 sqsize = is->subtitleq.size;
1576             av_diff = 0;
1577             if (is->audio_st && is->video_st)
1578                 av_diff = get_clock(&is->audclk) - get_clock(&is->vidclk);
1579             else if (is->video_st)
1580                 av_diff = get_master_clock(is) - get_clock(&is->vidclk);
1581             else if (is->audio_st)
1582                 av_diff = get_master_clock(is) - get_clock(&is->audclk);
1583             av_log(NULL, AV_LOG_INFO,
1584                    "%7.2f %s:%7.3f fd=%4d aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
1585                    get_master_clock(is),
1586                    (is->audio_st && is->video_st) ? "A-V" : (is->video_st ? "M-V" : (is->audio_st ? "M-A" : "   ")),
1587                    av_diff,
1588                    is->frame_drops_early + is->frame_drops_late,
1589                    aqsize / 1024,
1590                    vqsize / 1024,
1591                    sqsize,
1592                    is->video_st ? is->video_st->codec->pts_correction_num_faulty_dts : 0,
1593                    is->video_st ? is->video_st->codec->pts_correction_num_faulty_pts : 0);
1594             fflush(stdout);
1595             last_time = cur_time;
1596         }
1597     }
1598 }
1599
1600 /* allocate a picture (this needs to be done in the main thread to avoid
1601    potential locking problems) */
1602 static void alloc_picture(VideoState *is)
1603 {
1604     Frame *vp;
1605     int64_t bufferdiff;
1606
1607     vp = &is->pictq.queue[is->pictq.windex];
1608
1609     free_picture(vp);
1610
1611     video_open(is, 0, vp);
1612
1613     vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
1614                                    SDL_YV12_OVERLAY,
1615                                    screen);
1616     bufferdiff = vp->bmp ? FFMAX(vp->bmp->pixels[0], vp->bmp->pixels[1]) - FFMIN(vp->bmp->pixels[0], vp->bmp->pixels[1]) : 0;
1617     if (!vp->bmp || vp->bmp->pitches[0] < vp->width || bufferdiff < (int64_t)vp->height * vp->bmp->pitches[0]) {
1618         /* SDL allocates a buffer smaller than requested if the video
1619          * overlay hardware is unable to support the requested size. */
1620         av_log(NULL, AV_LOG_FATAL,
1621                "Error: the video system does not support an image\n"
1622                         "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n"
1623                         "to reduce the image size.\n", vp->width, vp->height );
1624         do_exit(is);
1625     }
1626
1627     SDL_LockMutex(is->pictq.mutex);
1628     vp->allocated = 1;
1629     SDL_CondSignal(is->pictq.cond);
1630     SDL_UnlockMutex(is->pictq.mutex);
1631 }
1632
1633 static void duplicate_right_border_pixels(SDL_Overlay *bmp) {
1634     int i, width, height;
1635     Uint8 *p, *maxp;
1636     for (i = 0; i < 3; i++) {
1637         width  = bmp->w;
1638         height = bmp->h;
1639         if (i > 0) {
1640             width  >>= 1;
1641             height >>= 1;
1642         }
1643         if (bmp->pitches[i] > width) {
1644             maxp = bmp->pixels[i] + bmp->pitches[i] * height - 1;
1645             for (p = bmp->pixels[i] + width - 1; p < maxp; p += bmp->pitches[i])
1646                 *(p+1) = *p;
1647         }
1648     }
1649 }
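/* SDL may allocate overlay lines with a pitch larger than the visible width.
 * Copying each row's last visible pixel into the first padding byte hides the
 * uninitialized column that could otherwise show up at the right edge when the
 * overlay is scaled (see the PITCH_WORKAROUND note at the call site). */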
1650
1651 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, double duration, int64_t pos, int serial)
1652 {
1653     Frame *vp;
1654
1655 #if defined(DEBUG_SYNC) && 0
1656     printf("frame_type=%c pts=%0.3f\n",
1657            av_get_picture_type_char(src_frame->pict_type), pts);
1658 #endif
1659
1660     if (!(vp = frame_queue_peek_writable(&is->pictq)))
1661         return -1;
1662
1663     vp->sar = src_frame->sample_aspect_ratio;
1664
1665     /* alloc or resize hardware picture buffer */
1666     if (!vp->bmp || vp->reallocate || !vp->allocated ||
1667         vp->width  != src_frame->width ||
1668         vp->height != src_frame->height) {
1669         SDL_Event event;
1670
1671         vp->allocated  = 0;
1672         vp->reallocate = 0;
1673         vp->width = src_frame->width;
1674         vp->height = src_frame->height;
1675
1676         /* the allocation must be done in the main thread to avoid
1677            locking problems. */
1678         event.type = FF_ALLOC_EVENT;
1679         event.user.data1 = is;
1680         SDL_PushEvent(&event);
1681
1682         /* wait until the picture is allocated */
1683         SDL_LockMutex(is->pictq.mutex);
1684         while (!vp->allocated && !is->videoq.abort_request) {
1685             SDL_CondWait(is->pictq.cond, is->pictq.mutex);
1686         }
1687         /* if the queue is aborted, we have to pop the pending ALLOC event or wait for the allocation to complete */
1688         if (is->videoq.abort_request && SDL_PeepEvents(&event, 1, SDL_GETEVENT, SDL_EVENTMASK(FF_ALLOC_EVENT)) != 1) {
1689             while (!vp->allocated && !is->abort_request) {
1690                 SDL_CondWait(is->pictq.cond, is->pictq.mutex);
1691             }
1692         }
1693         SDL_UnlockMutex(is->pictq.mutex);
1694
1695         if (is->videoq.abort_request)
1696             return -1;
1697     }
1698
1699     /* if the frame is not skipped, then display it */
1700     if (vp->bmp) {
1701         AVPicture pict = { { 0 } };
1702
1703         /* get a pointer to the bitmap */
1704         SDL_LockYUVOverlay (vp->bmp);
1705
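             /* SDL's YV12 overlay stores its planes in Y, V, U order, while the
              * planar YUV420P layout filled below expects Y, U, V, hence the swapped
              * indices for planes 1 and 2. */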
1706         pict.data[0] = vp->bmp->pixels[0];
1707         pict.data[1] = vp->bmp->pixels[2];
1708         pict.data[2] = vp->bmp->pixels[1];
1709
1710         pict.linesize[0] = vp->bmp->pitches[0];
1711         pict.linesize[1] = vp->bmp->pitches[2];
1712         pict.linesize[2] = vp->bmp->pitches[1];
1713
1714 #if CONFIG_AVFILTER
1715         // FIXME use direct rendering
1716         av_picture_copy(&pict, (AVPicture *)src_frame,
1717                         src_frame->format, vp->width, vp->height);
1718 #else
1719         av_opt_get_int(sws_opts, "sws_flags", 0, &sws_flags);
1720         is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
1721             vp->width, vp->height, src_frame->format, vp->width, vp->height,
1722             AV_PIX_FMT_YUV420P, sws_flags, NULL, NULL, NULL);
1723         if (!is->img_convert_ctx) {
1724             av_log(NULL, AV_LOG_FATAL, "Cannot initialize the conversion context\n");
1725             exit(1);
1726         }
1727         sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1728                   0, vp->height, pict.data, pict.linesize);
1729 #endif
1730         /* work around the SDL overlay pitch padding (see duplicate_right_border_pixels) */
1731         duplicate_right_border_pixels(vp->bmp);
1732         /* update the bitmap content */
1733         SDL_UnlockYUVOverlay(vp->bmp);
1734
1735         vp->pts = pts;
1736         vp->duration = duration;
1737         vp->pos = pos;
1738         vp->serial = serial;
1739
1740         /* now we can update the picture count */
1741         frame_queue_push(&is->pictq);
1742     }
1743     return 0;
1744 }
1745
1746 static int get_video_frame(VideoState *is, AVFrame *frame, AVPacket *pkt, int *serial)
1747 {
1748     int got_picture;
1749
1750     if (packet_queue_get(&is->videoq, pkt, 1, serial) < 0)
1751         return -1;
1752
1753     if (pkt->data == flush_pkt.data) {
1754         avcodec_flush_buffers(is->video_st->codec);
1755         return 0;
1756     }
1757
1758     if(avcodec_decode_video2(is->video_st->codec, frame, &got_picture, pkt) < 0)
1759         return 0;
1760
1761     if (!got_picture && !pkt->data)
1762         is->video_finished = *serial;
1763
1764     if (got_picture) {
1765         int ret = 1;
1766         double dpts = NAN;
1767
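         /* timestamp selection depending on decoder_reorder_pts: -1 lets libavcodec
          * pick a best-effort timestamp, nonzero trusts the reordered pkt_pts,
          * 0 falls back to the decode-order pkt_dts */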
1768         if (decoder_reorder_pts == -1) {
1769             frame->pts = av_frame_get_best_effort_timestamp(frame);
1770         } else if (decoder_reorder_pts) {
1771             frame->pts = frame->pkt_pts;
1772         } else {
1773             frame->pts = frame->pkt_dts;
1774         }
1775
1776         if (frame->pts != AV_NOPTS_VALUE)
1777             dpts = av_q2d(is->video_st->time_base) * frame->pts;
1778
1779         frame->sample_aspect_ratio = av_guess_sample_aspect_ratio(is->ic, is->video_st, frame);
1780
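         /* early frame dropping: if the frame's pts is already behind the master
          * clock (or will be once the usual filtering delay is added), the clocks are
          * within sync range, no seek/flush is pending (matching serial) and more
          * packets are queued, drop the frame before spending time filtering it */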
1781         if (framedrop>0 || (framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) {
1782             if (frame->pts != AV_NOPTS_VALUE) {
1783                 double diff = dpts - get_master_clock(is);
1784                 if (!isnan(diff) && fabs(diff) < AV_NOSYNC_THRESHOLD &&
1785                     diff - is->frame_last_filter_delay < 0 &&
1786                     *serial == is->vidclk.serial &&
1787                     is->videoq.nb_packets) {
1788                     is->frame_drops_early++;
1789                     av_frame_unref(frame);
1790                     ret = 0;
1791                 }
1792             }
1793         }
1794
1795         return ret;
1796     }
1797     return 0;
1798 }
1799
1800 #if CONFIG_AVFILTER
1801 static int configure_filtergraph(AVFilterGraph *graph, const char *filtergraph,
1802                                  AVFilterContext *source_ctx, AVFilterContext *sink_ctx)
1803 {
1804     int ret, i;
1805     int nb_filters = graph->nb_filters;
1806     AVFilterInOut *outputs = NULL, *inputs = NULL;
1807
1808     if (filtergraph) {
1809         outputs = avfilter_inout_alloc();
1810         inputs  = avfilter_inout_alloc();
1811         if (!outputs || !inputs) {
1812             ret = AVERROR(ENOMEM);
1813             goto fail;
1814         }
1815
1816         outputs->name       = av_strdup("in");
1817         outputs->filter_ctx = source_ctx;
1818         outputs->pad_idx    = 0;
1819         outputs->next       = NULL;
1820
1821         inputs->name        = av_strdup("out");
1822         inputs->filter_ctx  = sink_ctx;
1823         inputs->pad_idx     = 0;
1824         inputs->next        = NULL;
1825
1826         if ((ret = avfilter_graph_parse_ptr(graph, filtergraph, &inputs, &outputs, NULL)) < 0)
1827             goto fail;
1828     } else {
1829         if ((ret = avfilter_link(source_ctx, 0, sink_ctx, 0)) < 0)
1830             goto fail;
1831     }
1832
1833     /* Reorder the filters to ensure that inputs of the custom filters are merged first */
1834     for (i = 0; i < graph->nb_filters - nb_filters; i++)
1835         FFSWAP(AVFilterContext*, graph->filters[i], graph->filters[i + nb_filters]);
1836
1837     ret = avfilter_graph_config(graph, NULL);
1838 fail:
1839     avfilter_inout_free(&outputs);
1840     avfilter_inout_free(&inputs);
1841     return ret;
1842 }
1843
1844 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters, AVFrame *frame)
1845 {
1846     static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE };
1847     char sws_flags_str[128];
1848     char buffersrc_args[256];
1849     int ret;
1850     AVFilterContext *filt_src = NULL, *filt_out = NULL, *last_filter = NULL;
1851     AVCodecContext *codec = is->video_st->codec;
1852     AVRational fr = av_guess_frame_rate(is->ic, is->video_st, NULL);
1853
1854     av_opt_get_int(sws_opts, "sws_flags", 0, &sws_flags);
1855     snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%"PRId64, sws_flags);
1856     graph->scale_sws_opts = av_strdup(sws_flags_str);
1857
1858     snprintf(buffersrc_args, sizeof(buffersrc_args),
1859              "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
1860              frame->width, frame->height, frame->format,
1861              is->video_st->time_base.num, is->video_st->time_base.den,
1862              codec->sample_aspect_ratio.num, FFMAX(codec->sample_aspect_ratio.den, 1));
1863     if (fr.num && fr.den)
1864         av_strlcatf(buffersrc_args, sizeof(buffersrc_args), ":frame_rate=%d/%d", fr.num, fr.den);
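     /* the resulting buffersrc args might look like, for example,
      * "video_size=1280x720:pix_fmt=0:time_base=1/25:pixel_aspect=1/1:frame_rate=25/1"
      * (pix_fmt is the numeric AVPixelFormat value) */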
1865
1866     if ((ret = avfilter_graph_create_filter(&filt_src,
1867                                             avfilter_get_by_name("buffer"),
1868                                             "ffplay_buffer", buffersrc_args, NULL,
1869                                             graph)) < 0)
1870         goto fail;
1871
1872     ret = avfilter_graph_create_filter(&filt_out,
1873                                        avfilter_get_by_name("buffersink"),
1874                                        "ffplay_buffersink", NULL, NULL, graph);
1875     if (ret < 0)
1876         goto fail;
1877
1878     if ((ret = av_opt_set_int_list(filt_out, "pix_fmts", pix_fmts,  AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN)) < 0)
1879         goto fail;
1880
1881     last_filter = filt_out;
1882
1883 /* Note: this macro adds a filter before the last added filter, so the
1884  * processing order of the filters is reversed */
1885 #define INSERT_FILT(name, arg) do {                                         \
1886     AVFilterContext *filt_ctx;                                              \
1887                                                                             \
1888     ret = avfilter_graph_create_filter(&filt_ctx,                           \
1889                                        avfilter_get_by_name(name),          \
1890                                        "ffplay_" name, arg, NULL, graph);   \
1891     if (ret < 0)                                                            \
1892         goto fail;                                                          \
1893                                                                             \
1894     ret = avfilter_link(filt_ctx, 0, last_filter, 0);                       \
1895     if (ret < 0)                                                            \
1896         goto fail;                                                          \
1897                                                                             \
1898     last_filter = filt_ctx;                                                 \
1899 } while (0)
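 /* For example, starting from last_filter == filt_out (the buffersink), inserting
  * "crop" and then "transpose" and finally passing the user filter chain to
  * configure_filtergraph() yields:
  *     buffer -> user filters -> transpose -> crop -> buffersink */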
1900
1901     /* The SDL YUV code does not handle odd width/height with some driver
1902      * combinations, therefore we crop the picture to an even width/height. */
1903     INSERT_FILT("crop", "floor(in_w/2)*2:floor(in_h/2)*2");
1904
1905     if (autorotate) {
1906         AVDictionaryEntry *rotate_tag = av_dict_get(is->video_st->metadata, "rotate", NULL, 0);
1907         if (rotate_tag && *rotate_tag->value && strcmp(rotate_tag->value, "0")) {
1908             if (!strcmp(rotate_tag->value, "90")) {
1909                 INSERT_FILT("transpose", "clock");
1910             } else if (!strcmp(rotate_tag->value, "180")) {
1911                 INSERT_FILT("hflip", NULL);
1912                 INSERT_FILT("vflip", NULL);
1913             } else if (!strcmp(rotate_tag->value, "270")) {
1914                 INSERT_FILT("transpose", "cclock");
1915             } else {
1916                 char rotate_buf[64];
1917                 snprintf(rotate_buf, sizeof(rotate_buf), "%s*PI/180", rotate_tag->value);
1918                 INSERT_FILT("rotate", rotate_buf);
1919             }
1920         }
1921     }
1922
1923     if ((ret = configure_filtergraph(graph, vfilters, filt_src, last_filter)) < 0)
1924         goto fail;
1925
1926     is->in_video_filter  = filt_src;
1927     is->out_video_filter = filt_out;
1928
1929 fail:
1930     return ret;
1931 }
1932
1933 static int configure_audio_filters(VideoState *is, const char *afilters, int force_output_format)
1934 {
1935     static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE };
1936     int sample_rates[2] = { 0, -1 };
1937     int64_t channel_layouts[2] = { 0, -1 };
1938     int channels[2] = { 0, -1 };
1939     AVFilterContext *filt_asrc = NULL, *filt_asink = NULL;
1940     char aresample_swr_opts[512] = "";
1941     AVDictionaryEntry *e = NULL;
1942     char asrc_args[256];
1943     int ret;
1944
1945     avfilter_graph_free(&is->agraph);
1946     if (!(is->agraph = avfilter_graph_alloc()))
1947         return AVERROR(ENOMEM);
1948
1949     while ((e = av_dict_get(swr_opts, "", e, AV_DICT_IGNORE_SUFFIX)))
1950         av_strlcatf(aresample_swr_opts, sizeof(aresample_swr_opts), "%s=%s:", e->key, e->value);
1951     if (strlen(aresample_swr_opts))
1952         aresample_swr_opts[strlen(aresample_swr_opts)-1] = '\0';
1953     av_opt_set(is->agraph, "aresample_swr_opts", aresample_swr_opts, 0);
1954
1955     ret = snprintf(asrc_args, sizeof(asrc_args),
1956                    "sample_rate=%d:sample_fmt=%s:channels=%d:time_base=%d/%d",
1957                    is->audio_filter_src.freq, av_get_sample_fmt_name(is->audio_filter_src.fmt),
1958                    is->audio_filter_src.channels,
1959                    1, is->audio_filter_src.freq);
1960     if (is->audio_filter_src.channel_layout)
1961         snprintf(asrc_args + ret, sizeof(asrc_args) - ret,
1962                  ":channel_layout=0x%"PRIx64,  is->audio_filter_src.channel_layout);
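     /* the resulting abuffer args might look like, for example,
      * "sample_rate=44100:sample_fmt=fltp:channels=2:time_base=1/44100:channel_layout=0x3" */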
1963
1964     ret = avfilter_graph_create_filter(&filt_asrc,
1965                                        avfilter_get_by_name("abuffer"), "ffplay_abuffer",
1966                                        asrc_args, NULL, is->agraph);
1967     if (ret < 0)
1968         goto end;
1969
1970
1971     ret = avfilter_graph_create_filter(&filt_asink,
1972                                        avfilter_get_by_name("abuffersink"), "ffplay_abuffersink",
1973                                        NULL, NULL, is->agraph);
1974     if (ret < 0)
1975         goto end;
1976
1977     if ((ret = av_opt_set_int_list(filt_asink, "sample_fmts", sample_fmts,  AV_SAMPLE_FMT_NONE, AV_OPT_SEARCH_CHILDREN)) < 0)
1978         goto end;
1979     if ((ret = av_opt_set_int(filt_asink, "all_channel_counts", 1, AV_OPT_SEARCH_CHILDREN)) < 0)
1980         goto end;
1981
1982     if (force_output_format) {
1983         channel_layouts[0] = is->audio_tgt.channel_layout;
1984         channels       [0] = is->audio_tgt.channels;
1985         sample_rates   [0] = is->audio_tgt.freq;
1986         if ((ret = av_opt_set_int(filt_asink, "all_channel_counts", 0, AV_OPT_SEARCH_CHILDREN)) < 0)
1987             goto end;
1988         if ((ret = av_opt_set_int_list(filt_asink, "channel_layouts", channel_layouts,  -1, AV_OPT_SEARCH_CHILDREN)) < 0)
1989             goto end;
1990         if ((ret = av_opt_set_int_list(filt_asink, "channel_counts" , channels       ,  -1, AV_OPT_SEARCH_CHILDREN)) < 0)
1991             goto end;
1992         if ((ret = av_opt_set_int_list(filt_asink, "sample_rates"   , sample_rates   ,  -1, AV_OPT_SEARCH_CHILDREN)) < 0)
1993             goto end;
1994     }
1995
1996
1997     if ((ret = configure_filtergraph(is->agraph, afilters, filt_asrc, filt_asink)) < 0)
1998         goto end;
1999
2000     is->in_audio_filter  = filt_asrc;
2001     is->out_audio_filter = filt_asink;
2002
2003 end:
2004     if (ret < 0)
2005         avfilter_graph_free(&is->agraph);
2006     return ret;
2007 }
2008 #endif  /* CONFIG_AVFILTER */
2009
2010 static int video_thread(void *arg)
2011 {
2012     AVPacket pkt = { 0 };
2013     VideoState *is = arg;
2014     AVFrame *frame = av_frame_alloc();
2015     double pts;
2016     double duration;
2017     int ret;
2018     int serial = 0;
2019     AVRational tb = is->video_st->time_base;
2020     AVRational frame_rate = av_guess_frame_rate(is->ic, is->video_st, NULL);
2021
2022 #if CONFIG_AVFILTER
2023     AVFilterGraph *graph = avfilter_graph_alloc();
2024     AVFilterContext *filt_out = NULL, *filt_in = NULL;
2025     int last_w = 0;
2026     int last_h = 0;
2027     enum AVPixelFormat last_format = -2;
2028     int last_serial = -1;
2029     int last_vfilter_idx = 0;
2030 #endif
2031
2032     for (;;) {
2033         while (is->paused && !is->videoq.abort_request)
2034             SDL_Delay(10);
2035
2036         av_free_packet(&pkt);
2037
2038         ret = get_video_frame(is, frame, &pkt, &serial);
2039         if (ret < 0)
2040             goto the_end;
2041         if (!ret)
2042             continue;
2043
2044 #if CONFIG_AVFILTER
2045         if (   last_w != frame->width
2046             || last_h != frame->height
2047             || last_format != frame->format
2048             || last_serial != serial
2049             || last_vfilter_idx != is->vfilter_idx) {
2050             av_log(NULL, AV_LOG_DEBUG,
2051                    "Video frame changed from size:%dx%d format:%s serial:%d to size:%dx%d format:%s serial:%d\n",
2052                    last_w, last_h,
2053                    (const char *)av_x_if_null(av_get_pix_fmt_name(last_format), "none"), last_serial,
2054                    frame->width, frame->height,
2055                    (const char *)av_x_if_null(av_get_pix_fmt_name(frame->format), "none"), serial);
2056             avfilter_graph_free(&graph);
2057             graph = avfilter_graph_alloc();
2058             if ((ret = configure_video_filters(graph, is, vfilters_list ? vfilters_list[is->vfilter_idx] : NULL, frame)) < 0) {
2059                 SDL_Event event;
2060                 event.type = FF_QUIT_EVENT;
2061                 event.user.data1 = is;
2062                 SDL_PushEvent(&event);
2063                 goto the_end;
2064             }
2065             filt_in  = is->in_video_filter;
2066             filt_out = is->out_video_filter;
2067             last_w = frame->width;
2068             last_h = frame->height;
2069             last_format = frame->format;
2070             last_serial = serial;
2071             last_vfilter_idx = is->vfilter_idx;
2072             frame_rate = filt_out->inputs[0]->frame_rate;
2073         }
2074
2075         ret = av_buffersrc_add_frame(filt_in, frame);
2076         if (ret < 0)
2077             goto the_end;
2078
2079         while (ret >= 0) {
2080             is->frame_last_returned_time = av_gettime_relative() / 1000000.0;
2081
2082             ret = av_buffersink_get_frame_flags(filt_out, frame, 0);
2083             if (ret < 0) {
2084                 if (ret == AVERROR_EOF)
2085                     is->video_finished = serial;
2086                 ret = 0;
2087                 break;
2088             }
2089
2090             is->frame_last_filter_delay = av_gettime_relative() / 1000000.0 - is->frame_last_returned_time;
2091             if (fabs(is->frame_last_filter_delay) > AV_NOSYNC_THRESHOLD / 10.0)
2092                 is->frame_last_filter_delay = 0;
2093             tb = filt_out->inputs[0]->time_base;
2094 #endif
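             /* the nominal frame duration is the inverse of the frame rate,
              * e.g. 0.04 s at 25 fps, or 0 if the frame rate is unknown */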
2095             duration = (frame_rate.num && frame_rate.den ? av_q2d((AVRational){frame_rate.den, frame_rate.num}) : 0);
2096             pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb);
2097             ret = queue_picture(is, frame, pts, duration, av_frame_get_pkt_pos(frame), serial);
2098             av_frame_unref(frame);
2099 #if CONFIG_AVFILTER
2100         }
2101 #endif
2102
2103         if (ret < 0)
2104             goto the_end;
2105     }
2106  the_end:
2107 #if CONFIG_AVFILTER
2108     avfilter_graph_free(&graph);
2109 #endif
2110     av_free_packet(&pkt);
2111     av_frame_free(&frame);
2112     return 0;
2113 }
2114
2115 static int subtitle_thread(void *arg)
2116 {
2117     VideoState *is = arg;
2118     Frame *sp;
2119     AVPacket pkt1, *pkt = &pkt1;
2120     int got_subtitle;
2121     int serial;
2122     double pts;
2123     int i, j;
2124     int r, g, b, y, u, v, a;
2125
2126     for (;;) {
2127         while (is->paused && !is->subtitleq.abort_request) {
2128             SDL_Delay(10);
2129         }
2130         if (packet_queue_get(&is->subtitleq, pkt, 1, &serial) < 0)
2131             break;
2132
2133         if (pkt->data == flush_pkt.data) {
2134             avcodec_flush_buffers(is->subtitle_st->codec);
2135             continue;
2136         }
2137
2138         if (!(sp = frame_queue_peek_writable(&is->subpq)))
2139             return 0;
2140
2141         /* NOTE: pts is the PTS of the _first_ subtitle picture beginning in
2142          * this packet, if any */
2143         pts = 0;
2144         if (pkt->pts != AV_NOPTS_VALUE)
2145             pts = av_q2d(is->subtitle_st->time_base) * pkt->pts;
2146
2147         avcodec_decode_subtitle2(is->subtitle_st->codec, &sp->sub,
2148                                  &got_subtitle, pkt);
2149         if (got_subtitle && sp->sub.format == 0) {
2150             if (sp->sub.pts != AV_NOPTS_VALUE)
2151                 pts = sp->sub.pts / (double)AV_TIME_BASE;
2152             sp->pts = pts;
2153             sp->serial = serial;
2154
2155             for (i = 0; i < sp->sub.num_rects; i++)
2156             {
2157                 for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
2158                 {
2159                     RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
2160                     y = RGB_TO_Y_CCIR(r, g, b);
2161                     u = RGB_TO_U_CCIR(r, g, b, 0);
2162                     v = RGB_TO_V_CCIR(r, g, b, 0);
2163                     YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
2164                 }
2165             }
2166
2167             /* now we can update the picture count */
2168             frame_queue_push(&is->subpq);
2169         } else if (got_subtitle) {
2170             avsubtitle_free(&sp->sub);
2171         }
2172         av_free_packet(pkt);
2173     }
2174     return 0;
2175 }
2176
2177 /* copy samples for display in the audio visualization window */
2178 static void update_sample_display(VideoState *is, short *samples, int samples_size)
2179 {
2180     int size, len;
2181
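     /* sample_array is used as a circular buffer: each memcpy is clamped to the
      * space left before the end of the array and the write index wraps back to 0 */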
2182     size = samples_size / sizeof(short);
2183     while (size > 0) {
2184         len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
2185         if (len > size)
2186             len = size;
2187         memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
2188         samples += len;
2189         is->sample_array_index += len;
2190         if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
2191             is->sample_array_index = 0;
2192         size -= len;
2193     }
2194 }
2195
2196 /* return the wanted number of samples to get better sync if sync_type is video
2197  * or external master clock */
2198 static int synchronize_audio(VideoState *is, int nb_samples)
2199 {
2200     int wanted_nb_samples = nb_samples;
2201
2202     /* if not master, then we try to remove or add samples to correct the clock */
2203     if (get_master_sync_type(is) != AV_SYNC_AUDIO_MASTER) {
2204         double diff, avg_diff;
2205         int min_nb_samples, max_nb_samples;
2206
2207         diff = get_clock(&is->audclk) - get_master_clock(is);
2208
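         /* audio_diff_cum is an exponentially weighted sum of past differences
          * (cum = diff + coef * cum), with coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
          * for a steady diff it converges to diff / (1 - coef), so multiplying by
          * (1 - coef) below recovers a smoothed A-V difference.  When a correction is
          * applied, e.g. at 44100 Hz with nb_samples = 1024 and diff = +5 ms, the
          * ideal 1024 + 220 samples are clamped to the +10% limit of 1126 samples. */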
2209         if (!isnan(diff) && fabs(diff) < AV_NOSYNC_THRESHOLD) {
2210             is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
2211             if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
2212                 /* not enough measurements to have a correct estimate */
2213                 is->audio_diff_avg_count++;
2214             } else {
2215                 /* estimate the A-V difference */
2216                 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
2217
2218                 if (fabs(avg_diff) >= is->audio_diff_threshold) {
2219                     wanted_nb_samples = nb_samples + (int)(diff * is->audio_src.freq);
2220                     min_nb_samples = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX) / 100));
2221                     max_nb_samples = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX) / 100));
2222                     wanted_nb_samples = FFMIN(FFMAX(wanted_nb_samples, min_nb_samples), max_nb_samples);
2223                 }
2224                 av_dlog(NULL, "diff=%f adiff=%f sample_diff=%d apts=%0.3f %f\n",
2225                         diff, avg_diff, wanted_nb_samples - nb_samples,
2226                         is->audio_clock, is->audio_diff_threshold);
2227             }
2228         } else {
2229             /* the difference is too large: it is probably caused by initial PTS
2230                errors, so reset the A-V filter */
2231             is->audio_diff_avg_count = 0;
2232             is->audio_diff_cum       = 0;
2233         }
2234     }
2235
2236     return wanted_nb_samples;
2237 }
2238
2239 /**
2240  * Decode one audio frame and return its uncompressed size.
2241  *
2242  * The processed audio frame is decoded, converted if required, and
2243  * stored in is->audio_buf, with size in bytes given by the return
2244  * value.
2245  */
2246 static int audio_decode_frame(VideoState *is)
2247 {
2248     AVPacket *pkt_temp = &is->audio_pkt_temp;
2249     AVPacket *pkt = &is->audio_pkt;
2250     AVCodecContext *dec = is->audio_st->codec;
2251     int len1, data_size, resampled_data_size;
2252     int64_t dec_channel_layout;
2253     int got_frame;
2254     av_unused double audio_clock0;
2255     int wanted_nb_samples;
2256     AVRational tb;
2257     int ret;
2258     int reconfigure;
2259
2260     for (;;) {
2261         /* NOTE: the audio packet can contain several frames */
2262         while (pkt_temp->stream_index != -1 || is->audio_buf_frames_pending) {
2263             if (!is->frame) {
2264                 if (!(is->frame = av_frame_alloc()))
2265                     return AVERROR(ENOMEM);
2266             } else {
2267                 av_frame_unref(is->frame);
2268             }
2269
2270             if (is->audioq.serial != is->audio_pkt_temp_serial)
2271                 break;
2272
2273             if (is->paused)
2274                 return -1;
2275
2276             if (!is->audio_buf_frames_pending) {
2277                 len1 = avcodec_decode_audio4(dec, is->frame, &got_frame, pkt_temp);
2278                 if (len1 < 0) {
2279                     /* if error, we skip the frame */
2280                     pkt_temp->size = 0;
2281                     break;
2282                 }
2283
2284                 pkt_temp->dts =
2285                 pkt_temp->pts = AV_NOPTS_VALUE;
2286                 pkt_temp->data += len1;
2287                 pkt_temp->size -= len1;
2288                 if ((pkt_temp->data && pkt_temp->size <= 0) || (!pkt_temp->data && !got_frame))
2289                     pkt_temp->stream_index = -1;
2290                 if (!pkt_temp->data && !got_frame)
2291                     is->audio_finished = is->audio_pkt_temp_serial;
2292
2293                 if (!got_frame)
2294                     continue;
2295
2296                 tb = (AVRational){1, is->frame->sample_rate};
2297                 if (is->frame->pts != AV_NOPTS_VALUE)
2298                     is->frame->pts = av_rescale_q(is->frame->pts, dec->time_base, tb);
2299                 else if (is->frame->pkt_pts != AV_NOPTS_VALUE)
2300                     is->frame->pts = av_rescale_q(is->frame->pkt_pts, is->audio_st->time_base, tb);
2301                 else if (is->audio_frame_next_pts != AV_NOPTS_VALUE)
2302 #if CONFIG_AVFILTER
2303                     is->frame->pts = av_rescale_q(is->audio_frame_next_pts, (AVRational){1, is->audio_filter_src.freq}, tb);
2304 #else
2305                     is->frame->pts = av_rescale_q(is->audio_frame_next_pts, (AVRational){1, is->audio_src.freq}, tb);
2306 #endif
2307
2308                 if (is->frame->pts != AV_NOPTS_VALUE)
2309                     is->audio_frame_next_pts = is->frame->pts + is->frame->nb_samples;
2310
2311 #if CONFIG_AVFILTER
2312                 dec_channel_layout = get_valid_channel_layout(is->frame->channel_layout, av_frame_get_channels(is->frame));
2313
2314                 reconfigure =
2315                     cmp_audio_fmts(is->audio_filter_src.fmt, is->audio_filter_src.channels,
2316                                    is->frame->format, av_frame_get_channels(is->frame))    ||
2317                     is->audio_filter_src.channel_layout != dec_channel_layout ||
2318                     is->audio_filter_src.freq           != is->frame->sample_rate ||
2319                     is->audio_pkt_temp_serial           != is->audio_last_serial;
2320
2321                 if (reconfigure) {
2322                     char buf1[1024], buf2[1024];
2323                     av_get_channel_layout_string(buf1, sizeof(buf1), -1, is->audio_filter_src.channel_layout);
2324                     av_get_channel_layout_string(buf2, sizeof(buf2), -1, dec_channel_layout);
2325                     av_log(NULL, AV_LOG_DEBUG,
2326                            "Audio frame changed from rate:%d ch:%d fmt:%s layout:%s serial:%d to rate:%d ch:%d fmt:%s layout:%s serial:%d\n",
2327                            is->audio_filter_src.freq, is->audio_filter_src.channels, av_get_sample_fmt_name(is->audio_filter_src.fmt), buf1, is->audio_last_serial,
2328                            is->frame->sample_rate, av_frame_get_channels(is->frame), av_get_sample_fmt_name(is->frame->format), buf2, is->audio_pkt_temp_serial);
2329
2330                     is->audio_filter_src.fmt            = is->frame->format;
2331                     is->audio_filter_src.channels       = av_frame_get_channels(is->frame);
2332                     is->audio_filter_src.channel_layout = dec_channel_layout;
2333                     is->audio_filter_src.freq           = is->frame->sample_rate;
2334                     is->audio_last_serial               = is->audio_pkt_temp_serial;
2335
2336                     if ((ret = configure_audio_filters(is, afilters, 1)) < 0)
2337                         return ret;
2338                 }
2339
2340                 if ((ret = av_buffersrc_add_frame(is->in_audio_filter, is->frame)) < 0)
2341                     return ret;
2342 #endif
2343             }
2344 #if CONFIG_AVFILTER
2345             if ((ret = av_buffersink_get_frame_flags(is->out_audio_filter, is->frame, 0)) < 0) {
2346                 if (ret == AVERROR(EAGAIN)) {
2347                     is->audio_buf_frames_pending = 0;
2348                     continue;
2349                 }
2350                 if (ret == AVERROR_EOF)
2351                     is->audio_finished = is->audio_pkt_temp_serial;
2352                 return ret;
2353             }
2354             is->audio_buf_frames_pending = 1;
2355             tb = is->out_audio_filter->inputs[0]->time_base;
2356 #endif
2357
2358             data_size = av_samples_get_buffer_size(NULL, av_frame_get_channels(is->frame),
2359                                                    is->frame->nb_samples,
2360                                                    is->frame->format, 1);
2361
2362             dec_channel_layout =
2363                 (is->frame->channel_layout && av_frame_get_channels(is->frame) == av_get_channel_layout_nb_channels(is->frame->channel_layout)) ?
2364                 is->frame->channel_layout : av_get_default_channel_layout(av_frame_get_channels(is->frame));
2365             wanted_nb_samples = synchronize_audio(is, is->frame->nb_samples);
2366
2367             if (is->frame->format        != is->audio_src.fmt            ||
2368                 dec_channel_layout       != is->audio_src.channel_layout ||
2369                 is->frame->sample_rate   != is->audio_src.freq           ||
2370                 (wanted_nb_samples       != is->frame->nb_samples && !is->swr_ctx)) {
2371                 swr_free(&is->swr_ctx);
2372                 is->swr_ctx = swr_alloc_set_opts(NULL,
2373                                                  is->audio_tgt.channel_layout, is->audio_tgt.fmt, is->audio_tgt.freq,
2374                                                  dec_channel_layout,           is->frame->format, is->frame->sample_rate,
2375                                                  0, NULL);
2376                 if (!is->swr_ctx || swr_init(is->swr_ctx) < 0) {
2377                     av_log(NULL, AV_LOG_ERROR,
2378                            "Cannot create sample rate converter for conversion of %d Hz %s %d channels to %d Hz %s %d channels!\n",
2379                             is->frame->sample_rate, av_get_sample_fmt_name(is->frame->format), av_frame_get_channels(is->frame),
2380                             is->audio_tgt.freq, av_get_sample_fmt_name(is->audio_tgt.fmt), is->audio_tgt.channels);
2381                     break;
2382                 }
2383                 is->audio_src.channel_layout = dec_channel_layout;
2384                 is->audio_src.channels       = av_frame_get_channels(is->frame);
2385                 is->audio_src.freq = is->frame->sample_rate;
2386                 is->audio_src.fmt = is->frame->format;
2387             }
2388
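             /* resampling path: the output buffer is sized for the rate-converted
              * wanted_nb_samples plus 256 samples of headroom for data buffered inside
              * the resampler, and swr_set_compensation() asks libswresample to spread
              * the (wanted - actual) sample difference over the whole output */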
2389             if (is->swr_ctx) {
2390                 const uint8_t **in = (const uint8_t **)is->frame->extended_data;
2391                 uint8_t **out = &is->audio_buf1;
2392                 int out_count = (int64_t)wanted_nb_samples * is->audio_tgt.freq / is->frame->sample_rate + 256;
2393                 int out_size  = av_samples_get_buffer_size(NULL, is->audio_tgt.channels, out_count, is->audio_tgt.fmt, 0);
2394                 int len2;
2395                 if (out_size < 0) {
2396                     av_log(NULL, AV_LOG_ERROR, "av_samples_get_buffer_size() failed\n");
2397                     break;
2398                 }
2399                 if (wanted_nb_samples != is->frame->nb_samples) {
2400                     if (swr_set_compensation(is->swr_ctx, (wanted_nb_samples - is->frame->nb_samples) * is->audio_tgt.freq / is->frame->sample_rate,
2401                                                 wanted_nb_samples * is->audio_tgt.freq / is->frame->sample_rate) < 0) {
2402                         av_log(NULL, AV_LOG_ERROR, "swr_set_compensation() failed\n");
2403                         break;
2404                     }
2405                 }
2406                 av_fast_malloc(&is->audio_buf1, &is->audio_buf1_size, out_size);
2407                 if (!is->audio_buf1)
2408                     return AVERROR(ENOMEM);
2409                 len2 = swr_convert(is->swr_ctx, out, out_count, in, is->frame->nb_samples);
2410                 if (len2 < 0) {
2411                     av_log(NULL, AV_LOG_ERROR, "swr_convert() failed\n");
2412                     break;
2413                 }
2414                 if (len2 == out_count) {
2415                     av_log(NULL, AV_LOG_WARNING, "audio buffer is probably too small\n");
2416                     swr_init(is->swr_ctx);
2417                 }
2418                 is->audio_buf = is->audio_buf1;
2419                 resampled_data_size = len2 * is->audio_tgt.channels * av_get_bytes_per_sample(is->audio_tgt.fmt);
2420             } else {
2421                 is->audio_buf = is->frame->data[0];
2422                 resampled_data_size = data_size;
2423             }
2424
2425             audio_clock0 = is->audio_clock;
2426             /* update the audio clock with the pts */
2427             if (is->frame->pts != AV_NOPTS_VALUE)
2428                 is->audio_clock = is->frame->pts * av_q2d(tb) + (double) is->frame->nb_samples / is->frame->sample_rate;
2429             else
2430                 is->audio_clock = NAN;
2431             is->audio_clock_serial = is->audio_pkt_temp_serial;
2432 #ifdef DEBUG
2433             {
2434                 static double last_clock;
2435                 printf("audio: delay=%0.3f clock=%0.3f clock0=%0.3f\n",
2436                        is->audio_clock - last_clock,
2437                        is->audio_clock, audio_clock0);
2438                 last_clock = is->audio_clock;
2439             }
2440 #endif
2441             return resampled_data_size;
2442         }
2443
2444         /* free the current packet */
2445         if (pkt->data)
2446             av_free_packet(pkt);
2447         memset(pkt_temp, 0, sizeof(*pkt_temp));
2448         pkt_temp->stream_index = -1;
2449
2450         if (is->audioq.abort_request) {
2451             return -1;
2452         }
2453
2454         if (is->audioq.nb_packets == 0)
2455             SDL_CondSignal(is->continue_read_thread);
2456
2457         /* read next packet */
2458         if ((packet_queue_get(&is->audioq, pkt, 1, &is->audio_pkt_temp_serial)) < 0)
2459             return -1;
2460
2461         if (pkt->data == flush_pkt.data) {
2462             avcodec_flush_buffers(dec);
2463             is->audio_buf_frames_pending = 0;
2464             is->audio_frame_next_pts = AV_NOPTS_VALUE;
2465             if ((is->ic->iformat->flags & (AVFMT_NOBINSEARCH | AVFMT_NOGENSEARCH | AVFMT_NO_BYTE_SEEK)) && !is->ic->iformat->read_seek)
2466                 is->audio_frame_next_pts = is->audio_st->start_time;
2467         }
2468
2469         *pkt_temp = *pkt;
2470     }
2471 }
2472
2473 /* prepare a new audio buffer */
2474 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2475 {
2476     VideoState *is = opaque;
2477     int audio_size, len1;
2478
2479     audio_callback_time = av_gettime_relative();
2480
2481     while (len > 0) {
2482         if (is->audio_buf_index >= is->audio_buf_size) {
2483            audio_size = audio_decode_frame(is);
2484            if (audio_size < 0) {
2485                 /* if error, just output silence */
2486                is->audio_buf      = is->silence_buf;
2487                is->audio_buf_size = sizeof(is->silence_buf) / is->audio_tgt.frame_size * is->audio_tgt.frame_size;
2488            } else {
2489                if (is->show_mode != SHOW_MODE_VIDEO)
2490                    update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2491                is->audio_buf_size = audio_size;
2492            }
2493            is->audio_buf_index = 0;
2494         }
2495         len1 = is->audio_buf_size - is->audio_buf_index;
2496         if (len1 > len)
2497             len1 = len;
2498         memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2499         len -= len1;
2500         stream += len1;
2501         is->audio_buf_index += len1;
2502     }
2503     is->audio_write_buf_size = is->audio_buf_size - is->audio_buf_index;
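     /* audio_clock marks the end of the data last returned by audio_decode_frame();
      * the audio_write_buf_size bytes still in our buffer plus the data queued in the
      * SDL/driver buffers have not been played yet, so their duration is subtracted
      * below.  For example, at 48 kHz stereo s16 (192000 bytes/s) with an 8192 byte
      * hardware buffer and 4096 bytes left to write, the clock is set about 0.107 s
      * behind audio_clock. */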
2504     /* Let's assume the audio driver used by SDL has two periods. */
2505     if (!isnan(is->audio_clock)) {
2506         set_clock_at(&is->audclk, is->audio_clock - (double)(2 * is->audio_hw_buf_size + is->audio_write_buf_size) / is->audio_tgt.bytes_per_sec, is->audio_clock_serial, audio_callback_time / 1000000.0);
2507         sync_clock_to_slave(&is->extclk, &is->audclk);
2508     }
2509 }
2510
2511 static int audio_open(void *opaque, int64_t wanted_channel_layout, int wanted_nb_channels, int wanted_sample_rate, struct AudioParams *audio_hw_params)
2512 {
2513     SDL_AudioSpec wanted_spec, spec;
2514     const char *env;
2515     static const int next_nb_channels[] = {0, 0, 1, 6, 2, 6, 4, 6};
2516     static const int next_sample_rates[] = {0, 44100, 48000, 96000, 192000};
2517     int next_sample_rate_idx = FF_ARRAY_ELEMS(next_sample_rates) - 1;
2518
2519     env = SDL_getenv("SDL_AUDIO_CHANNELS");
2520     if (env) {
2521         wanted_nb_channels = atoi(env);
2522         wanted_channel_layout = av_get_default_channel_layout(wanted_nb_channels);
2523     }
2524     if (!wanted_channel_layout || wanted_nb_channels != av_get_channel_layout_nb_channels(wanted_channel_layout)) {
2525         wanted_channel_layout = av_get_default_channel_layout(wanted_nb_channels);
2526         wanted_channel_layout &= ~AV_CH_LAYOUT_STEREO_DOWNMIX;
2527     }
2528     wanted_nb_channels = av_get_channel_layout_nb_channels(wanted_channel_layout);
2529     wanted_spec.channels = wanted_nb_channels;
2530     wanted_spec.freq = wanted_sample_rate;
2531     if (wanted_spec.freq <= 0 || wanted_spec.channels <= 0) {
2532         av_log(NULL, AV_LOG_ERROR, "Invalid sample rate or channel count!\n");
2533         return -1;
2534     }
2535     while (next_sample_rate_idx && next_sample_rates[next_sample_rate_idx] >= wanted_spec.freq)
2536         next_sample_rate_idx--;
2537     wanted_spec.format = AUDIO_S16SYS;
2538     wanted_spec.silence = 0;
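     /* pick a power-of-two buffer size that keeps the callback rate at or below
      * SDL_AUDIO_MAX_CALLBACKS_PER_SEC, e.g. at 44100 Hz: 44100 / 30 = 1470, rounded
      * up to 2048 samples, i.e. roughly 21 callbacks per second */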
2539     wanted_spec.samples = FFMAX(SDL_AUDIO_MIN_BUFFER_SIZE, 2 << av_log2(wanted_spec.freq / SDL_AUDIO_MAX_CALLBACKS_PER_SEC));
2540     wanted_spec.callback = sdl_audio_callback;
2541     wanted_spec.userdata = opaque;
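     /* if SDL refuses the requested spec, first step down through the channel counts
      * in next_nb_channels[] (e.g. 6 -> 4 -> 2 -> 1); once those are exhausted, retry
      * with the original channel count at the next lower standard sample rate from
      * next_sample_rates[], giving up when no rates are left */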
2542     while (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2543         av_log(NULL, AV_LOG_WARNING, "SDL_OpenAudio (%d channels, %d Hz): %s\n",
2544                wanted_spec.channels, wanted_spec.freq, SDL_GetError());
2545         wanted_spec.channels = next_nb_channels[FFMIN(7, wanted_spec.channels)];
2546         if (!wanted_spec.channels) {
2547             wanted_spec.freq = next_sample_rates[next_sample_rate_idx--];
2548             wanted_spec.channels = wanted_nb_channels;
2549             if (!wanted_spec.freq) {
2550                 av_log(NULL, AV_LOG_ERROR,
2551                        "No more combinations to try, audio open failed\n");
2552                 return -1;
2553             }
2554         }
2555         wanted_channel_layout = av_get_default_channel_layout(wanted_spec.channels);
2556     }
2557     if (spec.format != AUDIO_S16SYS) {
2558         av_log(NULL, AV_LOG_ERROR,
2559                "SDL advised audio format %d is not supported!\n", spec.format);
2560         return -1;
2561     }
2562     if (spec.channels != wanted_spec.channels) {
2563         wanted_channel_layout = av_get_default_channel_layout(spec.channels);
2564         if (!wanted_channel_layout) {
2565             av_log(NULL, AV_LOG_ERROR,
2566                    "SDL advised channel count %d is not supported!\n", spec.channels);
2567             return -1;
2568         }
2569     }
2570
2571     audio_hw_params->fmt = AV_SAMPLE_FMT_S16;
2572     audio_hw_params->freq = spec.freq;
2573     audio_hw_params->channel_layout = wanted_channel_layout;
2574     audio_hw_params->channels =  spec.channels;
2575     audio_hw_params->frame_size = av_samples_get_buffer_size(NULL, audio_hw_params->channels, 1, audio_hw_params->fmt, 1);
2576     audio_hw_params->bytes_per_sec = av_samples_get_buffer_size(NULL, audio_hw_params->channels, audio_hw_params->freq, audio_hw_params->fmt, 1);
2577     if (audio_hw_params->bytes_per_sec <= 0 || audio_hw_params->frame_size <= 0) {
2578         av_log(NULL, AV_LOG_ERROR, "av_samples_get_buffer_size failed\n");
2579         return -1;
2580     }
2581     return spec.size;
2582 }
2583
2584 /* open a given stream. Return 0 if OK */
2585 static int stream_component_open(VideoState *is, int stream_index)
2586 {
2587     AVFormatContext *ic = is->ic;
2588     AVCodecContext *avctx;
2589     AVCodec *codec;
2590     const char *forced_codec_name = NULL;
2591     AVDictionary *opts;
2592     AVDictionaryEntry *t = NULL;
2593     int sample_rate, nb_channels;
2594     int64_t channel_layout;
2595     int ret;
2596     int stream_lowres = lowres;
2597
2598     if (stream_index < 0 || stream_index >= ic->nb_streams)
2599         return -1;
2600     avctx = ic->streams[stream_index]->codec;
2601
2602     codec = avcodec_find_decoder(avctx->codec_id);
2603
2604     switch(avctx->codec_type){
2605         case AVMEDIA_TYPE_AUDIO   : is->last_audio_stream    = stream_index; forced_codec_name =    audio_codec_name; break;
2606         case AVMEDIA_TYPE_SUBTITLE: is->last_subtitle_stream = stream_index; forced_codec_name = subtitle_codec_name; break;
2607         case AVMEDIA_TYPE_VIDEO   : is->last_video_stream    = stream_index; forced_codec_name =    video_codec_name; break;
2608     }
2609     if (forced_codec_name)
2610         codec = avcodec_find_decoder_by_name(forced_codec_name);
2611     if (!codec) {
2612         if (forced_codec_name) av_log(NULL, AV_LOG_WARNING,
2613                                       "No codec could be found with name '%s'\n", forced_codec_name);
2614         else                   av_log(NULL, AV_LOG_WARNING,
2615                                       "No codec could be found with id %d\n", avctx->codec_id);
2616         return -1;
2617     }
2618
2619     avctx->codec_id = codec->id;
2620     avctx->workaround_bugs   = workaround_bugs;
2621     if(stream_lowres > av_codec_get_max_lowres(codec)){
2622         av_log(avctx, AV_LOG_WARNING, "The maximum value for lowres supported by the decoder is %d\n",
2623                 av_codec_get_max_lowres(codec));
2624         stream_lowres = av_codec_get_max_lowres(codec);
2625     }
2626     av_codec_set_lowres(avctx, stream_lowres);
2627
2628     if(stream_lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
2629     if (fast)   avctx->flags2 |= CODEC_FLAG2_FAST;
2630     if(codec->capabilities & CODEC_CAP_DR1)
2631         avctx->flags |= CODEC_FLAG_EMU_EDGE;
2632
2633     opts = filter_codec_opts(codec_opts, avctx->codec_id, ic, ic->streams[stream_index], codec);
2634     if (!av_dict_get(opts, "threads", NULL, 0))
2635         av_dict_set(&opts, "threads", "auto", 0);
2636     if (stream_lowres)
2637         av_dict_set_int(&opts, "lowres", stream_lowres, 0);
2638     if (avctx->codec_type == AVMEDIA_TYPE_VIDEO || avctx->codec_type == AVMEDIA_TYPE_AUDIO)
2639         av_dict_set(&opts, "refcounted_frames", "1", 0);
2640     if (avcodec_open2(avctx, codec, &opts) < 0)
2641         return -1;
2642     if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2643         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2644         return AVERROR_OPTION_NOT_FOUND;
2645     }
2646
2647     ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2648     switch (avctx->codec_type) {
2649     case AVMEDIA_TYPE_AUDIO:
2650 #if CONFIG_AVFILTER
2651         {
2652             AVFilterLink *link;
2653
2654             is->audio_filter_src.freq           = avctx->sample_rate;
2655             is->audio_filter_src.channels       = avctx->channels;
2656             is->audio_filter_src.channel_layout = get_valid_channel_layout(avctx->channel_layout, avctx->channels);
2657             is->audio_filter_src.fmt            = avctx->sample_fmt;
2658             if ((ret = configure_audio_filters(is, afilters, 0)) < 0)
2659                 return ret;
2660             link = is->out_audio_filter->inputs[0];
2661             sample_rate    = link->sample_rate;
2662             nb_channels    = link->channels;
2663             channel_layout = link->channel_layout;
2664         }
2665 #else
2666         sample_rate    = avctx->sample_rate;
2667         nb_channels    = avctx->channels;
2668         channel_layout = avctx->channel_layout;
2669 #endif
2670
2671         /* prepare audio output */
2672         if ((ret = audio_open(is, channel_layout, nb_channels, sample_rate, &is->audio_tgt)) < 0)
2673             return ret;
2674         is->audio_hw_buf_size = ret;
2675         is->audio_src = is->audio_tgt;
2676         is->audio_buf_size  = 0;
2677         is->audio_buf_index = 0;
2678
2679         /* init averaging filter */
2680         is->audio_diff_avg_coef  = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2681         is->audio_diff_avg_count = 0;
2682         /* since we do not have a precise enough measure of the audio FIFO fullness,
2683            we correct audio sync only if the error is larger than this threshold */
2684         is->audio_diff_threshold = (double)(is->audio_hw_buf_size) / is->audio_tgt.bytes_per_sec;
2685
2686         memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
2687         memset(&is->audio_pkt_temp, 0, sizeof(is->audio_pkt_temp));
2688         is->audio_pkt_temp.stream_index = -1;
2689
2690         is->audio_stream = stream_index;
2691         is->audio_st = ic->streams[stream_index];
2692
2693         packet_queue_start(&is->audioq);
2694         SDL_PauseAudio(0);
2695         break;
2696     case AVMEDIA_TYPE_VIDEO:
2697         is->video_stream = stream_index;
2698         is->video_st = ic->streams[stream_index];
2699
2700         packet_queue_start(&is->videoq);
2701         is->video_tid = SDL_CreateThread(video_thread, is);
2702         is->queue_attachments_req = 1;
2703         break;
2704     case AVMEDIA_TYPE_SUBTITLE:
2705         is->subtitle_stream = stream_index;
2706         is->subtitle_st = ic->streams[stream_index];
2707         packet_queue_start(&is->subtitleq);
2708
2709         is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
2710         break;
2711     default:
2712         break;
2713     }
2714     return 0;
2715 }
2716
2717 static void stream_component_close(VideoState *is, int stream_index)
2718 {
2719     AVFormatContext *ic = is->ic;
2720     AVCodecContext *avctx;
2721
2722     if (stream_index < 0 || stream_index >= ic->nb_streams)
2723         return;
2724     avctx = ic->streams[stream_index]->codec;
2725
2726     switch (avctx->codec_type) {
2727     case AVMEDIA_TYPE_AUDIO:
2728         packet_queue_abort(&is->audioq);
2729
2730         SDL_CloseAudio();
2731
2732         packet_queue_flush(&is->audioq);
2733         av_free_packet(&is->audio_pkt);
2734         swr_free(&is->swr_ctx);
2735         av_freep(&is->audio_buf1);
2736         is->audio_buf1_size = 0;
2737         is->audio_buf = NULL;
2738         av_frame_free(&is->frame);
2739
2740         if (is->rdft) {
2741             av_rdft_end(is->rdft);
2742             av_freep(&is->rdft_data);
2743             is->rdft = NULL;
2744             is->rdft_bits = 0;
2745         }
2746 #if CONFIG_AVFILTER
2747         avfilter_graph_free(&is->agraph);
2748 #endif
2749         break;
2750     case AVMEDIA_TYPE_VIDEO:
2751         packet_queue_abort(&is->videoq);
2752
2753         /* note: we also signal this condition to make sure we unblock the
2754            video thread in all cases */
2755         frame_queue_signal(&is->pictq);
2756
2757         SDL_WaitThread(is->video_tid, NULL);
2758
2759         packet_queue_flush(&is->videoq);
2760         break;
2761     case AVMEDIA_TYPE_SUBTITLE:
2762         packet_queue_abort(&is->subtitleq);
2763
2764         /* note: we also signal this condition to make sure we unblock the
2765            subtitle thread in all cases */
2766         frame_queue_signal(&is->subpq);
2767
2768         SDL_WaitThread(is->subtitle_tid, NULL);
2769
2770         packet_queue_flush(&is->subtitleq);
2771         break;
2772     default:
2773         break;
2774     }
2775
2776     ic->streams[stream_index]->discard = AVDISCARD_ALL;
2777     avcodec_close(avctx);
2778     switch (avctx->codec_type) {
2779     case AVMEDIA_TYPE_AUDIO:
2780         is->audio_st = NULL;
2781         is->audio_stream = -1;
2782         break;
2783     case AVMEDIA_TYPE_VIDEO:
2784         is->video_st = NULL;
2785         is->video_stream = -1;
2786         break;
2787     case AVMEDIA_TYPE_SUBTITLE:
2788         is->subtitle_st = NULL;
2789         is->subtitle_stream = -1;
2790         break;
2791     default:
2792         break;
2793     }
2794 }
2795
2796 static int decode_interrupt_cb(void *ctx)
2797 {
2798     VideoState *is = ctx;
2799     return is->abort_request;
2800 }
2801
2802 static int is_realtime(AVFormatContext *s)
2803 {
2804     if(   !strcmp(s->iformat->name, "rtp")
2805        || !strcmp(s->iformat->name, "rtsp")
2806        || !strcmp(s->iformat->name, "sdp")
2807     )
2808         return 1;
2809
2810     if(s->pb && (   !strncmp(s->filename, "rtp:", 4)
2811                  || !strncmp(s->filename, "udp:", 4)
2812                 )
2813     )
2814         return 1;
2815     return 0;
2816 }
2817
2818 /* this thread reads the stream from disk or from the network */
2819 static int read_thread(void *arg)
2820 {
2821     VideoState *is = arg;
2822     AVFormatContext *ic = NULL;
2823     int err, i, ret;
2824     int st_index[AVMEDIA_TYPE_NB];
2825     AVPacket pkt1, *pkt = &pkt1;
2826     int eof = 0;
2827     int64_t stream_start_time;
2828     int pkt_in_play_range = 0;
2829     AVDictionaryEntry *t;
2830     AVDictionary **opts;
2831     int orig_nb_streams;
2832     SDL_mutex *wait_mutex = SDL_CreateMutex();
2833
2834     memset(st_index, -1, sizeof(st_index));
2835     is->last_video_stream = is->video_stream = -1;
2836     is->last_audio_stream = is->audio_stream = -1;
2837     is->last_subtitle_stream = is->subtitle_stream = -1;
2838
2839     ic = avformat_alloc_context();
2840     ic->interrupt_callback.callback = decode_interrupt_cb;
2841     ic->interrupt_callback.opaque = is;
2842     err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
2843     if (err < 0) {
2844         print_error(is->filename, err);
2845         ret = -1;
2846         goto fail;
2847     }
2848     if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2849         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2850         ret = AVERROR_OPTION_NOT_FOUND;
2851         goto fail;
2852     }
2853     is->ic = ic;
2854
2855     if (genpts)
2856         ic->flags |= AVFMT_FLAG_GENPTS;
2857
2858     av_format_inject_global_side_data(ic);
2859
2860     opts = setup_find_stream_info_opts(ic, codec_opts);
2861     orig_nb_streams = ic->nb_streams;
2862
2863     err = avformat_find_stream_info(ic, opts);
2864     if (err < 0) {
2865         av_log(NULL, AV_LOG_WARNING,
2866                "%s: could not find codec parameters\n", is->filename);
2867         ret = -1;
2868         goto fail;
2869     }
2870     for (i = 0; i < orig_nb_streams; i++)
2871         av_dict_free(&opts[i]);
2872     av_freep(&opts);
2873
2874     if (ic->pb)
2875         ic->pb->eof_reached = 0; // FIXME hack: ffplay should probably not use avio_feof() to test for the end
2876
2877     if (seek_by_bytes < 0)
2878         seek_by_bytes = !!(ic->iformat->flags & AVFMT_TS_DISCONT) && strcmp("ogg", ic->iformat->name);
2879
2880     is->max_frame_duration = (ic->iformat->flags & AVFMT_TS_DISCONT) ? 10.0 : 3600.0;
2881
2882     if (!window_title && (t = av_dict_get(ic->metadata, "title", NULL, 0)))
2883         window_title = av_asprintf("%s - %s", t->value, input_filename);
2884
2885     /* if a seek was requested, execute it */
2886     if (start_time != AV_NOPTS_VALUE) {
2887         int64_t timestamp;
2888
2889         timestamp = start_time;
2890         /* add the stream start time */
2891         if (ic->start_time != AV_NOPTS_VALUE)
2892             timestamp += ic->start_time;
2893         ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2894         if (ret < 0) {
2895             av_log(NULL, AV_LOG_WARNING, "%s: could not seek to position %0.3f\n",
2896                     is->filename, (double)timestamp / AV_TIME_BASE);
2897         }
2898     }
2899
2900     is->realtime = is_realtime(ic);
2901
2902     for (i = 0; i < ic->nb_streams; i++)
2903         ic->streams[i]->discard = AVDISCARD_ALL;
2904     if (!video_disable)
2905         st_index[AVMEDIA_TYPE_VIDEO] =
2906             av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2907                                 wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2908     if (!audio_disable)
2909         st_index[AVMEDIA_TYPE_AUDIO] =
2910             av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2911                                 wanted_stream[AVMEDIA_TYPE_AUDIO],
2912                                 st_index[AVMEDIA_TYPE_VIDEO],
2913                                 NULL, 0);
2914     if (!video_disable && !subtitle_disable)
2915         st_index[AVMEDIA_TYPE_SUBTITLE] =
2916             av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2917                                 wanted_stream[AVMEDIA_TYPE_SUBTITLE],
2918                                 (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2919                                  st_index[AVMEDIA_TYPE_AUDIO] :
2920                                  st_index[AVMEDIA_TYPE_VIDEO]),
2921                                 NULL, 0);
2922     if (show_status) {
2923         av_dump_format(ic, 0, is->filename, 0);
2924     }
2925
2926     is->show_mode = show_mode;
2927     if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2928         AVStream *st = ic->streams[st_index[AVMEDIA_TYPE_VIDEO]];
2929         AVCodecContext *avctx = st->codec;
2930         AVRational sar = av_guess_sample_aspect_ratio(ic, st, NULL);
2931         if (avctx->width)
2932             set_default_window_size(avctx->width, avctx->height, sar);
2933     }
2934
2935     /* open the streams */
2936     if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2937         stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2938     }
2939
2940     ret = -1;
2941     if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2942         ret = stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2943     }
2944     if (is->show_mode == SHOW_MODE_NONE)
2945         is->show_mode = ret >= 0 ? SHOW_MODE_VIDEO : SHOW_MODE_RDFT;
2946
2947     if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2948         stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2949     }
2950
2951     if (is->video_stream < 0 && is->audio_stream < 0) {
2952         av_log(NULL, AV_LOG_FATAL, "Failed to open file '%s' or configure filtergraph\n",
2953                is->filename);
2954         ret = -1;
2955         goto fail;
2956     }
2957
2958     if (infinite_buffer < 0 && is->realtime)
2959         infinite_buffer = 1;
2960
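         /* main demuxing loop: service pause and seek requests, keep the packet queues filled and detect EOF */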
2961     for (;;) {
2962         if (is->abort_request)
2963             break;
2964         if (is->paused != is->last_paused) {
2965             is->last_paused = is->paused;
2966             if (is->paused)
2967                 is->read_pause_return = av_read_pause(ic);
2968             else
2969                 av_read_play(ic);
2970         }
2971 #if CONFIG_RTSP_DEMUXER || CONFIG_MMSH_PROTOCOL
2972         if (is->paused &&
2973                 (!strcmp(ic->iformat->name, "rtsp") ||
2974                  (ic->pb && !strncmp(input_filename, "mmsh:", 5)))) {
2975             /* wait 10 ms to avoid trying to get another packet */
2976             /* XXX: horrible */
2977             SDL_Delay(10);
2978             continue;
2979         }
2980 #endif
2981         if (is->seek_req) {
2982             int64_t seek_target = is->seek_pos;
2983             int64_t seek_min    = is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2984             int64_t seek_max    = is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2985 // FIXME the +-2 is because rounding is not done in the correct direction when
2986 //      the seek_pos/seek_rel variables are generated
2987
2988             ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2989             if (ret < 0) {
2990                 av_log(NULL, AV_LOG_ERROR,
2991                        "%s: error while seeking\n", is->ic->filename);
2992             } else {
2993                 if (is->audio_stream >= 0) {
2994                     packet_queue_flush(&is->audioq);
2995                     packet_queue_put(&is->audioq, &flush_pkt);
2996                 }
2997                 if (is->subtitle_stream >= 0) {
2998                     packet_queue_flush(&is->subtitleq);
2999                     packet_queue_put(&is->subtitleq, &flush_pkt);
3000                 }
3001                 if (is->video_stream >= 0) {
3002                     packet_queue_flush(&is->videoq);
3003                     packet_queue_put(&is->videoq, &flush_pkt);
3004                 }
3005                 if (is->seek_flags & AVSEEK_FLAG_BYTE) {
3006                    set_clock(&is->extclk, NAN, 0);
3007                 } else {
3008                    set_clock(&is->extclk, seek_target / (double)AV_TIME_BASE, 0);
3009                 }
3010             }
3011             is->seek_req = 0;
3012             is->queue_attachments_req = 1;
3013             eof = 0;
3014             if (is->paused)
3015                 step_to_next_frame(is);
3016         }
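             /* after a seek or stream open, queue the attached picture (cover art) once */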
3017         if (is->queue_attachments_req) {
3018             if (is->video_st && is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC) {
3019                 AVPacket copy;
3020                 if ((ret = av_copy_packet(&copy, &is->video_st->attached_pic)) < 0)
3021                     goto fail;
3022                 packet_queue_put(&is->videoq, &copy);
3023                 packet_queue_put_nullpacket(&is->videoq, is->video_stream);
3024             }
3025             is->queue_attachments_req = 0;
3026         }
3027
3028         /* if the queues are full, no need to read more */
3029         if (infinite_buffer < 1 &&
3030               (is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
3031             || (   (is->audioq   .nb_packets > MIN_FRAMES || is->audio_stream < 0 || is->audioq.abort_request)
3032                 && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream < 0 || is->videoq.abort_request
3033                     || (is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC))
3034                 && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream < 0 || is->subtitleq.abort_request)))) {
3035             /* wait 10 ms */
3036             SDL_LockMutex(wait_mutex);
3037             SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
3038             SDL_UnlockMutex(wait_mutex);
3039             continue;
3040         }
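             /* if playback of all enabled streams has finished, loop or exit as requested */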
3041         if (!is->paused &&
3042             (!is->audio_st || is->audio_finished == is->audioq.serial) &&
3043             (!is->video_st || (is->video_finished == is->videoq.serial && frame_queue_nb_remaining(&is->pictq) == 0))) {
3044             if (loop != 1 && (!loop || --loop)) {
3045                 stream_seek(is, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
3046             } else if (autoexit) {
3047                 ret = AVERROR_EOF;
3048                 goto fail;
3049             }
3050         }
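             /* at EOF, send a null packet to each open stream so the decoders can drain their buffered frames */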
3051         if (eof) {
3052             if (is->video_stream >= 0)
3053                 packet_queue_put_nullpacket(&is->videoq, is->video_stream);
3054             if (is->audio_stream >= 0)
3055                 packet_queue_put_nullpacket(&is->audioq, is->audio_stream);
3056             if (is->subtitle_stream >= 0)
3057                 packet_queue_put_nullpacket(&is->subtitleq, is->subtitle_stream);
3058             SDL_Delay(10);
3059             eof = 0;
3060             continue;
3061         }
3062         ret = av_read_frame(ic, pkt);
3063         if (ret < 0) {
3064             if (ret == AVERROR_EOF || avio_feof(ic->pb))
3065                 eof = 1;
3066             if (ic->pb && ic->pb->error)
3067                 break;
3068             SDL_LockMutex(wait_mutex);
3069             SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
3070             SDL_UnlockMutex(wait_mutex);
3071             continue;
3072         }
3073         /* check if the packet is in the play range specified by the user, then queue it, otherwise discard it */
3074         stream_start_time = ic->streams[pkt->stream_index]->start_time;
3075         pkt_in_play_range = duration == AV_NOPTS_VALUE ||
3076                 (pkt->pts - (stream_start_time != AV_NOPTS_VALUE ? stream_start_time : 0)) *
3077                 av_q2d(ic->streams[pkt->stream_index]->time_base) -
3078                 (double)(start_time != AV_NOPTS_VALUE ? start_time : 0) / 1000000
3079                 <= ((double)duration / 1000000);
3080         if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
3081             packet_queue_put(&is->audioq, pkt);
3082         } else if (pkt->stream_index == is->video_stream && pkt_in_play_range
3083                    && !(is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC)) {
3084             packet_queue_put(&is->videoq, pkt);
3085         } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
3086             packet_queue_put(&is->subtitleq, pkt);
3087         } else {
3088             av_free_packet(pkt);
3089         }
3090     }
3091     /* wait until the end */
3092     while (!is->abort_request) {
3093         SDL_Delay(100);
3094     }
3095
3096     ret = 0;
3097  fail:
3098     /* close each stream */
3099     if (is->audio_stream >= 0)
3100         stream_component_close(is, is->audio_stream);
3101     if (is->video_stream >= 0)
3102         stream_component_close(is, is->video_stream);
3103     if (is->subtitle_stream >= 0)
3104         stream_component_close(is, is->subtitle_stream);
3105     if (is->ic) {
3106         avformat_close_input(&is->ic);
3107     }
3108
3109     if (ret != 0) {
3110         SDL_Event event;
3111
3112         event.type = FF_QUIT_EVENT;
3113         event.user.data1 = is;
3114         SDL_PushEvent(&event);
3115     }
3116     SDL_DestroyMutex(wait_mutex);
3117     return 0;
3118 }
3119
3120 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
3121 {
3122     VideoState *is;
3123
3124     is = av_mallocz(sizeof(VideoState));
3125     if (!is)
3126         return NULL;
3127     av_strlcpy(is->filename, filename, sizeof(is->filename));
3128     is->iformat = iformat;
3129     is->ytop    = 0;
3130     is->xleft   = 0;
3131
3132     /* start video display */
3133     if (frame_queue_init(&is->pictq, &is->videoq, VIDEO_PICTURE_QUEUE_SIZE, 1) < 0)
3134         goto fail;
3135     if (frame_queue_init(&is->subpq, &is->subtitleq, SUBPICTURE_QUEUE_SIZE, 0) < 0)
3136         goto fail;
3137
3138     packet_queue_init(&is->videoq);
3139     packet_queue_init(&is->audioq);
3140     packet_queue_init(&is->subtitleq);
3141
3142     is->continue_read_thread = SDL_CreateCond();
3143
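         /* tie each stream clock to its packet queue's serial; the external clock tracks its own */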
3144     init_clock(&is->vidclk, &is->videoq.serial);
3145     init_clock(&is->audclk, &is->audioq.serial);
3146     init_clock(&is->extclk, &is->extclk.serial);
3147     is->audio_clock_serial = -1;
3148     is->audio_last_serial = -1;
3149     is->av_sync_type = av_sync_type;
3150     is->read_tid     = SDL_CreateThread(read_thread, is);
3151     if (!is->read_tid) {
3152 fail:
3153         stream_close(is);
3154         return NULL;
3155     }
3156     return is;
3157 }
3158
3159 static void stream_cycle_channel(VideoState *is, int codec_type)
3160 {
3161     AVFormatContext *ic = is->ic;
3162     int start_index, stream_index;
3163     int old_index;
3164     AVStream *st;
3165     AVProgram *p = NULL;
3166     int nb_streams = is->ic->nb_streams;
3167
3168     if (codec_type == AVMEDIA_TYPE_VIDEO) {
3169         start_index = is->last_video_stream;
3170         old_index = is->video_stream;
3171     } else if (codec_type == AVMEDIA_TYPE_AUDIO) {
3172         start_index = is->last_audio_stream;
3173         old_index = is->audio_stream;
3174     } else {
3175         start_index = is->last_subtitle_stream;
3176         old_index = is->subtitle_stream;
3177     }
3178     stream_index = start_index;
3179
3180     if (codec_type != AVMEDIA_TYPE_VIDEO && is->video_stream != -1) {
3181         p = av_find_program_from_stream(ic, NULL, is->video_stream);
3182         if (p) {
3183             nb_streams = p->nb_stream_indexes;
3184             for (start_index = 0; start_index < nb_streams; start_index++)
3185                 if (p->stream_index[start_index] == stream_index)
3186                     break;
3187             if (start_index == nb_streams)
3188                 start_index = -1;
3189             stream_index = start_index;
3190         }
3191     }
3192
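         /* walk the candidate streams until one of the requested type with usable parameters is found */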
3193     for (;;) {
3194         if (++stream_index >= nb_streams)
3195         {
3196             if (codec_type == AVMEDIA_TYPE_SUBTITLE)
3197             {
3198                 stream_index = -1;
3199                 is->last_subtitle_stream = -1;
3200                 goto the_end;
3201             }
3202             if (start_index == -1)
3203                 return;
3204             stream_index = 0;
3205         }
3206         if (stream_index == start_index)
3207             return;
3208         st = is->ic->streams[p ? p->stream_index[stream_index] : stream_index];
3209         if (st->codec->codec_type == codec_type) {
3210             /* check that parameters are OK */
3211             switch (codec_type) {
3212             case AVMEDIA_TYPE_AUDIO:
3213                 if (st->codec->sample_rate != 0 &&
3214                     st->codec->channels != 0)
3215                     goto the_end;
3216                 break;
3217             case AVMEDIA_TYPE_VIDEO:
3218             case AVMEDIA_TYPE_SUBTITLE:
3219                 goto the_end;
3220             default:
3221                 break;
3222             }
3223         }
3224     }
3225  the_end:
3226     if (p && stream_index != -1)
3227         stream_index = p->stream_index[stream_index];
3228     av_log(NULL, AV_LOG_INFO, "Switch %s stream from #%d to #%d\n",
3229            av_get_media_type_string(codec_type),
3230            old_index,
3231            stream_index);
3232
3233     stream_component_close(is, old_index);
3234     stream_component_open(is, stream_index);
3235 }
3236
3237
3238 static void toggle_full_screen(VideoState *is)
3239 {
3240 #if defined(__APPLE__) && SDL_VERSION_ATLEAST(1, 2, 14)
3241     /* OS X needs to reallocate the SDL overlays */
3242     int i;
3243     for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++)
3244         is->pictq.queue[i].reallocate = 1;
3245 #endif
3246     is_full_screen = !is_full_screen;
3247     video_open(is, 1, NULL);
3248 }
3249
3250 static void toggle_audio_display(VideoState *is)
3251 {
3252     int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
3253     int next = is->show_mode;
3254     do {
3255         next = (next + 1) % SHOW_MODE_NB;
3256     } while (next != is->show_mode && ((next == SHOW_MODE_VIDEO && !is->video_st) || (next != SHOW_MODE_VIDEO && !is->audio_st)));
3257     if (is->show_mode != next) {
3258         fill_rectangle(screen,
3259                     is->xleft, is->ytop, is->width, is->height,
3260                     bgcolor, 1);
3261         is->force_refresh = 1;
3262         is->show_mode = next;
3263     }
3264 }
3265
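     /* pump SDL events, refreshing the video display, until an event becomes available */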
3266 static void refresh_loop_wait_event(VideoState *is, SDL_Event *event) {
3267     double remaining_time = 0.0;
3268     SDL_PumpEvents();
3269     while (!SDL_PeepEvents(event, 1, SDL_GETEVENT, SDL_ALLEVENTS)) {
3270         if (!cursor_hidden && av_gettime_relative() - cursor_last_shown > CURSOR_HIDE_DELAY) {
3271             SDL_ShowCursor(0);
3272             cursor_hidden = 1;
3273         }
3274         if (remaining_time > 0.0)
3275             av_usleep((int64_t)(remaining_time * 1000000.0));
3276         remaining_time = REFRESH_RATE;
3277         if (is->show_mode != SHOW_MODE_NONE && (!is->paused || is->force_refresh))
3278             video_refresh(is, &remaining_time);
3279         SDL_PumpEvents();
3280     }
3281 }
3282
3283 static void seek_chapter(VideoState *is, int incr)
3284 {
3285     int64_t pos = get_master_clock(is) * AV_TIME_BASE;
3286     int i;
3287
3288     if (!is->ic->nb_chapters)
3289         return;
3290
3291     /* find the current chapter */
3292     for (i = 0; i < is->ic->nb_chapters; i++) {
3293         AVChapter *ch = is->ic->chapters[i];
3294         if (av_compare_ts(pos, AV_TIME_BASE_Q, ch->start, ch->time_base) < 0) {
3295             i--;
3296             break;
3297         }
3298     }
3299
3300     i += incr;
3301     i = FFMAX(i, 0);
3302     if (i >= is->ic->nb_chapters)
3303         return;
3304
3305     av_log(NULL, AV_LOG_VERBOSE, "Seeking to chapter %d.\n", i);
3306     stream_seek(is, av_rescale_q(is->ic->chapters[i]->start, is->ic->chapters[i]->time_base,
3307                                  AV_TIME_BASE_Q), 0, 0);
3308 }
3309
3310 /* handle an event sent by the GUI */
3311 static void event_loop(VideoState *cur_stream)
3312 {
3313     SDL_Event event;
3314     double incr, pos, frac;
3315
3316     for (;;) {
3317         double x;
3318         refresh_loop_wait_event(cur_stream, &event);
3319         switch (event.type) {
3320         case SDL_KEYDOWN:
3321             if (exit_on_keydown) {
3322                 do_exit(cur_stream);
3323                 break;
3324             }
3325             switch (event.key.keysym.sym) {
3326             case SDLK_ESCAPE:
3327             case SDLK_q:
3328                 do_exit(cur_stream);
3329                 break;
3330             case SDLK_f:
3331                 toggle_full_screen(cur_stream);
3332                 cur_stream->force_refresh = 1;
3333                 break;
3334             case SDLK_p:
3335             case SDLK_SPACE:
3336                 toggle_pause(cur_stream);
3337                 break;
3338             case SDLK_s: // S: Step to next frame
3339                 step_to_next_frame(cur_stream);
3340                 break;
3341             case SDLK_a:
3342                 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
3343                 break;
3344             case SDLK_v:
3345                 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
3346                 break;
3347             case SDLK_c:
3348                 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
3349                 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
3350                 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
3351                 break;
3352             case SDLK_t:
3353                 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
3354                 break;
3355             case SDLK_w:
3356 #if CONFIG_AVFILTER
3357                 if (cur_stream->show_mode == SHOW_MODE_VIDEO && cur_stream->vfilter_idx < nb_vfilters - 1) {
3358                     if (++cur_stream->vfilter_idx >= nb_vfilters)
3359                         cur_stream->vfilter_idx = 0;
3360                 } else {
3361                     cur_stream->vfilter_idx = 0;
3362                     toggle_audio_display(cur_stream);
3363                 }
3364 #else
3365                 toggle_audio_display(cur_stream);
3366 #endif
3367                 break;
3368             case SDLK_PAGEUP:
3369                 if (cur_stream->ic->nb_chapters <= 1) {
3370                     incr = 600.0;
3371                     goto do_seek;
3372                 }
3373                 seek_chapter(cur_stream, 1);
3374                 break;
3375             case SDLK_PAGEDOWN:
3376                 if (cur_stream->ic->nb_chapters <= 1) {
3377                     incr = -600.0;
3378                     goto do_seek;
3379                 }
3380                 seek_chapter(cur_stream, -1);
3381                 break;
3382             case SDLK_LEFT:
3383                 incr = -10.0;
3384                 goto do_seek;
3385             case SDLK_RIGHT:
3386                 incr = 10.0;
3387                 goto do_seek;
3388             case SDLK_UP:
3389                 incr = 60.0;
3390                 goto do_seek;
3391             case SDLK_DOWN:
3392                 incr = -60.0;
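                 /* seek by byte position when seek_by_bytes is set, otherwise by time on the master clock */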
3393             do_seek:
3394                     if (seek_by_bytes) {
3395                         pos = -1;
3396                         if (pos < 0 && cur_stream->video_stream >= 0)
3397                             pos = frame_queue_last_pos(&cur_stream->pictq);
3398                         if (pos < 0 && cur_stream->audio_stream >= 0)
3399                             pos = cur_stream->audio_pkt.pos;
3400                         if (pos < 0)
3401                             pos = avio_tell(cur_stream->ic->pb);
3402                         if (cur_stream->ic->bit_rate)
3403                             incr *= cur_stream->ic->bit_rate / 8.0;
3404                         else
3405                             incr *= 180000.0;
3406                         pos += incr;
3407                         stream_seek(cur_stream, pos, incr, 1);
3408                     } else {
3409                         pos = get_master_clock(cur_stream);
3410                         if (isnan(pos))
3411                             pos = (double)cur_stream->seek_pos / AV_TIME_BASE;
3412                         pos += incr;
3413                         if (cur_stream->ic->start_time != AV_NOPTS_VALUE && pos < cur_stream->ic->start_time / (double)AV_TIME_BASE)
3414                             pos = cur_stream->ic->start_time / (double)AV_TIME_BASE;
3415                         stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
3416                     }
3417                 break;
3418             default:
3419                 break;
3420             }
3421             break;
3422         case SDL_VIDEOEXPOSE:
3423             cur_stream->force_refresh = 1;
3424             break;
3425         case SDL_MOUSEBUTTONDOWN:
3426             if (exit_on_mousedown) {
3427                 do_exit(cur_stream);
3428                 break;
3429             }
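             /* fall through to the common mouse handling */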
3430         case SDL_MOUSEMOTION:
3431             if (cursor_hidden) {
3432                 SDL_ShowCursor(1);
3433                 cursor_hidden = 0;
3434             }
3435             cursor_last_shown = av_gettime_relative();
3436             if (event.type == SDL_MOUSEBUTTONDOWN) {
3437                 x = event.button.x;
3438             } else {
3439                 if (event.motion.state != SDL_PRESSED)
3440                     break;
3441                 x = event.motion.x;
3442             }
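                     /* translate the click's x position into a byte offset or a fraction of the total duration */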
3443                 if (seek_by_bytes || cur_stream->ic->duration <= 0) {
3444                     uint64_t size =  avio_size(cur_stream->ic->pb);
3445                     stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
3446                 } else {
3447                     int64_t ts;
3448                     int ns, hh, mm, ss;
3449                     int tns, thh, tmm, tss;
3450                     tns  = cur_stream->ic->duration / 1000000LL;
3451                     thh  = tns / 3600;
3452                     tmm  = (tns % 3600) / 60;
3453                     tss  = (tns % 60);
3454                     frac = x / cur_stream->width;
3455                     ns   = frac * tns;
3456                     hh   = ns / 3600;
3457                     mm   = (ns % 3600) / 60;
3458                     ss   = (ns % 60);
3459                     av_log(NULL, AV_LOG_INFO,